Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r-- | drivers/media/v4l2-core/Kconfig | 9
-rw-r--r-- | drivers/media/v4l2-core/tuner-core.c | 10
-rw-r--r-- | drivers/media/v4l2-core/v4l2-common.c | 134
-rw-r--r-- | drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 40
-rw-r--r-- | drivers/media/v4l2-core/v4l2-ctrls.c | 93
-rw-r--r-- | drivers/media/v4l2-core/v4l2-dev.c | 34
-rw-r--r-- | drivers/media/v4l2-core/v4l2-dv-timings.c | 3
-rw-r--r-- | drivers/media/v4l2-core/v4l2-ioctl.c | 27
-rw-r--r-- | drivers/media/v4l2-core/v4l2-subdev.c | 9
-rw-r--r-- | drivers/media/v4l2-core/videobuf-core.c | 17
-rw-r--r-- | drivers/media/v4l2-core/videobuf-dma-contig.c | 9
-rw-r--r-- | drivers/media/v4l2-core/videobuf-dma-sg.c | 6
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-core.c | 115
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-dma-contig.c | 71
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-dma-sg.c | 425
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-vmalloc.c | 194
16 files changed, 854 insertions, 342 deletions
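
Of the cleanups in the diff below, the clamp_align() rework in v4l2-common.c is a good illustration of the pattern: the open-coded min/max branches are replaced by a single clamp of the value between the aligned minimum and maximum before rounding to the nearest aligned value. A minimal user-space sketch of the resulting helper follows; the kernel's clamp() macro is open-coded here since it is not available outside the tree, and the main() driver with its example values is purely illustrative:

#include <stdio.h>

/* Clamp x to [min, max] and round it to a multiple of 2^align,
 * mirroring the reworked clamp_align() in v4l2-common.c below. */
static unsigned int clamp_align(unsigned int x, unsigned int min,
                                unsigned int max, unsigned int align)
{
	/* Bits that must be zero to be aligned */
	unsigned int mask = ~((1 << align) - 1);
	unsigned int lo = (min + ~mask) & mask; /* aligned min, rounded up */
	unsigned int hi = max & mask;           /* aligned max, rounded down */

	/* Clamp to aligned min and max (open-coded kernel clamp()) */
	x = x < lo ? lo : (x > hi ? hi : x);

	/* Round to nearest aligned value */
	if (align)
		x = (x + (1 << (align - 1))) & mask;

	return x;
}

int main(void)
{
	/* e.g. snap a requested width of 723 to [32, 2048] on a
	 * 16-pixel (2^4) grid; prints 720 */
	printf("%u\n", clamp_align(723, 32, 2048, 4));
	return 0;
}
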
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig index 9ca0f8d59a1..ba7e21a7302 100644 --- a/drivers/media/v4l2-core/Kconfig +++ b/drivers/media/v4l2-core/Kconfig @@ -25,6 +25,15 @@ config VIDEO_FIXED_MINOR_RANGES When in doubt, say N. +config VIDEO_PCI_SKELETON + tristate "Skeleton PCI V4L2 driver" + depends on PCI && BUILD_DOCSRC + depends on VIDEO_V4L2 && VIDEOBUF2_CORE + depends on VIDEOBUF2_MEMOPS && VIDEOBUF2_DMA_CONTIG + ---help--- + Enable build of the skeleton PCI driver, used as a reference + when developing new drivers. + # Used by drivers that need tuner.ko config VIDEO_TUNER tristate diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c index 06c18ba16fa..559f8372e2e 100644 --- a/drivers/media/v4l2-core/tuner-core.c +++ b/drivers/media/v4l2-core/tuner-core.c @@ -601,7 +601,7 @@ static int tuner_probe(struct i2c_client *client, t->name = "(tuner unset)"; t->type = UNSET; t->audmode = V4L2_TUNER_MODE_STEREO; - t->standby = 1; + t->standby = true; t->radio_freq = 87.5 * 16000; /* Initial freq range */ t->tv_freq = 400 * 16; /* Sets freq to VHF High - needed for some PLL's to properly start */ @@ -1260,7 +1260,9 @@ static int tuner_suspend(struct device *dev) tuner_dbg("suspend\n"); - if (!t->standby && analog_ops->standby) + if (t->fe.ops.tuner_ops.suspend) + t->fe.ops.tuner_ops.suspend(&t->fe); + else if (!t->standby && analog_ops->standby) analog_ops->standby(&t->fe); return 0; @@ -1273,7 +1275,9 @@ static int tuner_resume(struct device *dev) tuner_dbg("resume\n"); - if (!t->standby) + if (t->fe.ops.tuner_ops.resume) + t->fe.ops.tuner_ops.resume(&t->fe); + else if (!t->standby) if (set_mode(t, t->mode) == 0) set_freq(t, 0); diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c index ccaa38f65cf..5b808500e7e 100644 --- a/drivers/media/v4l2-core/v4l2-common.c +++ b/drivers/media/v4l2-core/v4l2-common.c @@ -80,36 +80,6 @@ MODULE_LICENSE("GPL"); /* Helper functions for control handling */ -/* Check for correctness of the ctrl's value based on the data from - struct v4l2_queryctrl and the available menu items. Note that - menu_items may be NULL, in that case it is ignored. */ -int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl, - const char * const *menu_items) -{ - if (qctrl->flags & V4L2_CTRL_FLAG_DISABLED) - return -EINVAL; - if (qctrl->flags & V4L2_CTRL_FLAG_GRABBED) - return -EBUSY; - if (qctrl->type == V4L2_CTRL_TYPE_STRING) - return 0; - if (qctrl->type == V4L2_CTRL_TYPE_BUTTON || - qctrl->type == V4L2_CTRL_TYPE_INTEGER64 || - qctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS) - return 0; - if (ctrl->value < qctrl->minimum || ctrl->value > qctrl->maximum) - return -ERANGE; - if (qctrl->type == V4L2_CTRL_TYPE_MENU && menu_items != NULL) { - if (menu_items[ctrl->value] == NULL || - menu_items[ctrl->value][0] == '\0') - return -EINVAL; - } - if (qctrl->type == V4L2_CTRL_TYPE_BITMASK && - (ctrl->value & ~qctrl->maximum)) - return -ERANGE; - return 0; -} -EXPORT_SYMBOL(v4l2_ctrl_check); - /* Fill in a struct v4l2_queryctrl */ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def) { @@ -135,101 +105,6 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _ } EXPORT_SYMBOL(v4l2_ctrl_query_fill); -/* Fill in a struct v4l2_querymenu based on the struct v4l2_queryctrl and - the menu. The qctrl pointer may be NULL, in which case it is ignored. 
- If menu_items is NULL, then the menu items are retrieved using - v4l2_ctrl_get_menu. */ -int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, struct v4l2_queryctrl *qctrl, - const char * const *menu_items) -{ - int i; - - qmenu->reserved = 0; - if (menu_items == NULL) - menu_items = v4l2_ctrl_get_menu(qmenu->id); - if (menu_items == NULL || - (qctrl && (qmenu->index < qctrl->minimum || qmenu->index > qctrl->maximum))) - return -EINVAL; - for (i = 0; i < qmenu->index && menu_items[i]; i++) ; - if (menu_items[i] == NULL || menu_items[i][0] == '\0') - return -EINVAL; - strlcpy(qmenu->name, menu_items[qmenu->index], sizeof(qmenu->name)); - return 0; -} -EXPORT_SYMBOL(v4l2_ctrl_query_menu); - -/* Fill in a struct v4l2_querymenu based on the specified array of valid - menu items (terminated by V4L2_CTRL_MENU_IDS_END). - Use this if there are 'holes' in the list of valid menu items. */ -int v4l2_ctrl_query_menu_valid_items(struct v4l2_querymenu *qmenu, const u32 *ids) -{ - const char * const *menu_items = v4l2_ctrl_get_menu(qmenu->id); - - qmenu->reserved = 0; - if (menu_items == NULL || ids == NULL) - return -EINVAL; - while (*ids != V4L2_CTRL_MENU_IDS_END) { - if (*ids++ == qmenu->index) { - strlcpy(qmenu->name, menu_items[qmenu->index], - sizeof(qmenu->name)); - return 0; - } - } - return -EINVAL; -} -EXPORT_SYMBOL(v4l2_ctrl_query_menu_valid_items); - -/* ctrl_classes points to an array of u32 pointers, the last element is - a NULL pointer. Each u32 array is a 0-terminated array of control IDs. - Each array must be sorted low to high and belong to the same control - class. The array of u32 pointers must also be sorted, from low class IDs - to high class IDs. - - This function returns the first ID that follows after the given ID. - When no more controls are available 0 is returned. */ -u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id) -{ - u32 ctrl_class = V4L2_CTRL_ID2CLASS(id); - const u32 *pctrl; - - if (ctrl_classes == NULL) - return 0; - - /* if no query is desired, then check if the ID is part of ctrl_classes */ - if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0) { - /* find class */ - while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) != ctrl_class) - ctrl_classes++; - if (*ctrl_classes == NULL) - return 0; - pctrl = *ctrl_classes; - /* find control ID */ - while (*pctrl && *pctrl != id) pctrl++; - return *pctrl ? id : 0; - } - id &= V4L2_CTRL_ID_MASK; - id++; /* select next control */ - /* find first class that matches (or is greater than) the class of - the ID */ - while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) < ctrl_class) - ctrl_classes++; - /* no more classes */ - if (*ctrl_classes == NULL) - return 0; - pctrl = *ctrl_classes; - /* find first ctrl within the class that is >= ID */ - while (*pctrl && *pctrl < id) pctrl++; - if (*pctrl) - return *pctrl; - /* we are at the end of the controls of the current class. 
*/ - /* continue with next class if available */ - ctrl_classes++; - if (*ctrl_classes == NULL) - return 0; - return **ctrl_classes; -} -EXPORT_SYMBOL(v4l2_ctrl_next); - /* I2C Helper functions */ #if IS_ENABLED(CONFIG_I2C) @@ -435,16 +310,13 @@ static unsigned int clamp_align(unsigned int x, unsigned int min, /* Bits that must be zero to be aligned */ unsigned int mask = ~((1 << align) - 1); + /* Clamp to aligned min and max */ + x = clamp(x, (min + ~mask) & mask, max & mask); + /* Round to nearest aligned value */ if (align) x = (x + (1 << (align - 1))) & mask; - /* Clamp to aligned value of min and max */ - if (x < min) - x = (min + ~mask) & mask; - else if (x > max) - x = max & mask; - return x; } diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index cca6c2f76b3..af635430524 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -222,6 +222,9 @@ static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_ static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) { + if (put_user(kp->type, &up->type)) + return -EFAULT; + switch (kp->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: @@ -248,8 +251,7 @@ static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) { - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)) || - put_user(kp->type, &up->type)) + if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32))) return -EFAULT; return __put_v4l2_format32(kp, up); } @@ -257,8 +259,8 @@ static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) { if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) || - copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format.fmt))) - return -EFAULT; + copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format))) + return -EFAULT; return __put_v4l2_format32(&kp->format, &up->format); } @@ -328,7 +330,7 @@ struct v4l2_buffer32 { __u32 reserved; }; -static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32, +static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, enum v4l2_memory memory) { void __user *up_pln; @@ -357,7 +359,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32, return 0; } -static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32, +static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, enum v4l2_memory memory) { if (copy_in_user(up32, up, 2 * sizeof(__u32)) || @@ -427,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user * by passing a very big num_planes value */ uplane = compat_alloc_user_space(num_planes * sizeof(struct v4l2_plane)); - kp->m.planes = uplane; + kp->m.planes = (__force struct v4l2_plane *)uplane; while (--num_planes >= 0) { ret = get_v4l2_plane32(uplane, uplane32, kp->memory); @@ -498,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user if (num_planes == 0) return 0; - uplane = kp->m.planes; + uplane = (__force struct v4l2_plane __user *)kp->m.planes; if (get_user(p, &up->m.planes)) return -EFAULT; uplane32 = compat_ptr(p); @@ -562,7 +564,7 
@@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame get_user(kp->flags, &up->flags) || copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt))) return -EFAULT; - kp->base = compat_ptr(tmp); + kp->base = (__force void *)compat_ptr(tmp); return 0; } @@ -667,11 +669,15 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext n * sizeof(struct v4l2_ext_control32))) return -EFAULT; kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control)); - kp->controls = kcontrols; + kp->controls = (__force struct v4l2_ext_control *)kcontrols; while (--n >= 0) { + u32 id; + if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols))) return -EFAULT; - if (ctrl_is_pointer(kcontrols->id)) { + if (get_user(id, &kcontrols->id)) + return -EFAULT; + if (ctrl_is_pointer(id)) { void __user *s; if (get_user(p, &ucontrols->string)) @@ -689,7 +695,8 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up) { struct v4l2_ext_control32 __user *ucontrols; - struct v4l2_ext_control __user *kcontrols = kp->controls; + struct v4l2_ext_control __user *kcontrols = + (__force struct v4l2_ext_control __user *)kp->controls; int n = kp->count; compat_caddr_t p; @@ -711,11 +718,14 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext while (--n >= 0) { unsigned size = sizeof(*ucontrols); + u32 id; + if (get_user(id, &kcontrols->id)) + return -EFAULT; /* Do not modify the pointer when copying a pointer control. The contents of the pointer was changed, not the pointer itself. */ - if (ctrl_is_pointer(kcontrols->id)) + if (ctrl_is_pointer(id)) size -= sizeof(ucontrols->value64); if (copy_in_user(ucontrols, kcontrols, size)) return -EFAULT; @@ -770,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) get_user(tmp, &up->edid) || copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved))) return -EFAULT; - kp->edid = compat_ptr(tmp); + kp->edid = (__force u8 *)compat_ptr(tmp); return 0; } @@ -783,7 +793,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) put_user(kp->start_block, &up->start_block) || put_user(kp->blocks, &up->blocks) || put_user(tmp, &up->edid) || - copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved))) + copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) return -EFAULT; return 0; } diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index f030d6a9e04..45c5b471060 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -796,6 +796,8 @@ const char *v4l2_ctrl_get_name(u32 id) case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop"; case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status"; case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range"; + case V4L2_CID_PAN_SPEED: return "Pan, Speed"; + case V4L2_CID_TILT_SPEED: return "Tilt, Speed"; /* FM Radio Modulator controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ @@ -859,6 +861,10 @@ const char *v4l2_ctrl_get_name(u32 id) case V4L2_CID_VBLANK: return "Vertical Blanking"; case V4L2_CID_HBLANK: return "Horizontal Blanking"; case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain"; + case V4L2_CID_TEST_PATTERN_RED: return "Red Pixel Value"; + case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value"; + case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value"; + case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value"; /* Image processing controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! */ @@ -1652,10 +1658,8 @@ static int check_range(enum v4l2_ctrl_type type, } /* Validate a new control */ -static int validate_new(const struct v4l2_ctrl *ctrl, - struct v4l2_ext_control *c) +static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new) { - union v4l2_ctrl_ptr ptr; unsigned idx; int err = 0; @@ -1668,19 +1672,14 @@ static int validate_new(const struct v4l2_ctrl *ctrl, case V4L2_CTRL_TYPE_BOOLEAN: case V4L2_CTRL_TYPE_BUTTON: case V4L2_CTRL_TYPE_CTRL_CLASS: - ptr.p_s32 = &c->value; - return ctrl->type_ops->validate(ctrl, 0, ptr); - case V4L2_CTRL_TYPE_INTEGER64: - ptr.p_s64 = &c->value64; - return ctrl->type_ops->validate(ctrl, 0, ptr); + return ctrl->type_ops->validate(ctrl, 0, p_new); default: break; } } - ptr.p = c->ptr; - for (idx = 0; !err && idx < c->size / ctrl->elem_size; idx++) - err = ctrl->type_ops->validate(ctrl, idx, ptr); + for (idx = 0; !err && idx < ctrl->elems; idx++) + err = ctrl->type_ops->validate(ctrl, idx, p_new); return err; } @@ -3006,6 +3005,7 @@ static int validate_ctrls(struct v4l2_ext_controls *cs, cs->error_idx = cs->count; for (i = 0; i < cs->count; i++) { struct v4l2_ctrl *ctrl = helpers[i].ctrl; + union v4l2_ctrl_ptr p_new; cs->error_idx = i; @@ -3019,7 +3019,17 @@ static int validate_ctrls(struct v4l2_ext_controls *cs, best-effort to avoid that. */ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) return -EBUSY; - ret = validate_new(ctrl, &cs->controls[i]); + /* + * Skip validation for now if the payload needs to be copied + * from userspace into kernelspace. We'll validate those later. + */ + if (ctrl->is_ptr) + continue; + if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) + p_new.p_s64 = &cs->controls[i].value64; + else + p_new.p_s32 = &cs->controls[i].value; + ret = validate_new(ctrl, p_new); if (ret) return ret; } @@ -3114,7 +3124,11 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, /* Copy the new caller-supplied control values. user_to_new() sets 'is_new' to 1. 
*/ do { - ret = user_to_new(cs->controls + idx, helpers[idx].ctrl); + struct v4l2_ctrl *ctrl = helpers[idx].ctrl; + + ret = user_to_new(cs->controls + idx, ctrl); + if (!ret && ctrl->is_ptr) + ret = validate_new(ctrl, ctrl->p_new); idx = helpers[idx].next; } while (!ret && idx); @@ -3164,10 +3178,10 @@ int v4l2_subdev_s_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls); /* Helper function for VIDIOC_S_CTRL compatibility */ -static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, - struct v4l2_ext_control *c, u32 ch_flags) +static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags) { struct v4l2_ctrl *master = ctrl->cluster[0]; + int ret; int i; /* Reset the 'is_new' flags of the cluster */ @@ -3175,8 +3189,9 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, if (master->cluster[i]) master->cluster[i]->is_new = 0; - if (c) - user_to_new(c, ctrl); + ret = validate_new(ctrl, ctrl->p_new); + if (ret) + return ret; /* For autoclusters with volatiles that are switched from auto to manual mode we have to update the current volatile values since @@ -3193,15 +3208,14 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c) { - int ret = validate_new(ctrl, c); + int ret; - if (!ret) { - v4l2_ctrl_lock(ctrl); - ret = set_ctrl(fh, ctrl, c, 0); - if (!ret) - cur_to_user(c, ctrl); - v4l2_ctrl_unlock(ctrl); - } + v4l2_ctrl_lock(ctrl); + user_to_new(c, ctrl); + ret = set_ctrl(fh, ctrl, 0); + if (!ret) + cur_to_user(c, ctrl); + v4l2_ctrl_unlock(ctrl); return ret; } @@ -3209,7 +3223,7 @@ int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, struct v4l2_control *control) { struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id); - struct v4l2_ext_control c; + struct v4l2_ext_control c = { control->id }; int ret; if (ctrl == NULL || !ctrl->is_int) @@ -3238,7 +3252,7 @@ int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val) /* It's a driver bug if this happens. */ WARN_ON(!ctrl->is_int); ctrl->val = val; - return set_ctrl(NULL, ctrl, NULL, 0); + return set_ctrl(NULL, ctrl, 0); } EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl); @@ -3249,7 +3263,7 @@ int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val) /* It's a driver bug if this happens. */ WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64); *ctrl->p_new.p_s64 = val; - return set_ctrl(NULL, ctrl, NULL, 0); + return set_ctrl(NULL, ctrl, 0); } EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64); @@ -3260,7 +3274,7 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) /* It's a driver bug if this happens. 
*/ WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING); strlcpy(ctrl->p_new.p_char, s, ctrl->maximum + 1); - return set_ctrl(NULL, ctrl, NULL, 0); + return set_ctrl(NULL, ctrl, 0); } EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string); @@ -3283,8 +3297,8 @@ EXPORT_SYMBOL(v4l2_ctrl_notify); int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, s64 min, s64 max, u64 step, s64 def) { + bool changed; int ret; - struct v4l2_ext_control c; lockdep_assert_held(ctrl->handler->lock); @@ -3311,11 +3325,20 @@ int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, ctrl->maximum = max; ctrl->step = step; ctrl->default_value = def; - c.value = *ctrl->p_cur.p_s32; - if (validate_new(ctrl, &c)) - c.value = def; - if (c.value != *ctrl->p_cur.p_s32) - ret = set_ctrl(NULL, ctrl, &c, V4L2_EVENT_CTRL_CH_RANGE); + cur_to_new(ctrl); + if (validate_new(ctrl, ctrl->p_new)) { + if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) + *ctrl->p_new.p_s64 = def; + else + *ctrl->p_new.p_s32 = def; + } + + if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) + changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64; + else + changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32; + if (changed) + ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); else send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); return ret; diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c index 33617c365ac..9aa530a8bea 100644 --- a/drivers/media/v4l2-core/v4l2-dev.c +++ b/drivers/media/v4l2-core/v4l2-dev.c @@ -194,7 +194,7 @@ static void v4l2_device_release(struct device *cd) mutex_unlock(&videodev_lock); #if defined(CONFIG_MEDIA_CONTROLLER) - if (v4l2_dev && v4l2_dev->mdev && + if (v4l2_dev->mdev && vdev->vfl_type != VFL_TYPE_SUBDEV) media_device_unregister_entity(&vdev->entity); #endif @@ -207,7 +207,7 @@ static void v4l2_device_release(struct device *cd) * TODO: In the long run all drivers that use v4l2_device should use the * v4l2_device release callback. This check will then be unnecessary. */ - if (v4l2_dev && v4l2_dev->release == NULL) + if (v4l2_dev->release == NULL) v4l2_dev = NULL; /* Release video_device and perform other @@ -360,27 +360,22 @@ static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) * hack but it will have to do for those drivers that are not * yet converted to use unlocked_ioctl. * - * There are two options: if the driver implements struct - * v4l2_device, then the lock defined there is used to - * serialize the ioctls. Otherwise the v4l2 core lock defined - * below is used. This lock is really bad since it serializes - * completely independent devices. + * All drivers implement struct v4l2_device, so we use the + * lock defined there to serialize the ioctls. * - * Both variants suffer from the same problem: if the driver - * sleeps, then it blocks all ioctls since the lock is still - * held. This is very common for VIDIOC_DQBUF since that - * normally waits for a frame to arrive. As a result any other - * ioctl calls will proceed very, very slowly since each call - * will have to wait for the VIDIOC_QBUF to finish. Things that - * should take 0.01s may now take 10-20 seconds. + * However, if the driver sleeps, then it blocks all ioctls + * since the lock is still held. This is very common for + * VIDIOC_DQBUF since that normally waits for a frame to arrive. + * As a result any other ioctl calls will proceed very, very + * slowly since each call will have to wait for the VIDIOC_QBUF + * to finish. Things that should take 0.01s may now take 10-20 + * seconds. 
* * The workaround is to *not* take the lock for VIDIOC_DQBUF. * This actually works OK for videobuf-based drivers, since * videobuf will take its own internal lock. */ - static DEFINE_MUTEX(v4l2_ioctl_mutex); - struct mutex *m = vdev->v4l2_dev ? - &vdev->v4l2_dev->ioctl_lock : &v4l2_ioctl_mutex; + struct mutex *m = &vdev->v4l2_dev->ioctl_lock; if (cmd != VIDIOC_DQBUF && mutex_lock_interruptible(m)) return -ERESTARTSYS; @@ -938,12 +933,11 @@ int __video_register_device(struct video_device *vdev, int type, int nr, name_base, nr, video_device_node_name(vdev)); /* Increase v4l2_device refcount */ - if (vdev->v4l2_dev) - v4l2_device_get(vdev->v4l2_dev); + v4l2_device_get(vdev->v4l2_dev); #if defined(CONFIG_MEDIA_CONTROLLER) /* Part 5: Register the entity. */ - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && + if (vdev->v4l2_dev->mdev && vdev->vfl_type != VFL_TYPE_SUBDEV) { vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L; vdev->entity.name = vdev->name; diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index ce1c9f5d9de..b1d8dbb3966 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -164,7 +164,8 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t, bt->width > cap->max_width || bt->pixelclock < cap->min_pixelclock || bt->pixelclock > cap->max_pixelclock || - (cap->standards && !(bt->standards & cap->standards)) || + (cap->standards && bt->standards && + !(bt->standards & cap->standards)) || (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) || (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE))) return false; diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index d15e16737ee..faac2f4e0f3 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -257,7 +257,7 @@ static void v4l_print_format(const void *arg, bool write_only) pr_cont(", width=%u, height=%u, " "pixelformat=%c%c%c%c, field=%s, " "bytesperline=%u, sizeimage=%u, colorspace=%d, " - "flags %u\n", + "flags %x, ycbcr_enc=%u, quantization=%u\n", pix->width, pix->height, (pix->pixelformat & 0xff), (pix->pixelformat >> 8) & 0xff, @@ -265,21 +265,24 @@ static void v4l_print_format(const void *arg, bool write_only) (pix->pixelformat >> 24) & 0xff, prt_names(pix->field, v4l2_field_names), pix->bytesperline, pix->sizeimage, - pix->colorspace, pix->flags); + pix->colorspace, pix->flags, pix->ycbcr_enc, + pix->quantization); break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: mp = &p->fmt.pix_mp; pr_cont(", width=%u, height=%u, " "format=%c%c%c%c, field=%s, " - "colorspace=%d, num_planes=%u\n", + "colorspace=%d, num_planes=%u, flags=%x, " + "ycbcr_enc=%u, quantization=%u\n", mp->width, mp->height, (mp->pixelformat & 0xff), (mp->pixelformat >> 8) & 0xff, (mp->pixelformat >> 16) & 0xff, (mp->pixelformat >> 24) & 0xff, prt_names(mp->field, v4l2_field_names), - mp->colorspace, mp->num_planes); + mp->colorspace, mp->num_planes, mp->flags, + mp->ycbcr_enc, mp->quantization); for (i = 0; i < mp->num_planes; i++) printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, mp->plane_fmt[i].bytesperline, @@ -562,7 +565,7 @@ static void v4l_print_ext_controls(const void *arg, bool write_only) pr_cont("class=0x%x, count=%d, error_idx=%d", p->ctrl_class, p->count, p->error_idx); for (i = 0; i < p->count; i++) { - if (p->controls[i].size) + if (!p->controls[i].size) pr_cont(", id/val=0x%x/0x%x", p->controls[i].id, 
p->controls[i].value); else @@ -1014,6 +1017,12 @@ static int v4l_querycap(const struct v4l2_ioctl_ops *ops, ret = ops->vidioc_querycap(file, fh, cap); cap->capabilities |= V4L2_CAP_EXT_PIX_FORMAT; + /* + * Drivers MUST fill in device_caps, so check for this and + * warn if it was forgotten. + */ + WARN_ON(!(cap->capabilities & V4L2_CAP_DEVICE_CAPS) || + !cap->device_caps); cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT; return ret; @@ -1040,7 +1049,7 @@ static int v4l_g_priority(const struct v4l2_ioctl_ops *ops, if (ops->vidioc_g_priority) return ops->vidioc_g_priority(file, fh, arg); vfd = video_devdata(file); - *p = v4l2_prio_max(&vfd->v4l2_dev->prio); + *p = v4l2_prio_max(vfd->prio); return 0; } @@ -1055,7 +1064,7 @@ static int v4l_s_priority(const struct v4l2_ioctl_ops *ops, return ops->vidioc_s_priority(file, fh, *p); vfd = video_devdata(file); vfh = file->private_data; - return v4l2_prio_change(&vfd->v4l2_dev->prio, &vfh->prio, *p); + return v4l2_prio_change(vfd->prio, &vfh->prio, *p); } static int v4l_enuminput(const struct v4l2_ioctl_ops *ops, @@ -1153,9 +1162,9 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, switch (p->type) { case V4L2_BUF_TYPE_VIDEO_OVERLAY: case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: { - struct v4l2_clip *clips = p->fmt.win.clips; + struct v4l2_clip __user *clips = p->fmt.win.clips; u32 clipcount = p->fmt.win.clipcount; - void *bitmap = p->fmt.win.bitmap; + void __user *bitmap = p->fmt.win.bitmap; memset(&p->fmt, 0, sizeof(p->fmt)); p->fmt.win.clips = clips; diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c index b4d235c13fb..543631c3557 100644 --- a/drivers/media/v4l2-core/v4l2-subdev.c +++ b/drivers/media/v4l2-core/v4l2-subdev.c @@ -501,11 +501,20 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd, struct v4l2_subdev_format *source_fmt, struct v4l2_subdev_format *sink_fmt) { + /* The width, height and code must match. */ if (source_fmt->format.width != sink_fmt->format.width || source_fmt->format.height != sink_fmt->format.height || source_fmt->format.code != sink_fmt->format.code) return -EINVAL; + /* The field order must match, or the sink field order must be NONE + * to support interlaced hardware connected to bridges that support + * progressive formats only. + */ + if (source_fmt->format.field != sink_fmt->format.field && + sink_fmt->format.field != V4L2_FIELD_NONE) + return -EINVAL; + return 0; } EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default); diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c index fb5ee5dd8fe..926836d1813 100644 --- a/drivers/media/v4l2-core/videobuf-core.c +++ b/drivers/media/v4l2-core/videobuf-core.c @@ -51,6 +51,8 @@ MODULE_LICENSE("GPL"); #define CALL(q, f, arg...) \ ((q->int_ops->f) ? q->int_ops->f(arg) : 0) +#define CALLPTR(q, f, arg...) \ + ((q->int_ops->f) ? 
q->int_ops->f(arg) : NULL) struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q) { @@ -441,11 +443,6 @@ int videobuf_reqbufs(struct videobuf_queue *q, unsigned int size, count; int retval; - if (req->count < 1) { - dprintk(1, "reqbufs: count invalid (%d)\n", req->count); - return -EINVAL; - } - if (req->memory != V4L2_MEMORY_MMAP && req->memory != V4L2_MEMORY_USERPTR && req->memory != V4L2_MEMORY_OVERLAY) { @@ -471,6 +468,12 @@ int videobuf_reqbufs(struct videobuf_queue *q, goto done; } + if (req->count == 0) { + dprintk(1, "reqbufs: count invalid (%d)\n", req->count); + retval = __videobuf_free(q); + goto done; + } + count = req->count; if (count > VIDEO_MAX_FRAME) count = VIDEO_MAX_FRAME; @@ -830,7 +833,7 @@ static int __videobuf_copy_to_user(struct videobuf_queue *q, char __user *data, size_t count, int nonblocking) { - void *vaddr = CALL(q, vaddr, buf); + void *vaddr = CALLPTR(q, vaddr, buf); /* copy to userspace */ if (count > buf->size - q->read_off) @@ -847,7 +850,7 @@ static int __videobuf_copy_stream(struct videobuf_queue *q, char __user *data, size_t count, size_t pos, int vbihack, int nonblocking) { - unsigned int *fc = CALL(q, vaddr, buf); + unsigned int *fc = CALLPTR(q, vaddr, buf); if (vbihack) { /* dirty, undocumented hack -- pass the frame counter diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c index bf80f0f7dfb..e02353e340d 100644 --- a/drivers/media/v4l2-core/videobuf-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf-dma-contig.c @@ -305,6 +305,15 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q, /* Try to remap memory */ size = vma->vm_end - vma->vm_start; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + /* the "vm_pgoff" is just used in v4l2 to find the + * corresponding buffer data structure which is allocated + * earlier and it does not mean the offset from the physical + * buffer start address as usual. So set it to 0 to pass + * the sanity check in vm_iomap_memory(). + */ + vma->vm_pgoff = 0; + retval = vm_iomap_memory(vma, mem->dma_handle, size); if (retval) { dev_err(q->dev, "mmap: remap failed with error %d. ", diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 3c8cc023a5a..3ff15f1c9d7 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -253,9 +253,11 @@ int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction, return 0; out_free_pages: while (i > 0) { - void *addr = page_address(dma->vaddr_pages[i]); - dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]); + void *addr; + i--; + addr = page_address(dma->vaddr_pages[i]); + dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]); } kfree(dma->dma_addr); dma->dma_addr = NULL; diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 25d3ae2188c..d09a8916e94 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -36,7 +36,7 @@ module_param(debug, int, 0644); #define dprintk(level, fmt, arg...) 
\ do { \ if (debug >= level) \ - pr_debug("vb2: %s: " fmt, __func__, ## arg); \ + pr_info("vb2: %s: " fmt, __func__, ## arg); \ } while (0) #ifdef CONFIG_VIDEO_ADV_DEBUG @@ -189,6 +189,8 @@ static void __vb2_queue_cancel(struct vb2_queue *q); static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) { struct vb2_queue *q = vb->vb2_queue; + enum dma_data_direction dma_dir = + V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; void *mem_priv; int plane; @@ -200,7 +202,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]); mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane], - size, q->gfp_flags); + size, dma_dir, q->gfp_flags); if (IS_ERR_OR_NULL(mem_priv)) goto free; @@ -882,7 +884,9 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) * We already have buffers allocated, so first check if they * are not in use and can be freed. */ + mutex_lock(&q->mmap_lock); if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) { + mutex_unlock(&q->mmap_lock); dprintk(1, "memory in use, cannot free\n"); return -EBUSY; } @@ -894,6 +898,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) */ __vb2_queue_cancel(q); ret = __vb2_queue_free(q, q->num_buffers); + mutex_unlock(&q->mmap_lock); if (ret) return ret; @@ -955,6 +960,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) */ } + mutex_lock(&q->mmap_lock); q->num_buffers = allocated_buffers; if (ret < 0) { @@ -963,8 +969,10 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) * from q->num_buffers. */ __vb2_queue_free(q, allocated_buffers); + mutex_unlock(&q->mmap_lock); return ret; } + mutex_unlock(&q->mmap_lock); /* * Return the number of successfully allocated buffers @@ -1063,6 +1071,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create */ } + mutex_lock(&q->mmap_lock); q->num_buffers += allocated_buffers; if (ret < 0) { @@ -1071,8 +1080,10 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create * from q->num_buffers. */ __vb2_queue_free(q, allocated_buffers); + mutex_unlock(&q->mmap_lock); return -ENOMEM; } + mutex_unlock(&q->mmap_lock); /* * Return the number of successfully allocated buffers @@ -1349,7 +1360,8 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b) void *mem_priv; unsigned int plane; int ret; - int write = !V4L2_TYPE_IS_OUTPUT(q->type); + enum dma_data_direction dma_dir = + V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; bool reacquired = vb->planes[0].mem_priv == NULL; memset(planes, 0, sizeof(planes[0]) * vb->num_planes); @@ -1391,7 +1403,7 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b) /* Acquire each plane's memory */ mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane], planes[plane].m.userptr, - planes[plane].length, write); + planes[plane].length, dma_dir); if (IS_ERR_OR_NULL(mem_priv)) { dprintk(1, "failed acquiring userspace " "memory for plane %d\n", plane); @@ -1452,7 +1464,8 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b) void *mem_priv; unsigned int plane; int ret; - int write = !V4L2_TYPE_IS_OUTPUT(q->type); + enum dma_data_direction dma_dir = + V4L2_TYPE_IS_OUTPUT(q->type) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; bool reacquired = vb->planes[0].mem_priv == NULL; memset(planes, 0, sizeof(planes[0]) * vb->num_planes); @@ -1500,7 +1513,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b) /* Acquire each plane's memory */ mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane], - dbuf, planes[plane].length, write); + dbuf, planes[plane].length, dma_dir); if (IS_ERR(mem_priv)) { dprintk(1, "failed to attach dmabuf\n"); ret = PTR_ERR(mem_priv); @@ -1581,7 +1594,6 @@ static void __enqueue_in_driver(struct vb2_buffer *vb) static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b) { struct vb2_queue *q = vb->vb2_queue; - struct rw_semaphore *mmap_sem; int ret; ret = __verify_length(vb, b); @@ -1618,26 +1630,9 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b) ret = __qbuf_mmap(vb, b); break; case V4L2_MEMORY_USERPTR: - /* - * In case of user pointer buffers vb2 allocators need to get - * direct access to userspace pages. This requires getting - * the mmap semaphore for read access in the current process - * structure. The same semaphore is taken before calling mmap - * operation, while both qbuf/prepare_buf and mmap are called - * by the driver or v4l2 core with the driver's lock held. - * To avoid an AB-BA deadlock (mmap_sem then driver's lock in - * mmap and driver's lock then mmap_sem in qbuf/prepare_buf), - * the videobuf2 core releases the driver's lock, takes - * mmap_sem and then takes the driver's lock again. - */ - mmap_sem = ¤t->mm->mmap_sem; - call_void_qop(q, wait_prepare, q); - down_read(mmap_sem); - call_void_qop(q, wait_finish, q); - + down_read(¤t->mm->mmap_sem); ret = __qbuf_userptr(vb, b); - - up_read(mmap_sem); + up_read(¤t->mm->mmap_sem); break; case V4L2_MEMORY_DMABUF: ret = __qbuf_dmabuf(vb, b); @@ -2504,7 +2499,9 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) return -EINVAL; } + mutex_lock(&q->mmap_lock); ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma); + mutex_unlock(&q->mmap_lock); if (ret) return ret; @@ -2523,6 +2520,7 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q, unsigned long off = pgoff << PAGE_SHIFT; struct vb2_buffer *vb; unsigned int buffer, plane; + void *vaddr; int ret; if (q->memory != V4L2_MEMORY_MMAP) { @@ -2539,7 +2537,8 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q, vb = q->bufs[buffer]; - return (unsigned long)vb2_plane_vaddr(vb, plane); + vaddr = vb2_plane_vaddr(vb, plane); + return vaddr ? (unsigned long)vaddr : -EINVAL; } EXPORT_SYMBOL_GPL(vb2_get_unmapped_area); #endif @@ -2686,6 +2685,7 @@ int vb2_queue_init(struct vb2_queue *q) INIT_LIST_HEAD(&q->queued_list); INIT_LIST_HEAD(&q->done_list); spin_lock_init(&q->done_lock); + mutex_init(&q->mmap_lock); init_waitqueue_head(&q->done_wq); if (q->buf_struct_size == 0) @@ -2707,7 +2707,9 @@ void vb2_queue_release(struct vb2_queue *q) { __vb2_cleanup_fileio(q); __vb2_queue_cancel(q); + mutex_lock(&q->mmap_lock); __vb2_queue_free(q, q->num_buffers); + mutex_unlock(&q->mmap_lock); } EXPORT_SYMBOL_GPL(vb2_queue_release); @@ -2985,6 +2987,12 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ buf->queued = 0; buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0) : vb2_plane_size(q->bufs[index], 0); + /* Compensate for data_offset on read in the multiplanar case. 
*/ + if (is_multiplanar && read && + fileio->b.m.planes[0].data_offset < buf->size) { + buf->pos = fileio->b.m.planes[0].data_offset; + buf->size -= buf->pos; + } } else { buf = &fileio->bufs[index]; } @@ -3372,15 +3380,8 @@ EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf); int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma) { struct video_device *vdev = video_devdata(file); - struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; - int err; - if (lock && mutex_lock_interruptible(lock)) - return -ERESTARTSYS; - err = vb2_mmap(vdev->queue, vma); - if (lock) - mutex_unlock(lock); - return err; + return vb2_mmap(vdev->queue, vma); } EXPORT_SYMBOL_GPL(vb2_fop_mmap); @@ -3388,14 +3389,14 @@ int _vb2_fop_release(struct file *file, struct mutex *lock) { struct video_device *vdev = video_devdata(file); + if (lock) + mutex_lock(lock); if (file->private_data == vdev->queue->owner) { - if (lock) - mutex_lock(lock); vb2_queue_release(vdev->queue); vdev->queue->owner = NULL; - if (lock) - mutex_unlock(lock); } + if (lock) + mutex_unlock(lock); return v4l2_fh_release(file); } EXPORT_SYMBOL_GPL(_vb2_fop_release); @@ -3458,27 +3459,16 @@ unsigned int vb2_fop_poll(struct file *file, poll_table *wait) struct video_device *vdev = video_devdata(file); struct vb2_queue *q = vdev->queue; struct mutex *lock = q->lock ? q->lock : vdev->lock; - unsigned long req_events = poll_requested_events(wait); unsigned res; void *fileio; - bool must_lock = false; - /* Try to be smart: only lock if polling might start fileio, - otherwise locking will only introduce unwanted delays. */ - if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) { - if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) && - (req_events & (POLLIN | POLLRDNORM))) - must_lock = true; - else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) && - (req_events & (POLLOUT | POLLWRNORM))) - must_lock = true; - } - - /* If locking is needed, but this helper doesn't know how, then you - shouldn't be using this helper but you should write your own. */ - WARN_ON(must_lock && !lock); + /* + * If this helper doesn't know how to lock, then you shouldn't be using + * it but you should write your own. + */ + WARN_ON(!lock); - if (must_lock && lock && mutex_lock_interruptible(lock)) + if (lock && mutex_lock_interruptible(lock)) return POLLERR; fileio = q->fileio; @@ -3486,9 +3476,9 @@ unsigned int vb2_fop_poll(struct file *file, poll_table *wait) res = vb2_poll(vdev->queue, file, wait); /* If fileio was started, then we have a new queue owner. */ - if (must_lock && !fileio && q->fileio) + if (!fileio && q->fileio) q->owner = file->private_data; - if (must_lock && lock) + if (lock) mutex_unlock(lock); return res; } @@ -3499,15 +3489,8 @@ unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct video_device *vdev = video_devdata(file); - struct mutex *lock = vdev->queue->lock ? 
vdev->queue->lock : vdev->lock; - int ret; - if (lock && mutex_lock_interruptible(lock)) - return -ERESTARTSYS; - ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags); - if (lock) - mutex_unlock(lock); - return ret; + return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags); } EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area); #endif diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index 4a02ade14b4..b481d20c837 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c @@ -155,7 +155,8 @@ static void vb2_dc_put(void *buf_priv) kfree(buf); } -static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags) +static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, + enum dma_data_direction dma_dir, gfp_t gfp_flags) { struct vb2_dc_conf *conf = alloc_ctx; struct device *dev = conf->dev; @@ -176,6 +177,7 @@ static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags) /* Prevent the device from being released while the buffer is used */ buf->dev = get_device(dev); buf->size = size; + buf->dma_dir = dma_dir; buf->handler.refcount = &buf->refcount; buf->handler.put = vb2_dc_put; @@ -229,7 +231,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma) struct vb2_dc_attachment { struct sg_table sgt; - enum dma_data_direction dir; + enum dma_data_direction dma_dir; }; static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev, @@ -264,7 +266,7 @@ static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev, wr = sg_next(wr); } - attach->dir = DMA_NONE; + attach->dma_dir = DMA_NONE; dbuf_attach->priv = attach; return 0; @@ -282,16 +284,16 @@ static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf, sgt = &attach->sgt; /* release the scatterlist cache */ - if (attach->dir != DMA_NONE) + if (attach->dma_dir != DMA_NONE) dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, - attach->dir); + attach->dma_dir); sg_free_table(sgt); kfree(attach); db_attach->priv = NULL; } static struct sg_table *vb2_dc_dmabuf_ops_map( - struct dma_buf_attachment *db_attach, enum dma_data_direction dir) + struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir) { struct vb2_dc_attachment *attach = db_attach->priv; /* stealing dmabuf mutex to serialize map/unmap operations */ @@ -303,27 +305,27 @@ static struct sg_table *vb2_dc_dmabuf_ops_map( sgt = &attach->sgt; /* return previously mapped sg table */ - if (attach->dir == dir) { + if (attach->dma_dir == dma_dir) { mutex_unlock(lock); return sgt; } /* release any previous cache */ - if (attach->dir != DMA_NONE) { + if (attach->dma_dir != DMA_NONE) { dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, - attach->dir); - attach->dir = DMA_NONE; + attach->dma_dir); + attach->dma_dir = DMA_NONE; } /* mapping to the client with new direction */ - ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir); + ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir); if (ret <= 0) { pr_err("failed to map scatterlist\n"); mutex_unlock(lock); return ERR_PTR(-EIO); } - attach->dir = dir; + attach->dma_dir = dma_dir; mutex_unlock(lock); @@ -331,7 +333,7 @@ static struct sg_table *vb2_dc_dmabuf_ops_map( } static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach, - struct sg_table *sgt, enum dma_data_direction dir) + struct sg_table *sgt, enum dma_data_direction dma_dir) { /* nothing to be done here */ } @@ -460,7 
+462,8 @@ static int vb2_dc_get_user_pfn(unsigned long start, int n_pages, } static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, - int n_pages, struct vm_area_struct *vma, int write) + int n_pages, struct vm_area_struct *vma, + enum dma_data_direction dma_dir) { if (vma_is_io(vma)) { unsigned int i; @@ -482,7 +485,7 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, int n; n = get_user_pages(current, current->mm, start & PAGE_MASK, - n_pages, write, 1, pages, NULL); + n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL); /* negative error means that no page was pinned */ n = max(n, 0); if (n != n_pages) { @@ -508,7 +511,15 @@ static void vb2_dc_put_userptr(void *buf_priv) struct sg_table *sgt = buf->dma_sgt; if (sgt) { - dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir); + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + /* + * No need to sync to CPU, it's already synced to the CPU + * since the finish() memop will have been called before this. + */ + dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, + buf->dma_dir, &attrs); if (!vma_is_io(buf->vma)) vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page); @@ -551,7 +562,7 @@ static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn #endif static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, - unsigned long size, int write) + unsigned long size, enum dma_data_direction dma_dir) { struct vb2_dc_conf *conf = alloc_ctx; struct vb2_dc_buf *buf; @@ -565,6 +576,9 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, struct sg_table *sgt; unsigned long contig_size; unsigned long dma_align = dma_get_cache_alignment(); + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); /* Only cache aligned DMA transfers are reliable */ if (!IS_ALIGNED(vaddr | size, dma_align)) { @@ -582,7 +596,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, return ERR_PTR(-ENOMEM); buf->dev = conf->dev; - buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + buf->dma_dir = dma_dir; start = vaddr & PAGE_MASK; offset = vaddr & ~PAGE_MASK; @@ -618,7 +632,8 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, } /* extract page list from userspace mapping */ - ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write); + ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, + dma_dir == DMA_FROM_DEVICE); if (ret) { unsigned long pfn; if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) { @@ -650,8 +665,12 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, kfree(pages); pages = NULL; - sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents, - buf->dma_dir); + /* + * No need to sync to the device, this will happen later when the + * prepare() memop is called. 
+ */ + sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, + buf->dma_dir, &attrs); if (sgt->nents <= 0) { pr_err("failed to map scatterlist\n"); ret = -EIO; @@ -673,7 +692,8 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, return buf; fail_map_sg: - dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir); + dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, + buf->dma_dir, &attrs); fail_sgt_init: if (!vma_is_io(buf->vma)) @@ -782,7 +802,7 @@ static void vb2_dc_detach_dmabuf(void *mem_priv) } static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, - unsigned long size, int write) + unsigned long size, enum dma_data_direction dma_dir) { struct vb2_dc_conf *conf = alloc_ctx; struct vb2_dc_buf *buf; @@ -804,7 +824,7 @@ static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, return dba; } - buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + buf->dma_dir = dma_dir; buf->size = size; buf->db_attach = dba; @@ -850,7 +870,8 @@ EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx); void vb2_dma_contig_cleanup_ctx(void *alloc_ctx) { - kfree(alloc_ctx); + if (!IS_ERR_OR_NULL(alloc_ctx)) + kfree(alloc_ctx); } EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx); diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index 9b163a440f8..b1838abb6d0 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c @@ -30,17 +30,30 @@ module_param(debug, int, 0644); printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \ } while (0) +struct vb2_dma_sg_conf { + struct device *dev; +}; + struct vb2_dma_sg_buf { + struct device *dev; void *vaddr; struct page **pages; - int write; int offset; + enum dma_data_direction dma_dir; struct sg_table sg_table; + /* + * This will point to sg_table when used with the MMAP or USERPTR + * memory model, and to the dma_buf sglist when used with the + * DMABUF memory model. 
+	 */
+	struct sg_table		*dma_sgt;
 	size_t			size;
 	unsigned int		num_pages;
 	atomic_t		refcount;
 	struct vb2_vmarea_handler handler;
 	struct vm_area_struct	*vma;
+
+	struct dma_buf_attachment *db_attach;
 };
 
 static void vb2_dma_sg_put(void *buf_priv);
@@ -86,22 +99,31 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
 	return 0;
 }
 
-static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
+static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
+			      enum dma_data_direction dma_dir, gfp_t gfp_flags)
 {
+	struct vb2_dma_sg_conf *conf = alloc_ctx;
 	struct vb2_dma_sg_buf *buf;
+	struct sg_table *sgt;
 	int ret;
 	int num_pages;
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+
+	if (WARN_ON(alloc_ctx == NULL))
+		return NULL;
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return NULL;
 
 	buf->vaddr = NULL;
-	buf->write = 0;
+	buf->dma_dir = dma_dir;
 	buf->offset = 0;
 	buf->size = size;
 	/* size is already page aligned */
 	buf->num_pages = size >> PAGE_SHIFT;
+	buf->dma_sgt = &buf->sg_table;
 
 	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
 			     GFP_KERNEL);
@@ -112,11 +134,23 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
 	if (ret)
 		goto fail_pages_alloc;
 
-	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
 			buf->num_pages, 0, size, GFP_KERNEL);
 	if (ret)
 		goto fail_table_alloc;
 
+	/* Prevent the device from being released while the buffer is used */
+	buf->dev = get_device(conf->dev);
+
+	sgt = &buf->sg_table;
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+			     buf->dma_dir, &attrs) == 0)
+		goto fail_map;
+
 	buf->handler.refcount = &buf->refcount;
 	buf->handler.put = vb2_dma_sg_put;
 	buf->handler.arg = buf;
@@ -127,6 +161,9 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
 		__func__, buf->num_pages);
 	return buf;
 
+fail_map:
+	put_device(buf->dev);
+	sg_free_table(buf->dma_sgt);
 fail_table_alloc:
 	num_pages = buf->num_pages;
 	while (num_pages--)
@@ -141,42 +178,81 @@ fail_pages_array_alloc:
 static void vb2_dma_sg_put(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = &buf->sg_table;
 	int i = buf->num_pages;
 
 	if (atomic_dec_and_test(&buf->refcount)) {
+		DEFINE_DMA_ATTRS(attrs);
+
+		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
 			buf->num_pages);
+		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+				   buf->dma_dir, &attrs);
 		if (buf->vaddr)
 			vm_unmap_ram(buf->vaddr, buf->num_pages);
-		sg_free_table(&buf->sg_table);
+		sg_free_table(buf->dma_sgt);
 		while (--i >= 0)
 			__free_page(buf->pages[i]);
 		kfree(buf->pages);
+		put_device(buf->dev);
 		kfree(buf);
 	}
 }
 
+static void vb2_dma_sg_prepare(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	/* DMABUF exporter will flush the cache for us */
+	if (buf->db_attach)
+		return;
+
+	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+static void vb2_dma_sg_finish(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	/* DMABUF exporter will flush the cache for us */
+	if (buf->db_attach)
+		return;
+
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
 static inline int vma_is_io(struct vm_area_struct *vma)
 {
 	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
 }
 
 static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
-				    unsigned long size, int write)
+				    unsigned long size,
+				    enum dma_data_direction dma_dir)
 {
+	struct vb2_dma_sg_conf *conf = alloc_ctx;
 	struct vb2_dma_sg_buf *buf;
 	unsigned long first, last;
 	int num_pages_from_user;
 	struct vm_area_struct *vma;
+	struct sg_table *sgt;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return NULL;
 
 	buf->vaddr = NULL;
-	buf->write = write;
+	buf->dev = conf->dev;
+	buf->dma_dir = dma_dir;
 	buf->offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
+	buf->dma_sgt = &buf->sg_table;
 
 	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
 	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
@@ -221,7 +297,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	num_pages_from_user = get_user_pages(current, current->mm,
 					     vaddr & PAGE_MASK,
 					     buf->num_pages,
-					     write,
+					     buf->dma_dir == DMA_FROM_DEVICE,
 					     1, /* force */
 					     buf->pages,
 					     NULL);
@@ -229,12 +305,22 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	if (num_pages_from_user != buf->num_pages)
 		goto userptr_fail_get_user_pages;
 
-	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
 			buf->num_pages, buf->offset, size, 0))
 		goto userptr_fail_alloc_table_from_pages;
 
+	sgt = &buf->sg_table;
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+			     buf->dma_dir, &attrs) == 0)
+		goto userptr_fail_map;
+
 	return buf;
 
+userptr_fail_map:
+	sg_free_table(&buf->sg_table);
 userptr_fail_alloc_table_from_pages:
 userptr_fail_get_user_pages:
 	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
@@ -257,15 +343,20 @@ userptr_fail_alloc_pages:
 static void vb2_dma_sg_put_userptr(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = &buf->sg_table;
 	int i = buf->num_pages;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
 	       __func__, buf->num_pages);
+	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir, &attrs);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
-	sg_free_table(&buf->sg_table);
+	sg_free_table(buf->dma_sgt);
 	while (--i >= 0) {
-		if (buf->write)
+		if (buf->dma_dir == DMA_FROM_DEVICE)
 			set_page_dirty_lock(buf->pages[i]);
 		if (!vma_is_io(buf->vma))
 			put_page(buf->pages[i]);
@@ -281,14 +372,16 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)
 
 	BUG_ON(!buf);
 
-	if (!buf->vaddr)
-		buf->vaddr = vm_map_ram(buf->pages,
-					buf->num_pages,
-					-1,
-					PAGE_KERNEL);
+	if (!buf->vaddr) {
+		if (buf->db_attach)
+			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+		else
+			buf->vaddr = vm_map_ram(buf->pages,
+					buf->num_pages, -1, PAGE_KERNEL);
+	}
 
 	/* add offset in case userptr is not page-aligned */
-	return buf->vaddr + buf->offset;
+	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
 }
 
 static unsigned int vb2_dma_sg_num_users(void *buf_priv)
@@ -335,11 +428,279 @@ static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
 	return 0;
 }
 
+/*********************************************/
+/*         DMABUF ops for exporters          */
+/*********************************************/
+
+struct vb2_dma_sg_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dma_dir;
+};
+
+static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+	struct dma_buf_attachment *dbuf_attach)
+{
+	struct vb2_dma_sg_attachment *attach;
+	unsigned int i;
+	struct scatterlist *rd, *wr;
+	struct sg_table *sgt;
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	int ret;
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return -ENOMEM;
+
+	sgt = &attach->sgt;
+	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
+	 * map the same scatter list to multiple attachments at the same time.
+	 */
+	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
+	if (ret) {
+		kfree(attach);
+		return -ENOMEM;
+	}
+
+	rd = buf->dma_sgt->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	attach->dma_dir = DMA_NONE;
+	dbuf_attach->priv = attach;
+
+	return 0;
+}
+
+static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
+	struct dma_buf_attachment *db_attach)
+{
+	struct vb2_dma_sg_attachment *attach = db_attach->priv;
+	struct sg_table *sgt;
+
+	if (!attach)
+		return;
+
+	sgt = &attach->sgt;
+
+	/* release the scatterlist cache */
+	if (attach->dma_dir != DMA_NONE)
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+	sg_free_table(sgt);
+	kfree(attach);
+	db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
+	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+	struct vb2_dma_sg_attachment *attach = db_attach->priv;
+	/* stealing dmabuf mutex to serialize map/unmap operations */
+	struct mutex *lock = &db_attach->dmabuf->lock;
+	struct sg_table *sgt;
+	int ret;
+
+	mutex_lock(lock);
+
+	sgt = &attach->sgt;
+	/* return previously mapped sg table */
+	if (attach->dma_dir == dma_dir) {
+		mutex_unlock(lock);
+		return sgt;
+	}
+
+	/* release any previous cache */
+	if (attach->dma_dir != DMA_NONE) {
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+		attach->dma_dir = DMA_NONE;
+	}
+
+	/* mapping to the client with new direction */
+	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
+	if (ret <= 0) {
+		pr_err("failed to map scatterlist\n");
+		mutex_unlock(lock);
+		return ERR_PTR(-EIO);
+	}
+
+	attach->dma_dir = dma_dir;
+
+	mutex_unlock(lock);
+
+	return sgt;
+}
+
+static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+	struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+	/* nothing to be done here */
+}
+
+static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
+	vb2_dma_sg_put(dbuf->priv);
+}
+
+static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+
+	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
+}
+
+static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+
+	return vb2_dma_sg_vaddr(buf);
+}
+
+static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
+	struct vm_area_struct *vma)
+{
+	return vb2_dma_sg_mmap(dbuf->priv, vma);
+}
+
+static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
+	.attach = vb2_dma_sg_dmabuf_ops_attach,
+	.detach = vb2_dma_sg_dmabuf_ops_detach,
+	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
+	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
+	.kmap = vb2_dma_sg_dmabuf_ops_kmap,
+	.kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
+	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
+	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
+	.release = vb2_dma_sg_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct dma_buf *dbuf;
+
+	if (WARN_ON(!buf->dma_sgt))
+		return NULL;
+
+	dbuf = dma_buf_export(buf, &vb2_dma_sg_dmabuf_ops, buf->size, flags, NULL);
+	if (IS_ERR(dbuf))
+		return NULL;
+
+	/* dmabuf keeps reference to vb2 buffer */
+	atomic_inc(&buf->refcount);
+
+	return dbuf;
+}
+
+/*********************************************/
+/*       callbacks for DMABUF buffers        */
+/*********************************************/
+
+static int vb2_dma_sg_map_dmabuf(void *mem_priv)
+{
+	struct vb2_dma_sg_buf *buf = mem_priv;
+	struct sg_table *sgt;
+
+	if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to pin a non attached buffer\n");
+		return -EINVAL;
+	}
+
+	if (WARN_ON(buf->dma_sgt)) {
+		pr_err("dmabuf buffer is already pinned\n");
+		return 0;
+	}
+
+	/* get the associated scatterlist for this buffer */
+	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+	if (IS_ERR(sgt)) {
+		pr_err("Error getting dmabuf scatterlist\n");
+		return -EINVAL;
+	}
+
+	buf->dma_sgt = sgt;
+	buf->vaddr = NULL;
+
+	return 0;
+}
+
+static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
+{
+	struct vb2_dma_sg_buf *buf = mem_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to unpin a not attached buffer\n");
+		return;
+	}
+
+	if (WARN_ON(!sgt)) {
+		pr_err("dmabuf buffer is already unpinned\n");
+		return;
+	}
+
+	if (buf->vaddr) {
+		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+		buf->vaddr = NULL;
+	}
+	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+	buf->dma_sgt = NULL;
+}
+
+static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
+{
+	struct vb2_dma_sg_buf *buf = mem_priv;
+
+	/* if vb2 works correctly you should never detach mapped buffer */
+	if (WARN_ON(buf->dma_sgt))
+		vb2_dma_sg_unmap_dmabuf(buf);
+
+	/* detach this attachment */
+	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
+	kfree(buf);
+}
+
+static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+	unsigned long size, enum dma_data_direction dma_dir)
+{
+	struct vb2_dma_sg_conf *conf = alloc_ctx;
+	struct vb2_dma_sg_buf *buf;
+	struct dma_buf_attachment *dba;
+
+	if (dbuf->size < size)
+		return ERR_PTR(-EFAULT);
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->dev = conf->dev;
+	/* create attachment for the dmabuf with the user device */
+	dba = dma_buf_attach(dbuf, buf->dev);
+	if (IS_ERR(dba)) {
+		pr_err("failed to attach dmabuf\n");
+		kfree(buf);
+		return dba;
+	}
+
+	buf->dma_dir = dma_dir;
+	buf->size = size;
+	buf->db_attach = dba;
+
+	return buf;
+}
+
 static void *vb2_dma_sg_cookie(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
 
-	return &buf->sg_table;
+	return buf->dma_sgt;
 }
 
 const struct vb2_mem_ops vb2_dma_sg_memops = {
@@ -347,13 +708,41 @@ const struct vb2_mem_ops vb2_dma_sg_memops = {
 	.put		= vb2_dma_sg_put,
 	.get_userptr	= vb2_dma_sg_get_userptr,
 	.put_userptr	= vb2_dma_sg_put_userptr,
+	.prepare	= vb2_dma_sg_prepare,
+	.finish		= vb2_dma_sg_finish,
 	.vaddr		= vb2_dma_sg_vaddr,
 	.mmap		= vb2_dma_sg_mmap,
 	.num_users	= vb2_dma_sg_num_users,
+	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
+	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
+	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
+	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
+	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
 	.cookie		= vb2_dma_sg_cookie,
 };
 EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
 
+void *vb2_dma_sg_init_ctx(struct device *dev)
+{
+	struct vb2_dma_sg_conf *conf;
+
+	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+	if (!conf)
+		return ERR_PTR(-ENOMEM);
+
+	conf->dev = dev;
+
+	return conf;
+}
+EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);
+
+void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
+{
+	if (!IS_ERR_OR_NULL(alloc_ctx))
+		kfree(alloc_ctx);
+}
+EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);
+
 MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
 MODULE_AUTHOR("Andrzej Pietrasiewicz");
 MODULE_LICENSE("GPL");
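Note: the vb2-dma-sg changes above introduce a per-device allocation context. A driver creates one with vb2_dma_sg_init_ctx(), and vb2-dma-sg then takes a device reference and performs the dma_map_sg_attrs()/dma_unmap_sg_attrs() calls itself. A minimal sketch of the driver-side wiring follows; it is illustrative only and not part of this diff ("mydrv" and its members are made-up names):

/*
 * Hypothetical driver glue, not part of this patch: "mydrv" and
 * mydrv_init_vb2() are made-up names for illustration only.
 */
#include <linux/err.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-sg.h>

struct mydrv {
	struct device *dev;	/* DMA-capable device, e.g. &pci_dev->dev */
	struct vb2_queue queue;
	void *alloc_ctx;	/* context from vb2_dma_sg_init_ctx() */
};

static int mydrv_init_vb2(struct mydrv *drv)
{
	int ret;

	/*
	 * Hand the struct device to vb2-dma-sg; the allocator keeps a
	 * reference (get_device() in vb2_dma_sg_alloc above) and uses it
	 * for the dma_map_sg_attrs()/dma_unmap_sg_attrs() calls.
	 */
	drv->alloc_ctx = vb2_dma_sg_init_ctx(drv->dev);
	if (IS_ERR(drv->alloc_ctx))
		return PTR_ERR(drv->alloc_ctx);

	drv->queue.mem_ops = &vb2_dma_sg_memops;
	/* ...type, io_modes, ops etc. must also be set before this call... */
	ret = vb2_queue_init(&drv->queue);
	if (ret)
		vb2_dma_sg_cleanup_ctx(drv->alloc_ctx);
	return ret;
}

The same context pointer must also be handed back from the driver's queue_setup() callback via alloc_ctxs[], which is how it reaches vb2_dma_sg_alloc() as alloc_ctx; the new WARN_ON(alloc_ctx == NULL) there catches drivers that skip this step.
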
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 313d9771b2b..fba944e5022 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -25,7 +25,7 @@ struct vb2_vmalloc_buf {
 	void			*vaddr;
 	struct page		**pages;
 	struct vm_area_struct	*vma;
-	int			write;
+	enum dma_data_direction	dma_dir;
 	unsigned long		size;
 	unsigned int		n_pages;
 	atomic_t		refcount;
@@ -35,7 +35,8 @@ struct vb2_vmalloc_buf {
 
 static void vb2_vmalloc_put(void *buf_priv);
 
-static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
+static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
+			       enum dma_data_direction dma_dir, gfp_t gfp_flags)
 {
 	struct vb2_vmalloc_buf *buf;
 
@@ -45,6 +46,7 @@ static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fl
 
 	buf->size = size;
 	buf->vaddr = vmalloc_user(buf->size);
+	buf->dma_dir = dma_dir;
 	buf->handler.refcount = &buf->refcount;
 	buf->handler.put = vb2_vmalloc_put;
 	buf->handler.arg = buf;
@@ -70,7 +72,8 @@ static void vb2_vmalloc_put(void *buf_priv)
 }
 
 static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
-				     unsigned long size, int write)
+				     unsigned long size,
+				     enum dma_data_direction dma_dir)
 {
 	struct vb2_vmalloc_buf *buf;
 	unsigned long first, last;
@@ -82,7 +85,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	if (!buf)
 		return NULL;
 
-	buf->write = write;
+	buf->dma_dir = dma_dir;
 	offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
 
@@ -107,7 +110,8 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 		/* current->mm->mmap_sem is taken by videobuf2 core */
 		n_pages = get_user_pages(current, current->mm,
 					 vaddr & PAGE_MASK, buf->n_pages,
-					 write, 1, /* force */
+					 dma_dir == DMA_FROM_DEVICE,
+					 1, /* force */
 					 buf->pages, NULL);
 		if (n_pages != buf->n_pages)
 			goto fail_get_user_pages;
@@ -144,14 +148,13 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
 		if (vaddr)
 			vm_unmap_ram((void *)vaddr, buf->n_pages);
 		for (i = 0; i < buf->n_pages; ++i) {
-			if (buf->write)
+			if (buf->dma_dir == DMA_FROM_DEVICE)
 				set_page_dirty_lock(buf->pages[i]);
 			put_page(buf->pages[i]);
 		}
 		kfree(buf->pages);
 	} else {
-		if (buf->vma)
-			vb2_put_vma(buf->vma);
+		vb2_put_vma(buf->vma);
 		iounmap(buf->vaddr);
 	}
 	kfree(buf);
@@ -209,6 +212,176 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
 }
 
 /*********************************************/
+/*         DMABUF ops for exporters          */
+/*********************************************/
+
+struct vb2_vmalloc_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dma_dir;
+};
+
+static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+	struct dma_buf_attachment *dbuf_attach)
+{
+	struct vb2_vmalloc_attachment *attach;
+	struct vb2_vmalloc_buf *buf = dbuf->priv;
+	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
+	struct sg_table *sgt;
+	struct scatterlist *sg;
+	void *vaddr = buf->vaddr;
+	int ret;
+	int i;
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return -ENOMEM;
+
+	sgt = &attach->sgt;
+	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
+	if (ret) {
+		kfree(attach);
+		return ret;
+	}
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		struct page *page = vmalloc_to_page(vaddr);
+
+		if (!page) {
+			sg_free_table(sgt);
+			kfree(attach);
+			return -ENOMEM;
+		}
+		sg_set_page(sg, page, PAGE_SIZE, 0);
+		vaddr += PAGE_SIZE;
+	}
+
+	attach->dma_dir = DMA_NONE;
+	dbuf_attach->priv = attach;
+	return 0;
+}
+
+static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
+	struct dma_buf_attachment *db_attach)
+{
+	struct vb2_vmalloc_attachment *attach = db_attach->priv;
+	struct sg_table *sgt;
+
+	if (!attach)
+		return;
+
+	sgt = &attach->sgt;
+
+	/* release the scatterlist cache */
+	if (attach->dma_dir != DMA_NONE)
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+	sg_free_table(sgt);
+	kfree(attach);
+	db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
+	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+	struct vb2_vmalloc_attachment *attach = db_attach->priv;
+	/* stealing dmabuf mutex to serialize map/unmap operations */
+	struct mutex *lock = &db_attach->dmabuf->lock;
+	struct sg_table *sgt;
+	int ret;
+
+	mutex_lock(lock);
+
+	sgt = &attach->sgt;
+	/* return previously mapped sg table */
+	if (attach->dma_dir == dma_dir) {
+		mutex_unlock(lock);
+		return sgt;
+	}
+
+	/* release any previous cache */
+	if (attach->dma_dir != DMA_NONE) {
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+		attach->dma_dir = DMA_NONE;
+	}
+
+	/* mapping to the client with new direction */
+	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
+	if (ret <= 0) {
+		pr_err("failed to map scatterlist\n");
+		mutex_unlock(lock);
+		return ERR_PTR(-EIO);
+	}
+
+	attach->dma_dir = dma_dir;
+
+	mutex_unlock(lock);
+
+	return sgt;
+}
+
+static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+	struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+	/* nothing to be done here */
+}
+
+static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
+	vb2_vmalloc_put(dbuf->priv);
+}
+
+static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+	struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+	return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+	struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+	return buf->vaddr;
+}
+
+static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+	struct vm_area_struct *vma)
+{
+	return vb2_vmalloc_mmap(dbuf->priv, vma);
+}
+
+static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
+	.attach = vb2_vmalloc_dmabuf_ops_attach,
+	.detach = vb2_vmalloc_dmabuf_ops_detach,
+	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
+	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
+	.kmap = vb2_vmalloc_dmabuf_ops_kmap,
+	.kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
+	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
+	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
+	.release = vb2_vmalloc_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+	struct dma_buf *dbuf;
+
+	if (WARN_ON(!buf->vaddr))
+		return NULL;
+
+	dbuf = dma_buf_export(buf, &vb2_vmalloc_dmabuf_ops, buf->size, flags, NULL);
+	if (IS_ERR(dbuf))
+		return NULL;
+
+	/* dmabuf keeps reference to vb2 buffer */
+	atomic_inc(&buf->refcount);
+
+	return dbuf;
+}
+
+/*********************************************/
 /*       callbacks for DMABUF buffers        */
 /*********************************************/
 
@@ -240,7 +413,7 @@ static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
 }
 
 static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
-	unsigned long size, int write)
+	unsigned long size, enum dma_data_direction dma_dir)
 {
 	struct vb2_vmalloc_buf *buf;
 
@@ -252,7 +425,7 @@ static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
 		return ERR_PTR(-ENOMEM);
 
 	buf->dbuf = dbuf;
-	buf->write = write;
+	buf->dma_dir = dma_dir;
 	buf->size = size;
 
 	return buf;
@@ -264,6 +437,7 @@ const struct vb2_mem_ops vb2_vmalloc_memops = {
 	.put		= vb2_vmalloc_put,
 	.get_userptr	= vb2_vmalloc_get_userptr,
 	.put_userptr	= vb2_vmalloc_put_userptr,
+	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
 	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
 	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
 	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
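Note: the .get_dmabuf hook added to both allocators is what vb2 core invokes when userspace issues VIDIOC_EXPBUF, so buffers on vmalloc- and dma-sg-backed queues can now be handed out as dma-buf file descriptors. A rough userspace sketch, not part of this diff (export_buffer() is a made-up helper and error handling is minimal):

/* Hypothetical userspace helper; not part of the patch. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int export_buffer(int vfd, unsigned int index)
{
	struct v4l2_exportbuffer expbuf;

	memset(&expbuf, 0, sizeof(expbuf));
	expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	expbuf.index = index;		/* buffer from VIDIOC_REQBUFS */
	expbuf.flags = O_CLOEXEC | O_RDWR;

	/* vb2 core ends up calling the allocator's new .get_dmabuf memop */
	if (ioctl(vfd, VIDIOC_EXPBUF, &expbuf) < 0)
		return -1;

	return expbuf.fd;	/* dma-buf fd, importable by another device/API */
}

The returned fd can then be imported by any dma-buf consumer (another V4L2 device, DRM, etc.), which is the point of the exporter ops added in this series.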