Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r--  drivers/media/v4l2-core/v4l2-common.c          | 125
-rw-r--r--  drivers/media/v4l2-core/v4l2-compat-ioctl32.c  |  10
-rw-r--r--  drivers/media/v4l2-core/v4l2-ctrls.c           |  87
-rw-r--r--  drivers/media/v4l2-core/v4l2-dev.c             |  34
-rw-r--r--  drivers/media/v4l2-core/v4l2-ioctl.c           |  21
-rw-r--r--  drivers/media/v4l2-core/videobuf-core.c        |   6
-rw-r--r--  drivers/media/v4l2-core/videobuf-dma-contig.c  |   9
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c       |  49
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-contig.c |  71
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-sg.c     | 425
-rw-r--r--  drivers/media/v4l2-core/videobuf2-vmalloc.c    | 194

11 files changed, 758 insertions(+), 273 deletions(-)
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 2e9d81f4c1a..5b808500e7e 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -80,36 +80,6 @@ MODULE_LICENSE("GPL");
 
 /* Helper functions for control handling */
 
-/* Check for correctness of the ctrl's value based on the data from
-   struct v4l2_queryctrl and the available menu items. Note that
-   menu_items may be NULL, in that case it is ignored. */
-int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
-		const char * const *menu_items)
-{
-	if (qctrl->flags & V4L2_CTRL_FLAG_DISABLED)
-		return -EINVAL;
-	if (qctrl->flags & V4L2_CTRL_FLAG_GRABBED)
-		return -EBUSY;
-	if (qctrl->type == V4L2_CTRL_TYPE_STRING)
-		return 0;
-	if (qctrl->type == V4L2_CTRL_TYPE_BUTTON ||
-	    qctrl->type == V4L2_CTRL_TYPE_INTEGER64 ||
-	    qctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
-		return 0;
-	if (ctrl->value < qctrl->minimum || ctrl->value > qctrl->maximum)
-		return -ERANGE;
-	if (qctrl->type == V4L2_CTRL_TYPE_MENU && menu_items != NULL) {
-		if (menu_items[ctrl->value] == NULL ||
-		    menu_items[ctrl->value][0] == '\0')
-			return -EINVAL;
-	}
-	if (qctrl->type == V4L2_CTRL_TYPE_BITMASK &&
-	    (ctrl->value & ~qctrl->maximum))
-		return -ERANGE;
-	return 0;
-}
-EXPORT_SYMBOL(v4l2_ctrl_check);
-
 /* Fill in a struct v4l2_queryctrl */
 int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
 {
@@ -135,101 +105,6 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _
 }
 EXPORT_SYMBOL(v4l2_ctrl_query_fill);
 
-/* Fill in a struct v4l2_querymenu based on the struct v4l2_queryctrl and
-   the menu. The qctrl pointer may be NULL, in which case it is ignored.
-   If menu_items is NULL, then the menu items are retrieved using
-   v4l2_ctrl_get_menu. */
-int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, struct v4l2_queryctrl *qctrl,
-		const char * const *menu_items)
-{
-	int i;
-
-	qmenu->reserved = 0;
-	if (menu_items == NULL)
-		menu_items = v4l2_ctrl_get_menu(qmenu->id);
-	if (menu_items == NULL ||
-	    (qctrl && (qmenu->index < qctrl->minimum || qmenu->index > qctrl->maximum)))
-		return -EINVAL;
-	for (i = 0; i < qmenu->index && menu_items[i]; i++) ;
-	if (menu_items[i] == NULL || menu_items[i][0] == '\0')
-		return -EINVAL;
-	strlcpy(qmenu->name, menu_items[qmenu->index], sizeof(qmenu->name));
-	return 0;
-}
-EXPORT_SYMBOL(v4l2_ctrl_query_menu);
-
-/* Fill in a struct v4l2_querymenu based on the specified array of valid
-   menu items (terminated by V4L2_CTRL_MENU_IDS_END).
-   Use this if there are 'holes' in the list of valid menu items. */
-int v4l2_ctrl_query_menu_valid_items(struct v4l2_querymenu *qmenu, const u32 *ids)
-{
-	const char * const *menu_items = v4l2_ctrl_get_menu(qmenu->id);
-
-	qmenu->reserved = 0;
-	if (menu_items == NULL || ids == NULL)
-		return -EINVAL;
-	while (*ids != V4L2_CTRL_MENU_IDS_END) {
-		if (*ids++ == qmenu->index) {
-			strlcpy(qmenu->name, menu_items[qmenu->index],
-				sizeof(qmenu->name));
-			return 0;
-		}
-	}
-	return -EINVAL;
-}
-EXPORT_SYMBOL(v4l2_ctrl_query_menu_valid_items);
-
-/* ctrl_classes points to an array of u32 pointers, the last element is
-   a NULL pointer. Each u32 array is a 0-terminated array of control IDs.
-   Each array must be sorted low to high and belong to the same control
-   class. The array of u32 pointers must also be sorted, from low class IDs
-   to high class IDs.
-
-   This function returns the first ID that follows after the given ID.
-   When no more controls are available 0 is returned. */
-u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id)
-{
-	u32 ctrl_class = V4L2_CTRL_ID2CLASS(id);
-	const u32 *pctrl;
-
-	if (ctrl_classes == NULL)
-		return 0;
-
-	/* if no query is desired, then check if the ID is part of ctrl_classes */
-	if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0) {
-		/* find class */
-		while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) != ctrl_class)
-			ctrl_classes++;
-		if (*ctrl_classes == NULL)
-			return 0;
-		pctrl = *ctrl_classes;
-		/* find control ID */
-		while (*pctrl && *pctrl != id) pctrl++;
-		return *pctrl ? id : 0;
-	}
-	id &= V4L2_CTRL_ID_MASK;
-	id++;	/* select next control */
-	/* find first class that matches (or is greater than) the class of
-	   the ID */
-	while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) < ctrl_class)
-		ctrl_classes++;
-	/* no more classes */
-	if (*ctrl_classes == NULL)
-		return 0;
-	pctrl = *ctrl_classes;
-	/* find first ctrl within the class that is >= ID */
-	while (*pctrl && *pctrl < id) pctrl++;
-	if (*pctrl)
-		return *pctrl;
-	/* we are at the end of the controls of the current class. */
-	/* continue with next class if available */
-	ctrl_classes++;
-	if (*ctrl_classes == NULL)
-		return 0;
-	return **ctrl_classes;
-}
-EXPORT_SYMBOL(v4l2_ctrl_next);
-
 /* I2C Helper functions */
 
 #if IS_ENABLED(CONFIG_I2C)
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index e502a5fb299..af635430524 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -222,6 +222,9 @@ static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_
 
 static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
 {
+	if (put_user(kp->type, &up->type))
+		return -EFAULT;
+
 	switch (kp->type) {
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -248,8 +251,7 @@ static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
 
 static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
 {
-	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)) ||
-		put_user(kp->type, &up->type))
+	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)))
 		return -EFAULT;
 	return __put_v4l2_format32(kp, up);
 }
@@ -257,8 +259,8 @@ static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user
 static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
 {
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
-	    copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format.fmt)))
-		return -EFAULT;
+	    copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)))
+		return -EFAULT;
 	return __put_v4l2_format32(&kp->format, &up->format);
 }
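The compat fix above works because the union discriminant is now written by __put_v4l2_format32() itself, so put_v4l2_create32(), which only copies the fields up to the embedded format, no longer leaves up->type stale. A generic sketch of the same copy-out pattern follows; struct obj/obj32 and the OBJ_* constants are hypothetical illustrations, not part of the V4L2 API:

/* A 32-bit compat struct: a tag followed by a tagged union. */
struct obj32 {
        __u32 type;
        union {
                __u32 small;
                __u64 big;
        } u;
};

static int put_obj32(struct obj *kp, struct obj32 __user *up)
{
        /*
         * Copy the tag before dispatching on it, so every caller path,
         * including ones that skip the surrounding struct copy, produces
         * a consistent userspace view.
         */
        if (put_user(kp->type, &up->type))
                return -EFAULT;

        switch (kp->type) {
        case OBJ_SMALL:
                return put_user(kp->u.small, &up->u.small) ? -EFAULT : 0;
        case OBJ_BIG:
                return put_user(kp->u.big, &up->u.big) ? -EFAULT : 0;
        default:
                return -EINVAL;
        }
}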
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 86012140923..45c5b471060 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1658,10 +1658,8 @@ static int check_range(enum v4l2_ctrl_type type,
 }
 
 /* Validate a new control */
-static int validate_new(const struct v4l2_ctrl *ctrl,
-			struct v4l2_ext_control *c)
+static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new)
 {
-	union v4l2_ctrl_ptr ptr;
 	unsigned idx;
 	int err = 0;
 
@@ -1674,19 +1672,14 @@ static int validate_new(const struct v4l2_ctrl *ctrl,
 		case V4L2_CTRL_TYPE_BOOLEAN:
 		case V4L2_CTRL_TYPE_BUTTON:
 		case V4L2_CTRL_TYPE_CTRL_CLASS:
-			ptr.p_s32 = &c->value;
-			return ctrl->type_ops->validate(ctrl, 0, ptr);
-		case V4L2_CTRL_TYPE_INTEGER64:
-			ptr.p_s64 = &c->value64;
-			return ctrl->type_ops->validate(ctrl, 0, ptr);
+			return ctrl->type_ops->validate(ctrl, 0, p_new);
 		default:
 			break;
 		}
 	}
-	ptr.p = c->ptr;
-	for (idx = 0; !err && idx < c->size / ctrl->elem_size; idx++)
-		err = ctrl->type_ops->validate(ctrl, idx, ptr);
+	for (idx = 0; !err && idx < ctrl->elems; idx++)
+		err = ctrl->type_ops->validate(ctrl, idx, p_new);
 	return err;
 }
 
@@ -3012,6 +3005,7 @@ static int validate_ctrls(struct v4l2_ext_controls *cs,
 	cs->error_idx = cs->count;
 	for (i = 0; i < cs->count; i++) {
 		struct v4l2_ctrl *ctrl = helpers[i].ctrl;
+		union v4l2_ctrl_ptr p_new;
 
 		cs->error_idx = i;
 
@@ -3025,7 +3019,17 @@ static int validate_ctrls(struct v4l2_ext_controls *cs,
 		   best-effort to avoid that. */
 		if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
 			return -EBUSY;
-		ret = validate_new(ctrl, &cs->controls[i]);
+		/*
+		 * Skip validation for now if the payload needs to be copied
+		 * from userspace into kernelspace. We'll validate those later.
+		 */
+		if (ctrl->is_ptr)
+			continue;
+		if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+			p_new.p_s64 = &cs->controls[i].value64;
+		else
+			p_new.p_s32 = &cs->controls[i].value;
+		ret = validate_new(ctrl, p_new);
 		if (ret)
 			return ret;
 	}
@@ -3120,7 +3124,11 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
 	/* Copy the new caller-supplied control values.
 	   user_to_new() sets 'is_new' to 1. */
 	do {
-		ret = user_to_new(cs->controls + idx, helpers[idx].ctrl);
+		struct v4l2_ctrl *ctrl = helpers[idx].ctrl;
+
+		ret = user_to_new(cs->controls + idx, ctrl);
+		if (!ret && ctrl->is_ptr)
+			ret = validate_new(ctrl, ctrl->p_new);
 		idx = helpers[idx].next;
 	} while (!ret && idx);
 
@@ -3170,10 +3178,10 @@ int v4l2_subdev_s_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs
 EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls);
 
 /* Helper function for VIDIOC_S_CTRL compatibility */
-static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
-		    struct v4l2_ext_control *c, u32 ch_flags)
+static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
 {
 	struct v4l2_ctrl *master = ctrl->cluster[0];
+	int ret;
 	int i;
 
 	/* Reset the 'is_new' flags of the cluster */
@@ -3181,8 +3189,9 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
 		if (master->cluster[i])
 			master->cluster[i]->is_new = 0;
 
-	if (c)
-		user_to_new(c, ctrl);
+	ret = validate_new(ctrl, ctrl->p_new);
+	if (ret)
+		return ret;
 
 	/* For autoclusters with volatiles that are switched from auto to
 	   manual mode we have to update the current volatile values since
@@ -3199,15 +3208,14 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
 static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
 			 struct v4l2_ext_control *c)
 {
-	int ret = validate_new(ctrl, c);
+	int ret;
 
-	if (!ret) {
-		v4l2_ctrl_lock(ctrl);
-		ret = set_ctrl(fh, ctrl, c, 0);
-		if (!ret)
-			cur_to_user(c, ctrl);
-		v4l2_ctrl_unlock(ctrl);
-	}
+	v4l2_ctrl_lock(ctrl);
+	user_to_new(c, ctrl);
+	ret = set_ctrl(fh, ctrl, 0);
+	if (!ret)
+		cur_to_user(c, ctrl);
+	v4l2_ctrl_unlock(ctrl);
 	return ret;
 }
 
@@ -3215,7 +3223,7 @@ int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
 		struct v4l2_control *control)
 {
 	struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
-	struct v4l2_ext_control c;
+	struct v4l2_ext_control c = { control->id };
 	int ret;
 
 	if (ctrl == NULL || !ctrl->is_int)
@@ -3244,7 +3252,7 @@ int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
 	/* It's a driver bug if this happens. */
 	WARN_ON(!ctrl->is_int);
 	ctrl->val = val;
-	return set_ctrl(NULL, ctrl, NULL, 0);
+	return set_ctrl(NULL, ctrl, 0);
 }
 EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl);
 
@@ -3255,7 +3263,7 @@ int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
 	/* It's a driver bug if this happens. */
 	WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64);
 	*ctrl->p_new.p_s64 = val;
-	return set_ctrl(NULL, ctrl, NULL, 0);
+	return set_ctrl(NULL, ctrl, 0);
 }
 EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64);
 
@@ -3266,7 +3274,7 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
 	/* It's a driver bug if this happens. */
 	WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING);
 	strlcpy(ctrl->p_new.p_char, s, ctrl->maximum + 1);
-	return set_ctrl(NULL, ctrl, NULL, 0);
+	return set_ctrl(NULL, ctrl, 0);
 }
 EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
 
@@ -3289,8 +3297,8 @@ EXPORT_SYMBOL(v4l2_ctrl_notify);
 int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
 			     s64 min, s64 max, u64 step, s64 def)
 {
+	bool changed;
 	int ret;
-	struct v4l2_ext_control c;
 
 	lockdep_assert_held(ctrl->handler->lock);
 
@@ -3317,11 +3325,20 @@ int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
 	ctrl->maximum = max;
 	ctrl->step = step;
 	ctrl->default_value = def;
-	c.value = *ctrl->p_cur.p_s32;
-	if (validate_new(ctrl, &c))
-		c.value = def;
-	if (c.value != *ctrl->p_cur.p_s32)
-		ret = set_ctrl(NULL, ctrl, &c, V4L2_EVENT_CTRL_CH_RANGE);
+	cur_to_new(ctrl);
+	if (validate_new(ctrl, ctrl->p_new)) {
+		if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+			*ctrl->p_new.p_s64 = def;
+		else
+			*ctrl->p_new.p_s32 = def;
+	}
+
+	if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+		changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64;
+	else
+		changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32;
+	if (changed)
+		ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
 	else
 		send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
 	return ret;
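After this change, validate_new() always receives a union v4l2_ctrl_ptr and walks ctrl->elems elements through the type_ops->validate() hook. For reference, a per-element validate op in the same style might look like the following sketch; my_validate is a hypothetical name, while union v4l2_ctrl_ptr and the ctrl fields are the real ones used in the hunks above:

static int my_validate(const struct v4l2_ctrl *ctrl, u32 idx,
                       union v4l2_ctrl_ptr ptr)
{
        /* Elements are laid out as a flat array, indexed by idx. */
        switch (ctrl->type) {
        case V4L2_CTRL_TYPE_INTEGER:
                if (ptr.p_s32[idx] < ctrl->minimum ||
                    ptr.p_s32[idx] > ctrl->maximum)
                        return -ERANGE;
                return 0;
        case V4L2_CTRL_TYPE_INTEGER64:
                if (ptr.p_s64[idx] < ctrl->minimum ||
                    ptr.p_s64[idx] > ctrl->maximum)
                        return -ERANGE;
                return 0;
        default:
                return 0;
        }
}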
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 33617c365ac..9aa530a8bea 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -194,7 +194,7 @@ static void v4l2_device_release(struct device *cd)
 	mutex_unlock(&videodev_lock);
 
 #if defined(CONFIG_MEDIA_CONTROLLER)
-	if (v4l2_dev && v4l2_dev->mdev &&
+	if (v4l2_dev->mdev &&
 	    vdev->vfl_type != VFL_TYPE_SUBDEV)
 		media_device_unregister_entity(&vdev->entity);
 #endif
@@ -207,7 +207,7 @@ static void v4l2_device_release(struct device *cd)
 	 * TODO: In the long run all drivers that use v4l2_device should use the
 	 * v4l2_device release callback. This check will then be unnecessary.
 	 */
-	if (v4l2_dev && v4l2_dev->release == NULL)
+	if (v4l2_dev->release == NULL)
 		v4l2_dev = NULL;
 
 	/* Release video_device and perform other
@@ -360,27 +360,22 @@ static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		 * hack but it will have to do for those drivers that are not
 		 * yet converted to use unlocked_ioctl.
 		 *
-		 * There are two options: if the driver implements struct
-		 * v4l2_device, then the lock defined there is used to
-		 * serialize the ioctls. Otherwise the v4l2 core lock defined
-		 * below is used. This lock is really bad since it serializes
-		 * completely independent devices.
+		 * All drivers implement struct v4l2_device, so we use the
+		 * lock defined there to serialize the ioctls.
 		 *
-		 * Both variants suffer from the same problem: if the driver
-		 * sleeps, then it blocks all ioctls since the lock is still
-		 * held. This is very common for VIDIOC_DQBUF since that
-		 * normally waits for a frame to arrive. As a result any other
-		 * ioctl calls will proceed very, very slowly since each call
-		 * will have to wait for the VIDIOC_QBUF to finish. Things that
-		 * should take 0.01s may now take 10-20 seconds.
+		 * However, if the driver sleeps, then it blocks all ioctls
+		 * since the lock is still held. This is very common for
+		 * VIDIOC_DQBUF since that normally waits for a frame to arrive.
+		 * As a result any other ioctl calls will proceed very, very
+		 * slowly since each call will have to wait for the VIDIOC_QBUF
+		 * to finish. Things that should take 0.01s may now take 10-20
+		 * seconds.
 		 *
 		 * The workaround is to *not* take the lock for VIDIOC_DQBUF.
 		 * This actually works OK for videobuf-based drivers, since
 		 * videobuf will take its own internal lock.
 		 */
-		static DEFINE_MUTEX(v4l2_ioctl_mutex);
-		struct mutex *m = vdev->v4l2_dev ?
-			&vdev->v4l2_dev->ioctl_lock : &v4l2_ioctl_mutex;
+		struct mutex *m = &vdev->v4l2_dev->ioctl_lock;
 
 		if (cmd != VIDIOC_DQBUF && mutex_lock_interruptible(m))
 			return -ERESTARTSYS;
@@ -938,12 +933,11 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
 			name_base, nr, video_device_node_name(vdev));
 
 	/* Increase v4l2_device refcount */
-	if (vdev->v4l2_dev)
-		v4l2_device_get(vdev->v4l2_dev);
+	v4l2_device_get(vdev->v4l2_dev);
 
 #if defined(CONFIG_MEDIA_CONTROLLER)
 	/* Part 5: Register the entity. */
-	if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
+	if (vdev->v4l2_dev->mdev &&
 	    vdev->vfl_type != VFL_TYPE_SUBDEV) {
 		vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
 		vdev->entity.name = vdev->name;
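The locking discipline the comment describes boils down to a small pattern: take the per-v4l2_device ioctl lock for everything except VIDIOC_DQBUF, which can sleep for a whole frame interval. A condensed sketch of that pattern, with my_do_ioctl standing in for a hypothetical handler:

static long my_unlocked_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg)
{
        struct video_device *vdev = video_devdata(filp);
        struct mutex *m = &vdev->v4l2_dev->ioctl_lock;
        long ret;

        /*
         * Serialize all ioctls on the device except DQBUF: holding the
         * lock while DQBUF waits for a frame would stall every other
         * ioctl for that long.
         */
        if (cmd != VIDIOC_DQBUF && mutex_lock_interruptible(m))
                return -ERESTARTSYS;

        ret = my_do_ioctl(filp, cmd, arg);      /* hypothetical handler */

        if (cmd != VIDIOC_DQBUF)
                mutex_unlock(m);
        return ret;
}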
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 9ccb19a435e..faac2f4e0f3 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -257,7 +257,7 @@ static void v4l_print_format(const void *arg, bool write_only)
 		pr_cont(", width=%u, height=%u, "
 			"pixelformat=%c%c%c%c, field=%s, "
 			"bytesperline=%u, sizeimage=%u, colorspace=%d, "
-			"flags %u\n",
+			"flags %x, ycbcr_enc=%u, quantization=%u\n",
 			pix->width, pix->height,
 			(pix->pixelformat & 0xff),
 			(pix->pixelformat >>  8) & 0xff,
@@ -265,21 +265,24 @@ static void v4l_print_format(const void *arg, bool write_only)
 			(pix->pixelformat >> 24) & 0xff,
 			prt_names(pix->field, v4l2_field_names),
 			pix->bytesperline, pix->sizeimage,
-			pix->colorspace, pix->flags);
+			pix->colorspace, pix->flags, pix->ycbcr_enc,
+			pix->quantization);
 		break;
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
 		mp = &p->fmt.pix_mp;
 		pr_cont(", width=%u, height=%u, "
 			"format=%c%c%c%c, field=%s, "
-			"colorspace=%d, num_planes=%u\n",
+			"colorspace=%d, num_planes=%u, flags=%x, "
+			"ycbcr_enc=%u, quantization=%u\n",
 			mp->width, mp->height,
 			(mp->pixelformat & 0xff),
 			(mp->pixelformat >>  8) & 0xff,
 			(mp->pixelformat >> 16) & 0xff,
 			(mp->pixelformat >> 24) & 0xff,
 			prt_names(mp->field, v4l2_field_names),
-			mp->colorspace, mp->num_planes);
+			mp->colorspace, mp->num_planes, mp->flags,
+			mp->ycbcr_enc, mp->quantization);
 		for (i = 0; i < mp->num_planes; i++)
 			printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
 					mp->plane_fmt[i].bytesperline,
@@ -1014,6 +1017,12 @@ static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
 
 	ret = ops->vidioc_querycap(file, fh, cap);
 	cap->capabilities |= V4L2_CAP_EXT_PIX_FORMAT;
+	/*
+	 * Drivers MUST fill in device_caps, so check for this and
+	 * warn if it was forgotten.
+	 */
+	WARN_ON(!(cap->capabilities & V4L2_CAP_DEVICE_CAPS) ||
+		!cap->device_caps);
 	cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT;
 
 	return ret;
@@ -1040,7 +1049,7 @@ static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
 	if (ops->vidioc_g_priority)
 		return ops->vidioc_g_priority(file, fh, arg);
 	vfd = video_devdata(file);
-	*p = v4l2_prio_max(&vfd->v4l2_dev->prio);
+	*p = v4l2_prio_max(vfd->prio);
 	return 0;
 }
 
@@ -1055,7 +1064,7 @@ static int v4l_s_priority(const struct v4l2_ioctl_ops *ops,
 		return ops->vidioc_s_priority(file, fh, *p);
 	vfd = video_devdata(file);
 	vfh = file->private_data;
-	return v4l2_prio_change(&vfd->v4l2_dev->prio, &vfh->prio, *p);
+	return v4l2_prio_change(vfd->prio, &vfh->prio, *p);
 }
 
 static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
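The new WARN_ON enforces the rule that drivers fill in device_caps and set V4L2_CAP_DEVICE_CAPS. A minimal sketch of a conforming vidioc_querycap implementation for a hypothetical capture driver ("mydrv" and its details are illustrative):

static int my_querycap(struct file *file, void *fh,
                       struct v4l2_capability *cap)
{
        strlcpy(cap->driver, "mydrv", sizeof(cap->driver));
        strlcpy(cap->card, "My Capture Card", sizeof(cap->card));
        snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:mydrv");
        /* caps of this particular device node */
        cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
        /* caps of the physical device as a whole, plus the marker flag */
        cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
        return 0;
}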
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index b91a266d0b7..926836d1813 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -51,6 +51,8 @@ MODULE_LICENSE("GPL");
 
 #define CALL(q, f, arg...)						\
 	((q->int_ops->f) ? q->int_ops->f(arg) : 0)
+#define CALLPTR(q, f, arg...)						\
+	((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
 
 struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
 {
@@ -831,7 +833,7 @@ static int __videobuf_copy_to_user(struct videobuf_queue *q,
 				char __user *data, size_t count,
 				int nonblocking)
 {
-	void *vaddr = CALL(q, vaddr, buf);
+	void *vaddr = CALLPTR(q, vaddr, buf);
 
 	/* copy to userspace */
 	if (count > buf->size - q->read_off)
@@ -848,7 +850,7 @@ static int __videobuf_copy_stream(struct videobuf_queue *q,
 				char __user *data, size_t count, size_t pos,
 				int vbihack, int nonblocking)
 {
-	unsigned int *fc = CALL(q, vaddr, buf);
+	unsigned int *fc = CALLPTR(q, vaddr, buf);
 
 	if (vbihack) {
 		/* dirty, undocumented hack -- pass the frame counter
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index bf80f0f7dfb..e02353e340d 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -305,6 +305,15 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
 	/* Try to remap memory */
 	size = vma->vm_end - vma->vm_start;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	/* the "vm_pgoff" is just used in v4l2 to find the
+	 * corresponding buffer data structure which is allocated
+	 * earlier and it does not mean the offset from the physical
+	 * buffer start address as usual. So set it to 0 to pass
+	 * the sanity check in vm_iomap_memory().
+	 */
+	vma->vm_pgoff = 0;
+
 	retval = vm_iomap_memory(vma, mem->dma_handle, size);
 	if (retval) {
 		dev_err(q->dev, "mmap: remap failed with error %d. ",
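The videobuf-dma-contig hunk illustrates a general point about vm_iomap_memory(): it sanity-checks that vma->vm_pgoff fits inside the region being mapped, so an offset that was only used as a buffer cookie must be cleared first. A minimal sketch of an mmap handler in that style; struct my_buffer and its dma_handle field are hypothetical:

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct my_buffer *mem = file->private_data;     /* hypothetical */
        unsigned long size = vma->vm_end - vma->vm_start;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /*
         * vm_pgoff carried a lookup cookie, not a physical offset;
         * reset it so vm_iomap_memory() maps from the buffer start.
         */
        vma->vm_pgoff = 0;

        return vm_iomap_memory(vma, mem->dma_handle, size);
}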
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index f2e43de3dd8..d09a8916e94 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -189,6 +189,8 @@ static void __vb2_queue_cancel(struct vb2_queue *q);
 static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
 {
 	struct vb2_queue *q = vb->vb2_queue;
+	enum dma_data_direction dma_dir =
+		V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	void *mem_priv;
 	int plane;
 
@@ -200,7 +202,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
 		unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
 
 		mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
-				      size, q->gfp_flags);
+				      size, dma_dir, q->gfp_flags);
 		if (IS_ERR_OR_NULL(mem_priv))
 			goto free;
 
@@ -1358,7 +1360,8 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 	void *mem_priv;
 	unsigned int plane;
 	int ret;
-	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+	enum dma_data_direction dma_dir =
+		V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	bool reacquired = vb->planes[0].mem_priv == NULL;
 
 	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
@@ -1400,7 +1403,7 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 		/* Acquire each plane's memory */
 		mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
 				      planes[plane].m.userptr,
-				      planes[plane].length, write);
+				      planes[plane].length, dma_dir);
 		if (IS_ERR_OR_NULL(mem_priv)) {
 			dprintk(1, "failed acquiring userspace "
 						"memory for plane %d\n", plane);
@@ -1461,7 +1464,8 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 	void *mem_priv;
 	unsigned int plane;
 	int ret;
-	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+	enum dma_data_direction dma_dir =
+		V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	bool reacquired = vb->planes[0].mem_priv == NULL;
 
 	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
@@ -1509,7 +1513,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 		/* Acquire each plane's memory */
 		mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
-			dbuf, planes[plane].length, write);
+			dbuf, planes[plane].length, dma_dir);
 		if (IS_ERR(mem_priv)) {
 			dprintk(1, "failed to attach dmabuf\n");
 			ret = PTR_ERR(mem_priv);
@@ -3385,14 +3389,14 @@ int _vb2_fop_release(struct file *file, struct mutex *lock)
 {
 	struct video_device *vdev = video_devdata(file);
 
+	if (lock)
+		mutex_lock(lock);
 	if (file->private_data == vdev->queue->owner) {
-		if (lock)
-			mutex_lock(lock);
 		vb2_queue_release(vdev->queue);
 		vdev->queue->owner = NULL;
-		if (lock)
-			mutex_unlock(lock);
 	}
+	if (lock)
+		mutex_unlock(lock);
 	return v4l2_fh_release(file);
 }
 EXPORT_SYMBOL_GPL(_vb2_fop_release);
@@ -3455,27 +3459,16 @@ unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
 	struct video_device *vdev = video_devdata(file);
 	struct vb2_queue *q = vdev->queue;
 	struct mutex *lock = q->lock ? q->lock : vdev->lock;
-	unsigned long req_events = poll_requested_events(wait);
 	unsigned res;
 	void *fileio;
-	bool must_lock = false;
-
-	/* Try to be smart: only lock if polling might start fileio,
-	   otherwise locking will only introduce unwanted delays. */
-	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
-		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
-				(req_events & (POLLIN | POLLRDNORM)))
-			must_lock = true;
-		else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
-				(req_events & (POLLOUT | POLLWRNORM)))
-			must_lock = true;
-	}
 
-	/* If locking is needed, but this helper doesn't know how, then you
-	   shouldn't be using this helper but you should write your own. */
-	WARN_ON(must_lock && !lock);
+	/*
+	 * If this helper doesn't know how to lock, then you shouldn't be using
+	 * it but you should write your own.
+	 */
+	WARN_ON(!lock);
 
-	if (must_lock && lock && mutex_lock_interruptible(lock))
+	if (lock && mutex_lock_interruptible(lock))
 		return POLLERR;
 
 	fileio = q->fileio;
@@ -3483,9 +3476,9 @@ unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
 	res = vb2_poll(vdev->queue, file, wait);
 
 	/* If fileio was started, then we have a new queue owner. */
-	if (must_lock && !fileio && q->fileio)
+	if (!fileio && q->fileio)
 		q->owner = file->private_data;
-	if (must_lock && lock)
+	if (lock)
 		mutex_unlock(lock);
 	return res;
 }
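The ternary that derives the DMA direction from the queue type is repeated in three places above. It can be captured in a tiny helper; this is an illustrative sketch only (vb2 itself open-codes the expression):

static inline enum dma_data_direction vb2_queue_dma_dir(const struct vb2_queue *q)
{
        /*
         * OUTPUT queues carry data to the device (display, encode);
         * CAPTURE queues carry data from it.
         */
        return V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}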
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 4a02ade14b4..b481d20c837 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -155,7 +155,8 @@ static void vb2_dc_put(void *buf_priv)
 	kfree(buf);
 }
 
-static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
+static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
+			  enum dma_data_direction dma_dir, gfp_t gfp_flags)
 {
 	struct vb2_dc_conf *conf = alloc_ctx;
 	struct device *dev = conf->dev;
@@ -176,6 +177,7 @@ static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
 	/* Prevent the device from being released while the buffer is used */
 	buf->dev = get_device(dev);
 	buf->size = size;
+	buf->dma_dir = dma_dir;
 
 	buf->handler.refcount = &buf->refcount;
 	buf->handler.put = vb2_dc_put;
@@ -229,7 +231,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
 
 struct vb2_dc_attachment {
 	struct sg_table sgt;
-	enum dma_data_direction dir;
+	enum dma_data_direction dma_dir;
 };
 
 static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
@@ -264,7 +266,7 @@ static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
 		wr = sg_next(wr);
 	}
 
-	attach->dir = DMA_NONE;
+	attach->dma_dir = DMA_NONE;
 	dbuf_attach->priv = attach;
 
 	return 0;
@@ -282,16 +284,16 @@ static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
 	sgt = &attach->sgt;
 
 	/* release the scatterlist cache */
-	if (attach->dir != DMA_NONE)
+	if (attach->dma_dir != DMA_NONE)
 		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			attach->dir);
+			attach->dma_dir);
 	sg_free_table(sgt);
 	kfree(attach);
 	db_attach->priv = NULL;
 }
 
 static struct sg_table *vb2_dc_dmabuf_ops_map(
-	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
+	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
 {
 	struct vb2_dc_attachment *attach = db_attach->priv;
 	/* stealing dmabuf mutex to serialize map/unmap operations */
@@ -303,27 +305,27 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
 	sgt = &attach->sgt;
 
 	/* return previously mapped sg table */
-	if (attach->dir == dir) {
+	if (attach->dma_dir == dma_dir) {
 		mutex_unlock(lock);
 		return sgt;
 	}
 
 	/* release any previous cache */
-	if (attach->dir != DMA_NONE) {
+	if (attach->dma_dir != DMA_NONE) {
 		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			attach->dir);
-		attach->dir = DMA_NONE;
+			attach->dma_dir);
+		attach->dma_dir = DMA_NONE;
 	}
 
 	/* mapping to the client with new direction */
-	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
+	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
 	if (ret <= 0) {
 		pr_err("failed to map scatterlist\n");
 		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
 	}
 
-	attach->dir = dir;
+	attach->dma_dir = dma_dir;
 
 	mutex_unlock(lock);
 
@@ -331,7 +333,7 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
 }
 
 static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
-	struct sg_table *sgt, enum dma_data_direction dir)
+	struct sg_table *sgt, enum dma_data_direction dma_dir)
 {
 	/* nothing to be done here */
 }
@@ -460,7 +462,8 @@ static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
 }
 
 static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
-	int n_pages, struct vm_area_struct *vma, int write)
+	int n_pages, struct vm_area_struct *vma,
+	enum dma_data_direction dma_dir)
 {
 	if (vma_is_io(vma)) {
 		unsigned int i;
@@ -482,7 +485,7 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
 		int n;
 
 		n = get_user_pages(current, current->mm, start & PAGE_MASK,
-			n_pages, write, 1, pages, NULL);
+			n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
 		/* negative error means that no page was pinned */
 		n = max(n, 0);
 		if (n != n_pages) {
@@ -508,7 +511,15 @@ static void vb2_dc_put_userptr(void *buf_priv)
 	struct sg_table *sgt = buf->dma_sgt;
 
 	if (sgt) {
-		dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+		DEFINE_DMA_ATTRS(attrs);
+
+		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+		/*
+		 * No need to sync to CPU, it's already synced to the CPU
+		 * since the finish() memop will have been called before this.
+		 */
+		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				   buf->dma_dir, &attrs);
 		if (!vma_is_io(buf->vma))
 			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
 
@@ -551,7 +562,7 @@ static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn
 #endif
 
 static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
-	unsigned long size, int write)
+	unsigned long size, enum dma_data_direction dma_dir)
 {
 	struct vb2_dc_conf *conf = alloc_ctx;
 	struct vb2_dc_buf *buf;
@@ -565,6 +576,9 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	struct sg_table *sgt;
 	unsigned long contig_size;
 	unsigned long dma_align = dma_get_cache_alignment();
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	/* Only cache aligned DMA transfers are reliable */
 	if (!IS_ALIGNED(vaddr | size, dma_align)) {
@@ -582,7 +596,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 		return ERR_PTR(-ENOMEM);
 
 	buf->dev = conf->dev;
-	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	buf->dma_dir = dma_dir;
 
 	start = vaddr & PAGE_MASK;
 	offset = vaddr & ~PAGE_MASK;
@@ -618,7 +632,8 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	}
 
 	/* extract page list from userspace mapping */
-	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
+	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma,
+				    dma_dir == DMA_FROM_DEVICE);
 	if (ret) {
 		unsigned long pfn;
 		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
@@ -650,8 +665,12 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	kfree(pages);
 	pages = NULL;
 
-	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
-		buf->dma_dir);
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				      buf->dma_dir, &attrs);
 	if (sgt->nents <= 0) {
 		pr_err("failed to map scatterlist\n");
 		ret = -EIO;
@@ -673,7 +692,8 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	return buf;
 
 fail_map_sg:
-	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+			   buf->dma_dir, &attrs);
 
 fail_sgt_init:
 	if (!vma_is_io(buf->vma))
@@ -782,7 +802,7 @@ static void vb2_dc_detach_dmabuf(void *mem_priv)
 }
 
 static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
-	unsigned long size, int write)
+	unsigned long size, enum dma_data_direction dma_dir)
 {
 	struct vb2_dc_conf *conf = alloc_ctx;
 	struct vb2_dc_buf *buf;
@@ -804,7 +824,7 @@ static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
 		return dba;
 	}
 
-	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	buf->dma_dir = dma_dir;
 	buf->size = size;
 	buf->db_attach = dba;
 
@@ -850,7 +870,8 @@ EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
 
 void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
 {
-	kfree(alloc_ctx);
+	if (!IS_ERR_OR_NULL(alloc_ctx))
+		kfree(alloc_ctx);
 }
 EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
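The hunks above adopt a cache-management pattern: map and unmap with DMA_ATTR_SKIP_CPU_SYNC, and do the cache maintenance explicitly from the vb2 prepare()/finish() memops instead, once per queued buffer rather than once per map. A condensed sketch of that pattern using the 3.18-era struct dma_attrs API (the helper names are hypothetical):

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_nosync(struct device *dev, struct sg_table *sgt,
                      enum dma_data_direction dir)
{
        DEFINE_DMA_ATTRS(attrs);

        /* defer all cache maintenance to prepare()/finish() */
        dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
        sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
                                      dir, &attrs);
        return sgt->nents <= 0 ? -EIO : 0;
}

/* called from the prepare() memop, just before the device uses the buffer */
static void buf_prepare(struct device *dev, struct sg_table *sgt,
                        enum dma_data_direction dir)
{
        dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, dir);
}

/* called from the finish() memop, after the device is done with it */
static void buf_finish(struct device *dev, struct sg_table *sgt,
                       enum dma_data_direction dir)
{
        dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, dir);
}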
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 9b163a440f8..b1838abb6d0 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -30,17 +30,30 @@ module_param(debug, int, 0644);
 			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
 	} while (0)
 
+struct vb2_dma_sg_conf {
+	struct device		*dev;
+};
+
 struct vb2_dma_sg_buf {
+	struct device			*dev;
 	void				*vaddr;
 	struct page			**pages;
-	int				write;
 	int				offset;
+	enum dma_data_direction		dma_dir;
 	struct sg_table			sg_table;
+	/*
+	 * This will point to sg_table when used with the MMAP or USERPTR
+	 * memory model, and to the dma_buf sglist when used with the
+	 * DMABUF memory model.
+	 */
+	struct sg_table			*dma_sgt;
 	size_t				size;
 	unsigned int			num_pages;
 	atomic_t			refcount;
 	struct vb2_vmarea_handler	handler;
 	struct vm_area_struct		*vma;
+
+	struct dma_buf_attachment	*db_attach;
 };
 
 static void vb2_dma_sg_put(void *buf_priv);
@@ -86,22 +99,31 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
 	return 0;
 }
 
-static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
+static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
+			      enum dma_data_direction dma_dir, gfp_t gfp_flags)
 {
+	struct vb2_dma_sg_conf *conf = alloc_ctx;
 	struct vb2_dma_sg_buf *buf;
+	struct sg_table *sgt;
 	int ret;
 	int num_pages;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+
+	if (WARN_ON(alloc_ctx == NULL))
+		return NULL;
 
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return NULL;
 
 	buf->vaddr = NULL;
-	buf->write = 0;
+	buf->dma_dir = dma_dir;
 	buf->offset = 0;
 	buf->size = size;
 	/* size is already page aligned */
 	buf->num_pages = size >> PAGE_SHIFT;
+	buf->dma_sgt = &buf->sg_table;
 
 	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
 			     GFP_KERNEL);
@@ -112,11 +134,23 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
 	if (ret)
 		goto fail_pages_alloc;
 
-	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
 			buf->num_pages, 0, size, GFP_KERNEL);
 	if (ret)
 		goto fail_table_alloc;
 
+	/* Prevent the device from being released while the buffer is used */
+	buf->dev = get_device(conf->dev);
+
+	sgt = &buf->sg_table;
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+			     buf->dma_dir, &attrs) == 0)
+		goto fail_map;
+
 	buf->handler.refcount = &buf->refcount;
 	buf->handler.put = vb2_dma_sg_put;
 	buf->handler.arg = buf;
@@ -127,6 +161,9 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
 		__func__, buf->num_pages);
 	return buf;
 
+fail_map:
+	put_device(buf->dev);
+	sg_free_table(buf->dma_sgt);
 fail_table_alloc:
 	num_pages = buf->num_pages;
 	while (num_pages--)
@@ -141,42 +178,81 @@ fail_pages_array_alloc:
 static void vb2_dma_sg_put(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = &buf->sg_table;
 	int i = buf->num_pages;
 
 	if (atomic_dec_and_test(&buf->refcount)) {
+		DEFINE_DMA_ATTRS(attrs);
+
+		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
 			buf->num_pages);
+		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+				   buf->dma_dir, &attrs);
 		if (buf->vaddr)
 			vm_unmap_ram(buf->vaddr, buf->num_pages);
-		sg_free_table(&buf->sg_table);
+		sg_free_table(buf->dma_sgt);
 		while (--i >= 0)
 			__free_page(buf->pages[i]);
 		kfree(buf->pages);
+		put_device(buf->dev);
 		kfree(buf);
 	}
 }
 
+static void vb2_dma_sg_prepare(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	/* DMABUF exporter will flush the cache for us */
+	if (buf->db_attach)
+		return;
+
+	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+static void vb2_dma_sg_finish(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	/* DMABUF exporter will flush the cache for us */
+	if (buf->db_attach)
+		return;
+
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
 static inline int vma_is_io(struct vm_area_struct *vma)
 {
 	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
 }
 
 static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
-				    unsigned long size, int write)
+				    unsigned long size,
+				    enum dma_data_direction dma_dir)
 {
+	struct vb2_dma_sg_conf *conf = alloc_ctx;
 	struct vb2_dma_sg_buf *buf;
 	unsigned long first, last;
 	int num_pages_from_user;
 	struct vm_area_struct *vma;
+	struct sg_table *sgt;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return NULL;
 
 	buf->vaddr = NULL;
-	buf->write = write;
+	buf->dev = conf->dev;
+	buf->dma_dir = dma_dir;
 	buf->offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
+	buf->dma_sgt = &buf->sg_table;
 
 	first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
 	last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
@@ -221,7 +297,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	num_pages_from_user = get_user_pages(current, current->mm,
 					     vaddr & PAGE_MASK,
 					     buf->num_pages,
-					     write,
+					     buf->dma_dir == DMA_FROM_DEVICE,
 					     1, /* force */
 					     buf->pages,
 					     NULL);
@@ -229,12 +305,22 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	if (num_pages_from_user != buf->num_pages)
 		goto userptr_fail_get_user_pages;
 
-	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
 			buf->num_pages, buf->offset, size, 0))
 		goto userptr_fail_alloc_table_from_pages;
 
+	sgt = &buf->sg_table;
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+			     buf->dma_dir, &attrs) == 0)
+		goto userptr_fail_map;
 	return buf;
 
+userptr_fail_map:
+	sg_free_table(&buf->sg_table);
 userptr_fail_alloc_table_from_pages:
 userptr_fail_get_user_pages:
 	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
@@ -257,15 +343,20 @@ userptr_fail_alloc_pages:
 static void vb2_dma_sg_put_userptr(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = &buf->sg_table;
 	int i = buf->num_pages;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
 	       __func__, buf->num_pages);
+	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir, &attrs);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
-	sg_free_table(&buf->sg_table);
+	sg_free_table(buf->dma_sgt);
 	while (--i >= 0) {
-		if (buf->write)
+		if (buf->dma_dir == DMA_FROM_DEVICE)
 			set_page_dirty_lock(buf->pages[i]);
 		if (!vma_is_io(buf->vma))
 			put_page(buf->pages[i]);
@@ -281,14 +372,16 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)
 
 	BUG_ON(!buf);
 
-	if (!buf->vaddr)
-		buf->vaddr = vm_map_ram(buf->pages,
-					buf->num_pages,
-					-1,
-					PAGE_KERNEL);
+	if (!buf->vaddr) {
+		if (buf->db_attach)
+			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+		else
+			buf->vaddr = vm_map_ram(buf->pages,
+					buf->num_pages, -1, PAGE_KERNEL);
+	}
 
 	/* add offset in case userptr is not page-aligned */
-	return buf->vaddr + buf->offset;
+	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
 }
 
 static unsigned int vb2_dma_sg_num_users(void *buf_priv)
@@ -335,11 +428,279 @@ static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
 	return 0;
 }
 
+/*********************************************/
+/*         DMABUF ops for exporters          */
+/*********************************************/
+
+struct vb2_dma_sg_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dma_dir;
+};
+
+static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+	struct dma_buf_attachment *dbuf_attach)
+{
+	struct vb2_dma_sg_attachment *attach;
+	unsigned int i;
+	struct scatterlist *rd, *wr;
+	struct sg_table *sgt;
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	int ret;
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return -ENOMEM;
+
+	sgt = &attach->sgt;
+	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
+	 * map the same scatter list to multiple attachments at the same time.
+	 */
+	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
+	if (ret) {
+		kfree(attach);
+		return -ENOMEM;
+	}
+
+	rd = buf->dma_sgt->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	attach->dma_dir = DMA_NONE;
+	dbuf_attach->priv = attach;
+
+	return 0;
+}
+
+static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
+	struct dma_buf_attachment *db_attach)
+{
+	struct vb2_dma_sg_attachment *attach = db_attach->priv;
+	struct sg_table *sgt;
+
+	if (!attach)
+		return;
+
+	sgt = &attach->sgt;
+
+	/* release the scatterlist cache */
+	if (attach->dma_dir != DMA_NONE)
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+	sg_free_table(sgt);
+	kfree(attach);
+	db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
+	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+	struct vb2_dma_sg_attachment *attach = db_attach->priv;
+	/* stealing dmabuf mutex to serialize map/unmap operations */
+	struct mutex *lock = &db_attach->dmabuf->lock;
+	struct sg_table *sgt;
+	int ret;
+
+	mutex_lock(lock);
+
+	sgt = &attach->sgt;
+	/* return previously mapped sg table */
+	if (attach->dma_dir == dma_dir) {
+		mutex_unlock(lock);
+		return sgt;
+	}
+
+	/* release any previous cache */
+	if (attach->dma_dir != DMA_NONE) {
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+		attach->dma_dir = DMA_NONE;
+	}
+
+	/* mapping to the client with new direction */
+	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
+	if (ret <= 0) {
+		pr_err("failed to map scatterlist\n");
+		mutex_unlock(lock);
+		return ERR_PTR(-EIO);
+	}
+
+	attach->dma_dir = dma_dir;
+
+	mutex_unlock(lock);
+
+	return sgt;
+}
+
+static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+	struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+	/* nothing to be done here */
+}
+
+static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
+	vb2_dma_sg_put(dbuf->priv);
+}
+
+static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+
+	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
+}
+
+static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+
+	return vb2_dma_sg_vaddr(buf);
+}
+
+static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
+	struct vm_area_struct *vma)
+{
+	return vb2_dma_sg_mmap(dbuf->priv, vma);
+}
+
+static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
+	.attach = vb2_dma_sg_dmabuf_ops_attach,
+	.detach = vb2_dma_sg_dmabuf_ops_detach,
+	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
+	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
+	.kmap = vb2_dma_sg_dmabuf_ops_kmap,
+	.kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
+	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
+	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
+	.release = vb2_dma_sg_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct dma_buf *dbuf;
+
+	if (WARN_ON(!buf->dma_sgt))
+		return NULL;
+
+	dbuf = dma_buf_export(buf, &vb2_dma_sg_dmabuf_ops, buf->size, flags, NULL);
+	if (IS_ERR(dbuf))
+		return NULL;
+
+	/* dmabuf keeps reference to vb2 buffer */
+	atomic_inc(&buf->refcount);
+
+	return dbuf;
+}
+
+/*********************************************/
+/*       callbacks for DMABUF buffers        */
+/*********************************************/
+
+static int vb2_dma_sg_map_dmabuf(void *mem_priv)
+{
+	struct vb2_dma_sg_buf *buf = mem_priv;
+	struct sg_table *sgt;
+
+	if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to pin a non attached buffer\n");
+		return -EINVAL;
+	}
+
+	if (WARN_ON(buf->dma_sgt)) {
+		pr_err("dmabuf buffer is already pinned\n");
+		return 0;
+	}
+
+	/* get the associated scatterlist for this buffer */
+	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+	if (IS_ERR(sgt)) {
+		pr_err("Error getting dmabuf scatterlist\n");
+		return -EINVAL;
+	}
+
+	buf->dma_sgt = sgt;
+	buf->vaddr = NULL;
+
+	return 0;
+}
+
+static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
+{
+	struct vb2_dma_sg_buf *buf = mem_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to unpin a not attached buffer\n");
+		return;
+	}
+
+	if (WARN_ON(!sgt)) {
+		pr_err("dmabuf buffer is already unpinned\n");
+		return;
+	}
+
+	if (buf->vaddr) {
+		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+		buf->vaddr = NULL;
+	}
+	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+	buf->dma_sgt = NULL;
+}
+
+static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
+{
+	struct vb2_dma_sg_buf *buf = mem_priv;
+
+	/* if vb2 works correctly you should never detach mapped buffer */
+	if (WARN_ON(buf->dma_sgt))
+		vb2_dma_sg_unmap_dmabuf(buf);
+
+	/* detach this attachment */
+	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
+	kfree(buf);
+}
+
+static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+	unsigned long size, enum dma_data_direction dma_dir)
+{
+	struct vb2_dma_sg_conf *conf = alloc_ctx;
+	struct vb2_dma_sg_buf *buf;
+	struct dma_buf_attachment *dba;
+
+	if (dbuf->size < size)
+		return ERR_PTR(-EFAULT);
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->dev = conf->dev;
+	/* create attachment for the dmabuf with the user device */
+	dba = dma_buf_attach(dbuf, buf->dev);
+	if (IS_ERR(dba)) {
+		pr_err("failed to attach dmabuf\n");
+		kfree(buf);
+		return dba;
+	}
+
+	buf->dma_dir = dma_dir;
+	buf->size = size;
+	buf->db_attach = dba;
+
+	return buf;
+}
+
 static void *vb2_dma_sg_cookie(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
 
-	return &buf->sg_table;
+	return buf->dma_sgt;
 }
 
 const struct vb2_mem_ops vb2_dma_sg_memops = {
@@ -347,13 +708,41 @@ const struct vb2_mem_ops vb2_dma_sg_memops = {
 	.put		= vb2_dma_sg_put,
 	.get_userptr	= vb2_dma_sg_get_userptr,
 	.put_userptr	= vb2_dma_sg_put_userptr,
+	.prepare	= vb2_dma_sg_prepare,
+	.finish		= vb2_dma_sg_finish,
 	.vaddr		= vb2_dma_sg_vaddr,
 	.mmap		= vb2_dma_sg_mmap,
 	.num_users	= vb2_dma_sg_num_users,
+	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
+	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
+	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
+	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
+	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
 	.cookie		= vb2_dma_sg_cookie,
 };
 EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
 
+void *vb2_dma_sg_init_ctx(struct device *dev)
+{
+	struct vb2_dma_sg_conf *conf;
+
+	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+	if (!conf)
+		return ERR_PTR(-ENOMEM);
+
+	conf->dev = dev;
+
+	return conf;
+}
+EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);
+
+void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
+{
+	if (!IS_ERR_OR_NULL(alloc_ctx))
+		kfree(alloc_ctx);
+}
+EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);
+
 MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
 MODULE_AUTHOR("Andrzej Pietrasiewicz");
 MODULE_LICENSE("GPL");
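The new vb2_dma_sg_init_ctx()/vb2_dma_sg_cleanup_ctx() pair mirrors the dma-contig allocator API: the allocator now needs a struct device to call dma_map_sg() on, so drivers must create a context and hand it to vb2 from their queue_setup op. A sketch of the wiring, where struct my_dev and its fields are hypothetical:

static int my_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
                          unsigned int *nbuffers, unsigned int *nplanes,
                          unsigned int sizes[], void *alloc_ctxs[])
{
        struct my_dev *dev = vb2_get_drv_priv(vq);

        *nplanes = 1;
        sizes[0] = dev->buf_size;
        /* hand the dma-sg allocator context to vb2 */
        alloc_ctxs[0] = dev->alloc_ctx;
        return 0;
}

/* in probe(): */
/*      dev->alloc_ctx = vb2_dma_sg_init_ctx(dev->dev);         */
/*      if (IS_ERR(dev->alloc_ctx))                             */
/*              return PTR_ERR(dev->alloc_ctx);                 */
/* and in remove():                                             */
/*      vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);                 */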
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 313d9771b2b..fba944e5022 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -25,7 +25,7 @@ struct vb2_vmalloc_buf {
 	void				*vaddr;
 	struct page			**pages;
 	struct vm_area_struct		*vma;
-	int				write;
+	enum dma_data_direction		dma_dir;
 	unsigned long			size;
 	unsigned int			n_pages;
 	atomic_t			refcount;
@@ -35,7 +35,8 @@ struct vb2_vmalloc_buf {
 
 static void vb2_vmalloc_put(void *buf_priv);
 
-static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
+static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
+			       enum dma_data_direction dma_dir, gfp_t gfp_flags)
 {
 	struct vb2_vmalloc_buf *buf;
 
@@ -45,6 +46,7 @@ static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fl
 
 	buf->size = size;
 	buf->vaddr = vmalloc_user(buf->size);
+	buf->dma_dir = dma_dir;
 	buf->handler.refcount = &buf->refcount;
 	buf->handler.put = vb2_vmalloc_put;
 	buf->handler.arg = buf;
@@ -70,7 +72,8 @@ static void vb2_vmalloc_put(void *buf_priv)
 }
 
 static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
-				     unsigned long size, int write)
+				     unsigned long size,
+				     enum dma_data_direction dma_dir)
 {
 	struct vb2_vmalloc_buf *buf;
 	unsigned long first, last;
@@ -82,7 +85,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	if (!buf)
 		return NULL;
 
-	buf->write = write;
+	buf->dma_dir = dma_dir;
 	offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
 
@@ -107,7 +110,8 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 		/* current->mm->mmap_sem is taken by videobuf2 core */
 		n_pages = get_user_pages(current, current->mm,
 					 vaddr & PAGE_MASK, buf->n_pages,
-					 write, 1, /* force */
+					 dma_dir == DMA_FROM_DEVICE,
+					 1, /* force */
 					 buf->pages, NULL);
 		if (n_pages != buf->n_pages)
 			goto fail_get_user_pages;
@@ -144,14 +148,13 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
 		if (vaddr)
 			vm_unmap_ram((void *)vaddr, buf->n_pages);
 		for (i = 0; i < buf->n_pages; ++i) {
-			if (buf->write)
+			if (buf->dma_dir == DMA_FROM_DEVICE)
 				set_page_dirty_lock(buf->pages[i]);
 			put_page(buf->pages[i]);
 		}
 		kfree(buf->pages);
 	} else {
-		if (buf->vma)
-			vb2_put_vma(buf->vma);
+		vb2_put_vma(buf->vma);
 		iounmap(buf->vaddr);
 	}
 	kfree(buf);
@@ -209,6 +212,176 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
 }
 
 /*********************************************/
+/*         DMABUF ops for exporters          */
+/*********************************************/
+
+struct vb2_vmalloc_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dma_dir;
+};
+
+static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+	struct dma_buf_attachment *dbuf_attach)
+{
+	struct vb2_vmalloc_attachment *attach;
+	struct vb2_vmalloc_buf *buf = dbuf->priv;
+	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
+	struct sg_table *sgt;
+	struct scatterlist *sg;
+	void *vaddr = buf->vaddr;
+	int ret;
+	int i;
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return -ENOMEM;
+
+	sgt = &attach->sgt;
+	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
+	if (ret) {
+		kfree(attach);
+		return ret;
+	}
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		struct page *page = vmalloc_to_page(vaddr);
+
+		if (!page) {
+			sg_free_table(sgt);
+			kfree(attach);
+			return -ENOMEM;
+		}
+		sg_set_page(sg, page, PAGE_SIZE, 0);
+		vaddr += PAGE_SIZE;
+	}
+
+	attach->dma_dir = DMA_NONE;
+	dbuf_attach->priv = attach;
+	return 0;
+}
+
+static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
+	struct dma_buf_attachment *db_attach)
+{
+	struct vb2_vmalloc_attachment *attach = db_attach->priv;
+	struct sg_table *sgt;
+
+	if (!attach)
+		return;
+
+	sgt = &attach->sgt;
+
+	/* release the scatterlist cache */
+	if (attach->dma_dir != DMA_NONE)
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+	sg_free_table(sgt);
+	kfree(attach);
+	db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
+	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+	struct vb2_vmalloc_attachment *attach = db_attach->priv;
+	/* stealing dmabuf mutex to serialize map/unmap operations */
+	struct mutex *lock = &db_attach->dmabuf->lock;
+	struct sg_table *sgt;
+	int ret;
+
+	mutex_lock(lock);
+
+	sgt = &attach->sgt;
+	/* return previously mapped sg table */
+	if (attach->dma_dir == dma_dir) {
+		mutex_unlock(lock);
+		return sgt;
+	}
+
+	/* release any previous cache */
+	if (attach->dma_dir != DMA_NONE) {
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+		attach->dma_dir = DMA_NONE;
+	}
+
+	/* mapping to the client with new direction */
+	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
+	if (ret <= 0) {
+		pr_err("failed to map scatterlist\n");
+		mutex_unlock(lock);
+		return ERR_PTR(-EIO);
+	}
+
+	attach->dma_dir = dma_dir;
+
+	mutex_unlock(lock);
+
+	return sgt;
+}
+
+static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+	struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+	/* nothing to be done here */
+}
+
+static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
+	vb2_vmalloc_put(dbuf->priv);
+}
+
+static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+	struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+	return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+	struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+	return buf->vaddr;
+}
+
+static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+	struct vm_area_struct *vma)
+{
+	return vb2_vmalloc_mmap(dbuf->priv, vma);
+}
+
+static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
+	.attach = vb2_vmalloc_dmabuf_ops_attach,
+	.detach = vb2_vmalloc_dmabuf_ops_detach,
+	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
+	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
+	.kmap = vb2_vmalloc_dmabuf_ops_kmap,
+	.kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
+	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
+	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
+	.release = vb2_vmalloc_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+	struct dma_buf *dbuf;
+
+	if (WARN_ON(!buf->vaddr))
+		return NULL;
+
+	dbuf = dma_buf_export(buf, &vb2_vmalloc_dmabuf_ops, buf->size, flags, NULL);
+	if (IS_ERR(dbuf))
+		return NULL;
+
+	/* dmabuf keeps reference to vb2 buffer */
+	atomic_inc(&buf->refcount);
+
+	return dbuf;
+}
+
+/*********************************************/
 /*       callbacks for DMABUF buffers        */
 /*********************************************/
 
@@ -240,7 +413,7 @@ static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
 }
 
 static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
-	unsigned long size, int write)
+	unsigned long size, enum dma_data_direction dma_dir)
 {
 	struct vb2_vmalloc_buf *buf;
 
@@ -252,7 +425,7 @@ static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
 		return ERR_PTR(-ENOMEM);
 
 	buf->dbuf = dbuf;
-	buf->write = write;
+	buf->dma_dir = dma_dir;
 	buf->size = size;
 
 	return buf;
@@ -264,6 +437,7 @@ const struct vb2_mem_ops vb2_vmalloc_memops = {
 	.put		= vb2_vmalloc_put,
 	.get_userptr	= vb2_vmalloc_get_userptr,
 	.put_userptr	= vb2_vmalloc_put_userptr,
+	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
 	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
 	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
 	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
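With a get_dmabuf memop wired up for both the dma-sg and vmalloc allocators, userspace can export these buffers as dma-buf file descriptors through the existing VIDIOC_EXPBUF ioctl. A minimal userspace sketch, assuming a capture queue whose buffers were already allocated with VIDIOC_REQBUFS (error handling kept to the essentials):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Returns a dma-buf fd for the given buffer index, or -1 on error. */
int export_buffer(int vfd, unsigned int index)
{
        struct v4l2_exportbuffer expbuf;

        memset(&expbuf, 0, sizeof(expbuf));
        expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        expbuf.index = index;           /* buffer from a prior REQBUFS */
        expbuf.flags = O_CLOEXEC;

        if (ioctl(vfd, VIDIOC_EXPBUF, &expbuf) < 0)
                return -1;

        return expbuf.fd;       /* usable with other dma-buf importers */
}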