Android-x86 kernel: Commit


Commit MetaInfo

Revision: 80933739d5655b8a22ce23451c6415d9c7a0387a (tree)
Time: 2020-03-17 03:13:07
Author: Swathi Sridhar <swatsrid@code...>
Committer: Suren Baghdasaryan

Log Message

ANDROID: GKI: dma-buf: Add support for XXX_cpu_access_umapped ops

Userspace clients will be able to restrict cache maintenance to only
the subset of the dma-buf which is mmap(ed) by setting the
DMA_BUF_SYNC_USER_MAPPED flag when calling the DMA_BUF_IOCTL_SYNC IOCTL.

Signed-off-by: Swathi Sridhar <swatsrid@codeaurora.org>

Bug: 150611569
Test: build
(cherry-picked from bbbc80b6d8b75ffea6a0eb1f53ab503ccf0011f1)
[surenb: partial cherry-pick from
bbbc80b6d8b7 ion : Merge ion changes from ...
to resolve ABI diffs caused by {begin/end}_cpu_access_umapped
dma_buf_ops.
changed dma_buf_end_cpu_access_umapped to be static.]
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: Ic2029c5218ca99330a0e7e6128e12ac29cdd1c08
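
For reference, a minimal userspace sketch of the flow the log message describes, assuming a kernel and uapi header that carry this patch (DMA_BUF_SYNC_USER_MAPPED is not part of the mainline uapi); dmabuf_fd and len are placeholders for a dma-buf file descriptor and mapping length obtained from an exporter such as ion:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>      /* struct dma_buf_sync, DMA_BUF_IOCTL_SYNC */

/* Write through a mmap(ed) view of a dma-buf, restricting cache maintenance
 * to the mapped range with DMA_BUF_SYNC_USER_MAPPED (added by this patch).
 * dmabuf_fd and len are assumed to come from the exporter.
 */
static int fill_mapped_region(int dmabuf_fd, size_t len)
{
        struct dma_buf_sync sync = { 0 };
        void *vaddr;

        vaddr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                     dmabuf_fd, 0);
        if (vaddr == MAP_FAILED)
                return -1;

        /* Begin CPU access; maintenance is limited to the mmap(ed) subset. */
        sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE |
                     DMA_BUF_SYNC_USER_MAPPED;
        if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
                goto fail;

        memset(vaddr, 0, len);          /* CPU writes through the mapping */

        /* End CPU access before handing the buffer back to the device. */
        sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE |
                     DMA_BUF_SYNC_USER_MAPPED;
        if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
                goto fail;

        munmap(vaddr, len);
        return 0;

fail:
        munmap(vaddr, len);
        return -1;
}

Without DMA_BUF_SYNC_USER_MAPPED, the same two ioctl calls fall back to dma_buf_begin_cpu_access()/dma_buf_end_cpu_access() and cover the whole buffer, as the dma_buf_ioctl() change in the diff below shows.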

Change Summary

Incremental Difference

--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -360,12 +360,19 @@ out_unlock:
         return ret;
 }
 
+static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
+                                            enum dma_data_direction direction);
+
+
+static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
+                                          enum dma_data_direction direction);
+
 static long dma_buf_ioctl(struct file *file,
                           unsigned int cmd, unsigned long arg)
 {
         struct dma_buf *dmabuf;
         struct dma_buf_sync sync;
-        enum dma_data_direction direction;
+        enum dma_data_direction dir;
         int ret;
 
         dmabuf = file->private_data;
@@ -380,22 +387,30 @@ static long dma_buf_ioctl(struct file *file,
 
         switch (sync.flags & DMA_BUF_SYNC_RW) {
         case DMA_BUF_SYNC_READ:
-                direction = DMA_FROM_DEVICE;
+                dir = DMA_FROM_DEVICE;
                 break;
         case DMA_BUF_SYNC_WRITE:
-                direction = DMA_TO_DEVICE;
+                dir = DMA_TO_DEVICE;
                 break;
         case DMA_BUF_SYNC_RW:
-                direction = DMA_BIDIRECTIONAL;
+                dir = DMA_BIDIRECTIONAL;
                 break;
         default:
                 return -EINVAL;
         }
 
         if (sync.flags & DMA_BUF_SYNC_END)
-                ret = dma_buf_end_cpu_access(dmabuf, direction);
+                if (sync.flags & DMA_BUF_SYNC_USER_MAPPED)
+                        ret = dma_buf_end_cpu_access_umapped(dmabuf,
+                                                             dir);
+                else
+                        ret = dma_buf_end_cpu_access(dmabuf, dir);
         else
-                ret = dma_buf_begin_cpu_access(dmabuf, direction);
+                if (sync.flags & DMA_BUF_SYNC_USER_MAPPED)
+                        ret = dma_buf_begin_cpu_access_umapped(dmabuf,
+                                                               dir);
+                else
+                        ret = dma_buf_begin_cpu_access(dmabuf, dir);
 
         return ret;
 
@@ -862,7 +877,8 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
  *   - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
  *     to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
  *     want (with the new data being consumed by say the GPU or the scanout
- *     device)
+ *     device). Optionally SYNC_USER_MAPPED can be set to restrict cache
+ *     maintenance to only the parts of the buffer which are mmap(ed).
  *   - munmap once you don't need the buffer any more
  *
  * For correctness and optimal performance, it is always required to use
@@ -949,6 +965,27 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 }
 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
 
+static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
+                                            enum dma_data_direction direction)
+{
+        int ret = 0;
+
+        if (WARN_ON(!dmabuf))
+                return -EINVAL;
+
+        if (dmabuf->ops->begin_cpu_access_umapped)
+                ret = dmabuf->ops->begin_cpu_access_umapped(dmabuf, direction);
+
+        /* Ensure that all fences are waited upon - but we first allow
+         * the native handler the chance to do so more efficiently if it
+         * chooses. A double invocation here will be reasonably cheap no-op.
+         */
+        if (ret == 0)
+                ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
+        return ret;
+}
+
 int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
                                      enum dma_data_direction direction,
                                      unsigned int offset, unsigned int len)
@@ -999,6 +1036,19 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 
+static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
+                                          enum dma_data_direction direction)
+{
+        int ret = 0;
+
+        WARN_ON(!dmabuf);
+
+        if (dmabuf->ops->end_cpu_access_umapped)
+                ret = dmabuf->ops->end_cpu_access_umapped(dmabuf, direction);
+
+        return ret;
+}
+
 int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
                                    enum dma_data_direction direction,
                                    unsigned int offset, unsigned int len)
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -188,6 +188,33 @@ struct dma_buf_ops {
         int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
 
         /**
+         * @begin_cpu_access_umapped:
+         *
+         * This is called as a result of the DMA_BUF_IOCTL_SYNC IOCTL being
+         * called with the DMA_BUF_SYNC_START and DMA_BUF_SYNC_USER_MAPPED flags
+         * set. It allows the exporter to ensure that the mmap(ed) portions of
+         * the buffer are available for cpu access - the exporter might need to
+         * allocate or swap-in and pin the backing storage.
+         * The exporter also needs to ensure that cpu access is
+         * coherent for the access direction. The direction can be used by the
+         * exporter to optimize the cache flushing, i.e. access with a different
+         * direction (read instead of write) might return stale or even bogus
+         * data (e.g. when the exporter needs to copy the data to temporary
+         * storage).
+         *
+         * This callback is optional.
+         *
+         * Returns:
+         *
+         * 0 on success or a negative error code on failure. This can for
+         * example fail when the backing storage can't be allocated. Can also
+         * return -ERESTARTSYS or -EINTR when the call has been interrupted and
+         * needs to be restarted.
+         */
+        int (*begin_cpu_access_umapped)(struct dma_buf *dmabuf,
+                                        enum dma_data_direction);
+
+        /**
          * @begin_cpu_access_partial:
          *
          * This is called from dma_buf_begin_cpu_access_partial() and allows the
@@ -242,6 +269,28 @@ struct dma_buf_ops {
         int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
 
         /**
+         * @end_cpu_access_umapped:
+         *
+         * This is called as a result of the DMA_BUF_IOCTL_SYNC IOCTL being
+         * called with the DMA_BUF_SYNC_END and DMA_BUF_SYNC_USER_MAPPED flags
+         * set. The exporter can use this to limit cache flushing to only those
+         * parts of the buffer which are mmap(ed) and to unpin any resources
+         * pinned in @begin_cpu_access_umapped.
+         * The result of any dma_buf kmap calls after end_cpu_access_umapped is
+         * undefined.
+         *
+         * This callback is optional.
+         *
+         * Returns:
+         *
+         * 0 on success or a negative error code on failure. Can return
+         * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
+         * to be restarted.
+         */
+        int (*end_cpu_access_umapped)(struct dma_buf *dmabuf,
+                                      enum dma_data_direction);
+
+        /**
          * @end_cpu_access_partial:
          *
          * This is called from dma_buf_end_cpu_access_partial() when the
--- a/include/uapi/linux/dma-buf.h
+++ b/include/uapi/linux/dma-buf.h
@@ -32,8 +32,10 @@ struct dma_buf_sync {
 #define DMA_BUF_SYNC_RW        (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
 #define DMA_BUF_SYNC_START     (0 << 2)
 #define DMA_BUF_SYNC_END       (1 << 2)
+#define DMA_BUF_SYNC_USER_MAPPED (1 << 3)
+
 #define DMA_BUF_SYNC_VALID_FLAGS_MASK \
-        (DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)
+        (DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END | DMA_BUF_SYNC_USER_MAPPED)
 
 #define DMA_BUF_NAME_LEN 32
 
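
For illustration only, a sketch of how an exporter might implement the two optional callbacks documented above. The exporter, struct my_buffer, its fields, and the user_mapped tracking are assumptions made for this sketch, not part of the patch; a real exporter such as ion restricts maintenance to the actual mmap(ed) ranges rather than using a single flag:

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical per-buffer state kept by the exporter. */
struct my_buffer {
        struct device *dev;
        struct sg_table *sgt;
        bool user_mapped;       /* hypothetically set/cleared by the exporter's mmap/vm_ops */
};

static int my_begin_cpu_access_umapped(struct dma_buf *dmabuf,
                                       enum dma_data_direction dir)
{
        struct my_buffer *buf = dmabuf->priv;

        /* Skip cache maintenance entirely when nothing is mmap(ed). */
        if (buf->user_mapped)
                dma_sync_sg_for_cpu(buf->dev, buf->sgt->sgl,
                                    buf->sgt->orig_nents, dir);
        return 0;
}

static int my_end_cpu_access_umapped(struct dma_buf *dmabuf,
                                     enum dma_data_direction dir)
{
        struct my_buffer *buf = dmabuf->priv;

        if (buf->user_mapped)
                dma_sync_sg_for_device(buf->dev, buf->sgt->sgl,
                                       buf->sgt->orig_nents, dir);
        return 0;
}

static const struct dma_buf_ops my_dma_buf_ops = {
        /* ... the exporter's map_dma_buf/unmap_dma_buf/mmap/release ops ... */
        .begin_cpu_access_umapped = my_begin_cpu_access_umapped,
        .end_cpu_access_umapped   = my_end_cpu_access_umapped,
};

Because both callbacks are optional, the core helpers added in drivers/dma-buf/dma-buf.c above only invoke them when the exporter provides them, so exporters that have not been updated keep their existing begin/end_cpu_access behaviour.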