git.dujemihanovic.xyz Git - linux.git/commitdiff
xen/blkback: move blkif_get_x86_*_req() into blkback.c
author Juergen Gross <jgross@suse.com>
Fri, 16 Dec 2022 13:49:18 +0000 (14:49 +0100)
committer Juergen Gross <jgross@suse.com>
Tue, 25 Apr 2023 09:09:30 +0000 (11:09 +0200)
There is no need to have the functions blkif_get_x86_32_req() and
blkif_get_x86_64_req() in a header file, as they are used in one place
only.

So move them into the using source file and drop the inline qualifier.

While at it fix some style issues, and simplify the code by variable
reusing and using min() instead of open coding it.

Instead of using barrier() use READ_ONCE() for avoiding multiple reads
of nr_segments.

Signed-off-by: Juergen Gross <jgross@suse.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/common.h

index 243712b59a05973b4fb9cf5c31e54625b2098ebd..c362f4ad80ab07bfb58caff0ed7da37dc1484fc5 100644 (file)
@@ -1072,7 +1072,111 @@ static void end_block_io_op(struct bio *bio)
        bio_put(bio);
 }
 
+static void blkif_get_x86_32_req(struct blkif_request *dst,
+                                const struct blkif_x86_32_request *src)
+{
+       unsigned int i, n;
+
+       dst->operation = READ_ONCE(src->operation);
+
+       switch (dst->operation) {
+       case BLKIF_OP_READ:
+       case BLKIF_OP_WRITE:
+       case BLKIF_OP_WRITE_BARRIER:
+       case BLKIF_OP_FLUSH_DISKCACHE:
+               dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
+               dst->u.rw.handle = src->u.rw.handle;
+               dst->u.rw.id = src->u.rw.id;
+               dst->u.rw.sector_number = src->u.rw.sector_number;
+               n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
+                         dst->u.rw.nr_segments);
+               for (i = 0; i < n; i++)
+                       dst->u.rw.seg[i] = src->u.rw.seg[i];
+               break;
+
+       case BLKIF_OP_DISCARD:
+               dst->u.discard.flag = src->u.discard.flag;
+               dst->u.discard.id = src->u.discard.id;
+               dst->u.discard.sector_number = src->u.discard.sector_number;
+               dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+               break;
+
+       case BLKIF_OP_INDIRECT:
+               dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
+               dst->u.indirect.nr_segments =
+                       READ_ONCE(src->u.indirect.nr_segments);
+               dst->u.indirect.handle = src->u.indirect.handle;
+               dst->u.indirect.id = src->u.indirect.id;
+               dst->u.indirect.sector_number = src->u.indirect.sector_number;
+               n = min(MAX_INDIRECT_PAGES,
+                       INDIRECT_PAGES(dst->u.indirect.nr_segments));
+               for (i = 0; i < n; i++)
+                       dst->u.indirect.indirect_grefs[i] =
+                               src->u.indirect.indirect_grefs[i];
+               break;
+
+       default:
+               /*
+                * Don't know how to translate this op. Only get the
+                * ID so failure can be reported to the frontend.
+                */
+               dst->u.other.id = src->u.other.id;
+               break;
+       }
+}
 
+static void blkif_get_x86_64_req(struct blkif_request *dst,
+                                const struct blkif_x86_64_request *src)
+{
+       unsigned int i, n;
+
+       dst->operation = READ_ONCE(src->operation);
+
+       switch (dst->operation) {
+       case BLKIF_OP_READ:
+       case BLKIF_OP_WRITE:
+       case BLKIF_OP_WRITE_BARRIER:
+       case BLKIF_OP_FLUSH_DISKCACHE:
+               dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
+               dst->u.rw.handle = src->u.rw.handle;
+               dst->u.rw.id = src->u.rw.id;
+               dst->u.rw.sector_number = src->u.rw.sector_number;
+               n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
+                         dst->u.rw.nr_segments);
+               for (i = 0; i < n; i++)
+                       dst->u.rw.seg[i] = src->u.rw.seg[i];
+               break;
+
+       case BLKIF_OP_DISCARD:
+               dst->u.discard.flag = src->u.discard.flag;
+               dst->u.discard.id = src->u.discard.id;
+               dst->u.discard.sector_number = src->u.discard.sector_number;
+               dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+               break;
+
+       case BLKIF_OP_INDIRECT:
+               dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
+               dst->u.indirect.nr_segments =
+                       READ_ONCE(src->u.indirect.nr_segments);
+               dst->u.indirect.handle = src->u.indirect.handle;
+               dst->u.indirect.id = src->u.indirect.id;
+               dst->u.indirect.sector_number = src->u.indirect.sector_number;
+               n = min(MAX_INDIRECT_PAGES,
+                       INDIRECT_PAGES(dst->u.indirect.nr_segments));
+               for (i = 0; i < n; i++)
+                       dst->u.indirect.indirect_grefs[i] =
+                               src->u.indirect.indirect_grefs[i];
+               break;
+
+       default:
+               /*
+                * Don't know how to translate this op. Only get the
+                * ID so failure can be reported to the frontend.
+                */
+               dst->u.other.id = src->u.other.id;
+               break;
+       }
+}
 
 /*
  * Function to copy the from the ring buffer the 'struct blkif_request'
index fab8a8dee0da4e6dbfe2ca146fff36d284a1e9bd..40f67bfc052deb5bb1e9f180cb7b1c1043cb46e2 100644 (file)
@@ -394,100 +394,4 @@ int xen_blkbk_barrier(struct xenbus_transaction xbt,
 struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
 void xen_blkbk_unmap_purged_grants(struct work_struct *work);
 
-static inline void blkif_get_x86_32_req(struct blkif_request *dst,
-                                       struct blkif_x86_32_request *src)
-{
-       int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-       dst->operation = READ_ONCE(src->operation);
-       switch (dst->operation) {
-       case BLKIF_OP_READ:
-       case BLKIF_OP_WRITE:
-       case BLKIF_OP_WRITE_BARRIER:
-       case BLKIF_OP_FLUSH_DISKCACHE:
-               dst->u.rw.nr_segments = src->u.rw.nr_segments;
-               dst->u.rw.handle = src->u.rw.handle;
-               dst->u.rw.id = src->u.rw.id;
-               dst->u.rw.sector_number = src->u.rw.sector_number;
-               barrier();
-               if (n > dst->u.rw.nr_segments)
-                       n = dst->u.rw.nr_segments;
-               for (i = 0; i < n; i++)
-                       dst->u.rw.seg[i] = src->u.rw.seg[i];
-               break;
-       case BLKIF_OP_DISCARD:
-               dst->u.discard.flag = src->u.discard.flag;
-               dst->u.discard.id = src->u.discard.id;
-               dst->u.discard.sector_number = src->u.discard.sector_number;
-               dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
-               break;
-       case BLKIF_OP_INDIRECT:
-               dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
-               dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
-               dst->u.indirect.handle = src->u.indirect.handle;
-               dst->u.indirect.id = src->u.indirect.id;
-               dst->u.indirect.sector_number = src->u.indirect.sector_number;
-               barrier();
-               j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
-               for (i = 0; i < j; i++)
-                       dst->u.indirect.indirect_grefs[i] =
-                               src->u.indirect.indirect_grefs[i];
-               break;
-       default:
-               /*
-                * Don't know how to translate this op. Only get the
-                * ID so failure can be reported to the frontend.
-                */
-               dst->u.other.id = src->u.other.id;
-               break;
-       }
-}
-
-static inline void blkif_get_x86_64_req(struct blkif_request *dst,
-                                       struct blkif_x86_64_request *src)
-{
-       int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-       dst->operation = READ_ONCE(src->operation);
-       switch (dst->operation) {
-       case BLKIF_OP_READ:
-       case BLKIF_OP_WRITE:
-       case BLKIF_OP_WRITE_BARRIER:
-       case BLKIF_OP_FLUSH_DISKCACHE:
-               dst->u.rw.nr_segments = src->u.rw.nr_segments;
-               dst->u.rw.handle = src->u.rw.handle;
-               dst->u.rw.id = src->u.rw.id;
-               dst->u.rw.sector_number = src->u.rw.sector_number;
-               barrier();
-               if (n > dst->u.rw.nr_segments)
-                       n = dst->u.rw.nr_segments;
-               for (i = 0; i < n; i++)
-                       dst->u.rw.seg[i] = src->u.rw.seg[i];
-               break;
-       case BLKIF_OP_DISCARD:
-               dst->u.discard.flag = src->u.discard.flag;
-               dst->u.discard.id = src->u.discard.id;
-               dst->u.discard.sector_number = src->u.discard.sector_number;
-               dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
-               break;
-       case BLKIF_OP_INDIRECT:
-               dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
-               dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
-               dst->u.indirect.handle = src->u.indirect.handle;
-               dst->u.indirect.id = src->u.indirect.id;
-               dst->u.indirect.sector_number = src->u.indirect.sector_number;
-               barrier();
-               j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
-               for (i = 0; i < j; i++)
-                       dst->u.indirect.indirect_grefs[i] =
-                               src->u.indirect.indirect_grefs[i];
-               break;
-       default:
-               /*
-                * Don't know how to translate this op. Only get the
-                * ID so failure can be reported to the frontend.
-                */
-               dst->u.other.id = src->u.other.id;
-               break;
-       }
-}
-
 #endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */