[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH 04/27] drbd: remove assign_p_sizes_qlim



Fold each branch into its only caller.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 drivers/block/drbd/drbd_main.c | 50 ++++++++++++++++------------------
 1 file changed, 23 insertions(+), 27 deletions(-)

diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9676a1d214bc5..74b1b2424efff 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -903,31 +903,6 @@ void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
        }
 }
 
-/* communicated if (agreed_features & DRBD_FF_WSAME) */
-static void
-assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
-                                       struct request_queue *q)
-{
-       if (q) {
-               p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
-               p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
-               p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
-               p->qlim->io_min = cpu_to_be32(queue_io_min(q));
-               p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
-               p->qlim->discard_enabled = blk_queue_discard(q);
-               p->qlim->write_same_capable = 0;
-       } else {
-               q = device->rq_queue;
-               p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
-               p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
-               p->qlim->alignment_offset = 0;
-               p->qlim->io_min = cpu_to_be32(queue_io_min(q));
-               p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
-               p->qlim->discard_enabled = 0;
-               p->qlim->write_same_capable = 0;
-       }
-}
-
 int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
 {
        struct drbd_device *device = peer_device->device;
@@ -957,14 +932,35 @@ int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enu
                q_order_type = drbd_queue_order_type(device);
                max_bio_size = queue_max_hw_sectors(q) << 9;
                max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
-               assign_p_sizes_qlim(device, p, q);
+               p->qlim->physical_block_size =
+                       cpu_to_be32(queue_physical_block_size(q));
+               p->qlim->logical_block_size =
+                       cpu_to_be32(queue_logical_block_size(q));
+               p->qlim->alignment_offset =
+                       cpu_to_be32(queue_alignment_offset(q));
+               p->qlim->io_min = cpu_to_be32(queue_io_min(q));
+               p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
+               p->qlim->discard_enabled = blk_queue_discard(q);
+               p->qlim->write_same_capable =
+                       !!q->limits.max_write_same_sectors;
                put_ldev(device);
        } else {
+               struct request_queue *q = device->rq_queue;
+
+               p->qlim->physical_block_size =
+                       cpu_to_be32(queue_physical_block_size(q));
+               p->qlim->logical_block_size =
+                       cpu_to_be32(queue_logical_block_size(q));
+               p->qlim->alignment_offset = 0;
+               p->qlim->io_min = cpu_to_be32(queue_io_min(q));
+               p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
+               p->qlim->discard_enabled = 0;
+               p->qlim->write_same_capable = 0;
+
                d_size = 0;
                u_size = 0;
                q_order_type = QUEUE_ORDERED_NONE;
                max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
-               assign_p_sizes_qlim(device, p, NULL);
        }
 
        if (peer_device->connection->agreed_pro_version <= 94)
-- 
2.30.2




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.