diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 28ef76bd523046e39cef036e50377fd143f19a25..f20336bc59c85849c5158000aad2e5e72918b857 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -704,9 +704,6 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
 	struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
 	int max_segments = mdev->ldev->dc.max_bio_bvecs;
 
-	if (b->merge_bvec_fn && !mdev->ldev->dc.use_bmbv)
-		max_seg_s = PAGE_SIZE;
-
 	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
 
 	blk_queue_max_hw_sectors(q, max_seg_s >> 9);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 388a3e8bb0d0f9baa1ea7240cb0e08b33f5de8ee..a04ec01ab3ce0f1b11b7b1dc05e75971577e6399 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3011,7 +3011,11 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
 			ldsc = 1;
 		}
 
-		max_seg_s = be32_to_cpu(p->max_segment_size);
+		if (mdev->agreed_pro_version < 94)
+			max_seg_s = be32_to_cpu(p->max_segment_size);
+		else /* drbd 8.3.8 onwards */
+			max_seg_s = DRBD_MAX_SEGMENT_SIZE;
+
 		if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
 			drbd_setup_queue_param(mdev, max_seg_s);
 
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index d8d9bbfca3b829463b8bc6d23a4a604d494e492d..343e0e6dd532c57035dee819d9b7832983eae821 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1110,7 +1110,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
 	} else if (limit && get_ldev(mdev)) {
 		struct request_queue * const b =
 			mdev->ldev->backing_bdev->bd_disk->queue;
-		if (b->merge_bvec_fn && mdev->ldev->dc.use_bmbv) {
+		if (b->merge_bvec_fn) {
 			backing_limit = b->merge_bvec_fn(b, bvm, bvec);
 			limit = min(limit, backing_limit);
 		}
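
For illustration only (not part of the patch), a small stand-alone C sketch of the decision the receive_sizes() hunk introduces: a peer that agreed on a protocol version before 94 still dictates the segment size it advertised, while from drbd 8.3.8 onwards (protocol 94 and later) the advertised value is ignored and the fixed local maximum is used. The DRBD_MAX_SEGMENT_SIZE value below is a stand-in, not the driver's real definition.

/*
 * Sketch only: a user-space mirror of the version gate added to
 * receive_sizes().  DRBD_MAX_SEGMENT_SIZE is a stand-in value here.
 */
#include <stdint.h>
#include <stdio.h>

#define DRBD_MAX_SEGMENT_SIZE (1U << 16)	/* stand-in value */

static uint32_t pick_max_seg_s(int agreed_pro_version, uint32_t peer_max_seg_s)
{
	if (agreed_pro_version < 94)
		return peer_max_seg_s;		/* honour what the peer sent */
	return DRBD_MAX_SEGMENT_SIZE;		/* drbd 8.3.8 onwards */
}

int main(void)
{
	printf("proto 93, peer advertises 4096 -> %u\n", pick_max_seg_s(93, 4096));
	printf("proto 94, peer advertises 4096 -> %u\n", pick_max_seg_s(94, 4096));
	return 0;
}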