diff --git a/block/as-iosched.c b/block/as-iosched.c
index 4c6fafbba9330bf6d785efc7e511b93c936f998f..71f0abb219eee2556d41dcc9f1f1e01834b6d8aa 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -745,11 +745,13 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
  */
 static int as_can_anticipate(struct as_data *ad, struct request *rq)
 {
+#if 0 /* disable for now, we need to check tag level as well */
 	/*
 	 * SSD device without seek penalty, disable idling
 	 */
-	if (blk_queue_nonrot(ad->q))
+	if (blk_queue_nonrot(ad->q))
 		return 0;
+#endif
 
 	if (!ad->io_context)
 		/*
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 03a5953bb5df0cb16eb004bf8acb9bdbf3baad22..6a062eebbd15301320e7491b5dd45d17f2204a3c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -879,9 +879,11 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	unsigned long sl;
 
 	/*
-	 * SSD device without seek penalty, disable idling
+	 * SSD device without seek penalty, disable idling. But only do so
+	 * for devices that support queuing, otherwise we still have a problem
+	 * with sync vs async workloads.
 	 */
-	if (blk_queue_nonrot(cfqd->queue))
+	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
 		return;
 
 	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));