Commit 4c75f741 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc:
  mmc: at91_mci: fix hanging and rework to match flowcharts
  mmc: at91_mci typo
  sdhci: Fix "Unexpected interrupt" handling
  mmc: fix silly copy-and-paste error
  mmc: move layer init and workqueue to core file
  mmc: refactor host class handling
  mmc: refactor bus operations
  sdhci: add ene controller id
  mmc: bounce requests for simple hosts
parents 6ed911fb ed99c541
@@ -14,3 +14,21 @@ config MMC_BLOCK
mount the filesystem. Almost everyone wishing MMC support
should say Y or M here.
config MMC_BLOCK_BOUNCE
bool "Use bounce buffer for simple hosts"
depends on MMC_BLOCK
default y
help
SD/MMC is a high latency protocol where it is crucial to
send large requests in order to get high performance. Many
controllers, however, are restricted to contiguous memory
(i.e. they can't do scatter-gather), something the kernel
rarely can provide.
Say Y here to help these restricted hosts by bouncing
requests back and forth from a large buffer. You will get
a big performance gain at the cost of up to 64 KiB of
physical memory.
If unsure, say Y here.
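Editor's note, before the diff continues: the bounce path added below boils down to staging a scattered request through one contiguous buffer so a single-segment host sees one large transfer. A minimal userspace sketch of that idea follows; it is illustration only, not part of the commit, and nothing in it is kernel API (struct seg, gather() and scatter() are invented names).

/* Illustration only: gather/scatter through one contiguous "bounce" buffer. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct seg { char *buf; size_t len; };	/* stand-in for a scatterlist entry */

/* Gather all segments into one contiguous buffer (the write/"pre" direction). */
static size_t gather(char *bounce, const struct seg *sg, size_t n)
{
	size_t off = 0;
	for (size_t i = 0; i < n; i++) {
		memcpy(bounce + off, sg[i].buf, sg[i].len);
		off += sg[i].len;
	}
	return off;	/* total length the restricted host sees */
}

/* Scatter the contiguous buffer back into the segments (the read/"post" direction). */
static void scatter(const char *bounce, struct seg *sg, size_t n)
{
	size_t off = 0;
	for (size_t i = 0; i < n; i++) {
		memcpy(sg[i].buf, bounce + off, sg[i].len);
		off += sg[i].len;
	}
}

int main(void)
{
	char a[] = "abcd", b[] = "efg";
	struct seg sg[] = { { a, 4 }, { b, 3 } };
	char bounce[64];

	size_t len = gather(bounce, sg, 2);
	printf("host sees one %zu-byte segment: %.7s\n", len, bounce);

	/* pretend the host overwrote the buffer during a read */
	memcpy(bounce, "1234567", 7);
	scatter(bounce, sg, 2);
	printf("segments after read: %.4s %.3s\n", a, b);
	return 0;
}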
@@ -262,7 +262,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
}
brq.data.sg = mq->sg;
brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);
brq.data.sg_len = mmc_queue_map_sg(mq);
mmc_queue_bounce_pre(mq);
if (brq.data.blocks !=
(req->nr_sectors >> (md->block_bits - 9))) {
@@ -279,6 +281,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
}
mmc_wait_for_req(card->host, &brq.mrq);
mmc_queue_bounce_post(mq);
if (brq.cmd.error) {
printk(KERN_ERR "%s: error %d sending read/write command\n",
req->rq_disk->disk_name, brq.cmd.error);
@@ -17,6 +17,8 @@
#include <linux/mmc/host.h>
#include "queue.h"
#define MMC_QUEUE_BOUNCESZ 65536
#define MMC_QUEUE_SUSPENDED (1 << 0)
/*
@@ -118,6 +120,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
int ret;
unsigned int bouncesz;
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = *mmc_dev(host)->dma_mask;
@@ -127,21 +130,61 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
if (!mq->queue)
return -ENOMEM;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
mq->queue->queuedata = mq;
mq->req = NULL;
mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
GFP_KERNEL);
if (!mq->sg) {
ret = -ENOMEM;
goto cleanup_queue;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto free_bounce_buf;
			}

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto free_sg;
			}
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
	}
init_MUTEX(&mq->thread_sem);
@@ -149,14 +192,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
if (IS_ERR(mq->thread)) {
ret = PTR_ERR(mq->thread);
goto free_sg;
goto free_bounce_sg;
}
return 0;
free_bounce_sg:
if (mq->bounce_sg)
kfree(mq->bounce_sg);
mq->bounce_sg = NULL;
free_sg:
kfree(mq->sg);
mq->sg = NULL;
free_bounce_buf:
if (mq->bounce_buf)
kfree(mq->bounce_buf);
mq->bounce_buf = NULL;
cleanup_queue:
blk_cleanup_queue(mq->queue);
return ret;
@@ -178,9 +228,17 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
/* Then terminate our worker thread */
kthread_stop(mq->thread);
if (mq->bounce_sg)
kfree(mq->bounce_sg);
mq->bounce_sg = NULL;
kfree(mq->sg);
mq->sg = NULL;
if (mq->bounce_buf)
kfree(mq->bounce_buf);
mq->bounce_buf = NULL;
blk_cleanup_queue(mq->queue);
mq->card = NULL;
@@ -231,3 +289,108 @@ void mmc_queue_resume(struct mmc_queue *mq)
}
}
static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = page_address(dst->page) + dst->offset;
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = page_address(src->page) + src->offset;
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);
		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	/*
	 * Shortcut in the event we only get a single entry.
	 */
	if (sg_len == 1) {
		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
		return 1;
	}

	mq->sg[0].page = virt_to_page(mq->bounce_buf);
	mq->sg[0].offset = offset_in_page(mq->bounce_buf);
	mq->sg[0].length = 0;

	while (sg_len) {
		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
		sg_len--;
	}

	return 1;
}
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != WRITE)
		return;

	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
}

void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != READ)
		return;

	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
}
@@ -14,6 +14,9 @@ struct mmc_queue {
void *data;
struct request_queue *queue;
struct scatterlist *sg;
char *bounce_buf;
struct scatterlist *bounce_sg;
unsigned int bounce_sg_len;
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
@@ -21,4 +24,8 @@ extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *);
extern void mmc_queue_bounce_pre(struct mmc_queue *);
extern void mmc_queue_bounce_post(struct mmc_queue *);
#endif
@@ -7,5 +7,6 @@ ifeq ($(CONFIG_MMC_DEBUG),y)
endif
obj-$(CONFIG_MMC) += mmc_core.o
mmc_core-y := core.o sysfs.o mmc.o mmc_ops.o sd.o sd_ops.o
mmc_core-y := core.o sysfs.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o
/*
* linux/drivers/mmc/core/bus.c
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright (C) 2007 Pierre Ossman
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* MMC card bus driver model
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "sysfs.h"
#include "core.h"
#include "bus.h"
#define dev_to_mmc_card(d) container_of(d, struct mmc_card, dev)
#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
static ssize_t mmc_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mmc_card *card = dev_to_mmc_card(dev);
switch (card->type) {
case MMC_TYPE_MMC:
return sprintf(buf, "MMC\n");
case MMC_TYPE_SD:
return sprintf(buf, "SD\n");
default:
return -EFAULT;
}
}
static struct device_attribute mmc_dev_attrs[] = {
MMC_ATTR_RO(type),
__ATTR_NULL,
};
/*
* This currently matches any MMC driver to any MMC card - drivers
* themselves make the decision whether to drive this card in their
* probe method.
*/
static int mmc_bus_match(struct device *dev, struct device_driver *drv)
{
return 1;
}
static int
mmc_bus_uevent(struct device *dev, char **envp, int num_envp, char *buf,
int buf_size)
{
struct mmc_card *card = dev_to_mmc_card(dev);
int retval = 0, i = 0, length = 0;
#define add_env(fmt,val) do { \
retval = add_uevent_var(envp, num_envp, &i, \
buf, buf_size, &length, \
fmt, val); \
if (retval) \
return retval; \
} while (0);
switch (card->type) {
case MMC_TYPE_MMC:
add_env("MMC_TYPE=%s", "MMC");
break;
case MMC_TYPE_SD:
add_env("MMC_TYPE=%s", "SD");
break;
}
add_env("MMC_NAME=%s", mmc_card_name(card));
#undef add_env
envp[i] = NULL;
return 0;
}
static int mmc_bus_probe(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
struct mmc_card *card = dev_to_mmc_card(dev);
return drv->probe(card);
}
static int mmc_bus_remove(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
struct mmc_card *card = dev_to_mmc_card(dev);
drv->remove(card);
return 0;
}
static int mmc_bus_suspend(struct device *dev, pm_message_t state)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
struct mmc_card *card = dev_to_mmc_card(dev);
int ret = 0;
if (dev->driver && drv->suspend)
ret = drv->suspend(card, state);
return ret;
}
static int mmc_bus_resume(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
struct mmc_card *card = dev_to_mmc_card(dev);
int ret = 0;
if (dev->driver && drv->resume)
ret = drv->resume(card);
return ret;
}
static struct bus_type mmc_bus_type = {
.name = "mmc",
.dev_attrs = mmc_dev_attrs,
.match = mmc_bus_match,
.uevent = mmc_bus_uevent,
.probe = mmc_bus_probe,
.remove = mmc_bus_remove,
.suspend = mmc_bus_suspend,
.resume = mmc_bus_resume,
};
int mmc_register_bus(void)
{
return bus_register(&mmc_bus_type);
}
void mmc_unregister_bus(void)
{
bus_unregister(&mmc_bus_type);
}
/**
* mmc_register_driver - register a media driver
* @drv: MMC media driver
*/
int mmc_register_driver(struct mmc_driver *drv)
{
drv->drv.bus = &mmc_bus_type;
return driver_register(&drv->drv);
}
EXPORT_SYMBOL(mmc_register_driver);
/**
* mmc_unregister_driver - unregister a media driver
* @drv: MMC media driver
*/
void mmc_unregister_driver(struct mmc_driver *drv)
{
drv->drv.bus = &mmc_bus_type;
driver_unregister(&drv->drv);
}
EXPORT_SYMBOL(mmc_unregister_driver);
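Editor's note: a media driver sits on this bus the same way mmc_block does. The following registration sketch is not part of the commit; it only illustrates the struct mmc_driver / mmc_register_driver() interface used above, and every my_mmc_* name is made up.

/*
 * Hypothetical media driver registering on the mmc bus type above.
 * Illustration only; modeled on drivers/mmc/card/block.c of this era.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

static int my_mmc_probe(struct mmc_card *card)
{
	printk(KERN_INFO "my_mmc: bound to %s\n", mmc_card_name(card));
	return 0;			/* nonzero would reject the card */
}

static void my_mmc_remove(struct mmc_card *card)
{
	printk(KERN_INFO "my_mmc: %s going away\n", mmc_card_name(card));
}

static struct mmc_driver my_mmc_driver = {
	.drv		= {
		.name	= "my_mmc",
	},
	.probe		= my_mmc_probe,
	.remove		= my_mmc_remove,
};

static int __init my_mmc_init(void)
{
	return mmc_register_driver(&my_mmc_driver);
}

static void __exit my_mmc_exit(void)
{
	mmc_unregister_driver(&my_mmc_driver);
}

module_init(my_mmc_init);
module_exit(my_mmc_exit);
MODULE_LICENSE("GPL");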
static void mmc_release_card(struct device *dev)
{
struct mmc_card *card = dev_to_mmc_card(dev);
kfree(card);
}
/*
* Allocate and initialise a new MMC card structure.
*/
struct mmc_card *mmc_alloc_card(struct mmc_host *host)
{
struct mmc_card *card;
card = kmalloc(sizeof(struct mmc_card), GFP_KERNEL);
if (!card)
return ERR_PTR(-ENOMEM);
memset(card, 0, sizeof(struct mmc_card));
card->host = host;
device_initialize(&card->dev);
card->dev.parent = mmc_classdev(host);
card->dev.bus = &mmc_bus_type;
card->dev.release = mmc_release_card;
return card;
}
/*
* Register a new MMC card with the driver model.
*/
int mmc_add_card(struct mmc_card *card)
{
int ret;
snprintf(card->dev.bus_id, sizeof(card->dev.bus_id),
"%s:%04x", mmc_hostname(card->host), card->rca);
card->dev.uevent_suppress = 1;
ret = device_add(&card->dev);
if (ret)
return ret;
if (card->host->bus_ops->sysfs_add) {
ret = card->host->bus_ops->sysfs_add(card->host, card);
if (ret) {
device_del(&card->dev);
return ret;
}
}
card->dev.uevent_suppress = 0;
kobject_uevent(&card->dev.kobj, KOBJ_ADD);
mmc_card_set_present(card);
return 0;
}
/*
* Unregister a new MMC card with the driver model, and
* (eventually) free it.
*/
void mmc_remove_card(struct mmc_card *card)
{
if (mmc_card_present(card)) {
if (card->host->bus_ops->sysfs_remove)
card->host->bus_ops->sysfs_remove(card->host, card);
device_del(&card->dev);
}
put_device(&card->dev);
}
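Editor's note: the real consumers of mmc_alloc_card()/mmc_add_card()/mmc_remove_card() are the attach paths in mmc.c and sd.c, which this merge does not show. The sketch below is a rough, hypothetical rendering of that lifecycle; example_attach and the field values chosen are illustrative only.

/*
 * Hypothetical sketch of the card lifecycle defined by the helpers above,
 * written as if it lived inside drivers/mmc/core. Not part of the commit.
 */
#include <linux/err.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "bus.h"

static int example_attach(struct mmc_host *host, unsigned int rca)
{
	struct mmc_card *card;
	int err;

	card = mmc_alloc_card(host);	/* device_initialize()d, not yet visible */
	if (IS_ERR(card))
		return PTR_ERR(card);

	card->rca = rca;		/* identity must be set before mmc_add_card() */
	card->type = MMC_TYPE_SD;

	err = mmc_add_card(card);	/* device_add() + sysfs + KOBJ_ADD uevent */
	if (err) {
		put_device(&card->dev);	/* allocated but never added: drop our ref */
		return err;
	}

	return 0;
}

/* Teardown: mmc_remove_card() does device_del() (if present) and put_device(). */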
/*
* linux/drivers/mmc/core/bus.h
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright 2007 Pierre Ossman
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _MMC_CORE_BUS_H
#define _MMC_CORE_BUS_H
struct mmc_card *mmc_alloc_card(struct mmc_host *host);
int mmc_add_card(struct mmc_card *card);
void mmc_remove_card(struct mmc_card *card);
int mmc_register_bus(void);
void mmc_unregister_bus(void);
#endif
@@ -27,7 +27,8 @@
#include <linux/mmc/sd.h>
#include "core.h"
#include "sysfs.h"
#include "bus.h"
#include "host.h"
#include "mmc_ops.h"
#include "sd_ops.h"
@@ -35,6 +36,25 @@
extern int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
extern int mmc_attach_sd(struct mmc_host *host, u32 ocr);
static struct workqueue_struct *workqueue;
/*
* Internal function. Schedule delayed work in the MMC work queue.
*/
static int mmc_schedule_delayed_work(struct delayed_work *work,
unsigned long delay)
{
return queue_delayed_work(workqueue, work, delay);
}
/*
* Internal function. Flush all scheduled work from the MMC work queue.
*/
static void mmc_flush_scheduled_work(void)
{
flush_workqueue(workqueue);
}
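Editor's note: these wrappers give the core a private workqueue for deferred work such as card detection, instead of using the shared events queue. The snippet below is a hedged illustration of how code inside core.c could arm such work; the example_* names are invented and the real detect path is not part of this hunk.

/*
 * Hypothetical illustration only: arming deferred work on the private
 * MMC workqueue via mmc_schedule_delayed_work() above.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_detect_work(struct work_struct *work)
{
	/* rescan the bus, finish a card-detect debounce, etc. */
}

static DECLARE_DELAYED_WORK(example_detect, example_detect_work);

static void example_detect_change(unsigned long delay_ms)
{
	/* runs on the MMC workqueue rather than the shared kernel one */
	mmc_schedule_delayed_work(&example_detect, msecs_to_jiffies(delay_ms));
}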
/**