1
0
Fork 0
mirror of synced 2025-03-06 20:59:54 +01:00

mmc: sh_mmcif: Use sg_miter for PIO

Use sg_miter iterator instead of sg_virt() and custom code
to loop over the scatterlist. The memory iterator will do
bounce buffering if the page happens to be located in high memory,
which the driver may or may not be using.

Suggested-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/linux-mmc/20240122073423.GA25859@lst.de/
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Link: https://lore.kernel.org/r/20240127-mmc-proper-kmap-v2-9-d8e732aa97d1@linaro.org
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
This commit is contained in:
Linus Walleij 2024-01-27 01:19:56 +01:00 committed by Ulf Hansson
parent e8a167b848
commit 27b57277d9

View file

@@ -227,14 +227,12 @@ struct sh_mmcif_host {
bool dying; bool dying;
long timeout; long timeout;
void __iomem *addr; void __iomem *addr;
u32 *pio_ptr;
spinlock_t lock; /* protect sh_mmcif_host::state */ spinlock_t lock; /* protect sh_mmcif_host::state */
enum sh_mmcif_state state; enum sh_mmcif_state state;
enum sh_mmcif_wait_for wait_for; enum sh_mmcif_wait_for wait_for;
struct delayed_work timeout_work; struct delayed_work timeout_work;
size_t blocksize; size_t blocksize;
int sg_idx; struct sg_mapping_iter sg_miter;
int sg_blkidx;
bool power; bool power;
bool ccs_enable; /* Command Completion Signal support */ bool ccs_enable; /* Command Completion Signal support */
bool clk_ctrl2_enable; bool clk_ctrl2_enable;
@@ -600,32 +598,17 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
return ret; return ret;
} }
static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
struct mmc_data *data = host->mrq->data;
host->sg_blkidx += host->blocksize;
/* data->sg->length must be a multiple of host->blocksize? */
BUG_ON(host->sg_blkidx > data->sg->length);
if (host->sg_blkidx == data->sg->length) {
host->sg_blkidx = 0;
if (++host->sg_idx < data->sg_len)
host->pio_ptr = sg_virt(++data->sg);
} else {
host->pio_ptr = p;
}
return host->sg_idx != data->sg_len;
}
static void sh_mmcif_single_read(struct sh_mmcif_host *host, static void sh_mmcif_single_read(struct sh_mmcif_host *host,
struct mmc_request *mrq) struct mmc_request *mrq)
{ {
struct mmc_data *data = mrq->data;
host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK) + 3; BLOCK_SIZE_MASK) + 3;
sg_miter_start(&host->sg_miter, data->sg, data->sg_len,
SG_MITER_ATOMIC | SG_MITER_TO_SG);
host->wait_for = MMCIF_WAIT_FOR_READ; host->wait_for = MMCIF_WAIT_FOR_READ;
/* buf read enable */ /* buf read enable */
@@ -634,20 +617,32 @@ static void sh_mmcif_single_read(struct sh_mmcif_host *host,
static bool sh_mmcif_read_block(struct sh_mmcif_host *host) static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{ {
struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host); struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data; struct mmc_data *data = host->mrq->data;
u32 *p = sg_virt(data->sg); u32 *p;
int i; int i;
if (host->sd_error) { if (host->sd_error) {
sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host); data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error); dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false; return false;
} }
if (!sg_miter_next(sgm)) {
/* This should not happen on single blocks */
sg_miter_stop(sgm);
return false;
}
p = sgm->addr;
for (i = 0; i < host->blocksize / 4; i++) for (i = 0; i < host->blocksize / 4; i++)
*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
sg_miter_stop(&host->sg_miter);
/* buffer read end */ /* buffer read end */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
host->wait_for = MMCIF_WAIT_FOR_READ_END; host->wait_for = MMCIF_WAIT_FOR_READ_END;
@@ -666,34 +661,40 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK; BLOCK_SIZE_MASK;
sg_miter_start(&host->sg_miter, data->sg, data->sg_len,
SG_MITER_ATOMIC | SG_MITER_TO_SG);
host->wait_for = MMCIF_WAIT_FOR_MREAD; host->wait_for = MMCIF_WAIT_FOR_MREAD;
host->sg_idx = 0;
host->sg_blkidx = 0;
host->pio_ptr = sg_virt(data->sg);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
} }
static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{ {
struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host); struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data; struct mmc_data *data = host->mrq->data;
u32 *p = host->pio_ptr; u32 *p;
int i; int i;
if (host->sd_error) { if (host->sd_error) {
sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host); data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error); dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false; return false;
} }
BUG_ON(!data->sg->length); if (!sg_miter_next(sgm)) {
sg_miter_stop(sgm);
return false;
}
p = sgm->addr;
for (i = 0; i < host->blocksize / 4; i++) for (i = 0; i < host->blocksize / 4; i++)
*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
if (!sh_mmcif_next_block(host, p)) sgm->consumed = host->blocksize;
return false;
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
@@ -703,9 +704,14 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
static void sh_mmcif_single_write(struct sh_mmcif_host *host, static void sh_mmcif_single_write(struct sh_mmcif_host *host,
struct mmc_request *mrq) struct mmc_request *mrq)
{ {
struct mmc_data *data = mrq->data;
host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK) + 3; BLOCK_SIZE_MASK) + 3;
sg_miter_start(&host->sg_miter, data->sg, data->sg_len,
SG_MITER_ATOMIC | SG_MITER_FROM_SG);
host->wait_for = MMCIF_WAIT_FOR_WRITE; host->wait_for = MMCIF_WAIT_FOR_WRITE;
/* buf write enable */ /* buf write enable */
@@ -714,20 +720,32 @@ static void sh_mmcif_single_write(struct sh_mmcif_host *host,
static bool sh_mmcif_write_block(struct sh_mmcif_host *host) static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{ {
struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host); struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data; struct mmc_data *data = host->mrq->data;
u32 *p = sg_virt(data->sg); u32 *p;
int i; int i;
if (host->sd_error) { if (host->sd_error) {
sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host); data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error); dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false; return false;
} }
if (!sg_miter_next(sgm)) {
/* This should not happen on single blocks */
sg_miter_stop(sgm);
return false;
}
p = sgm->addr;
for (i = 0; i < host->blocksize / 4; i++) for (i = 0; i < host->blocksize / 4; i++)
sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
sg_miter_stop(&host->sg_miter);
/* buffer write end */ /* buffer write end */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
host->wait_for = MMCIF_WAIT_FOR_WRITE_END; host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
@@ -746,34 +764,40 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK; BLOCK_SIZE_MASK;
sg_miter_start(&host->sg_miter, data->sg, data->sg_len,
SG_MITER_ATOMIC | SG_MITER_FROM_SG);
host->wait_for = MMCIF_WAIT_FOR_MWRITE; host->wait_for = MMCIF_WAIT_FOR_MWRITE;
host->sg_idx = 0;
host->sg_blkidx = 0;
host->pio_ptr = sg_virt(data->sg);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
} }
static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{ {
struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host); struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data; struct mmc_data *data = host->mrq->data;
u32 *p = host->pio_ptr; u32 *p;
int i; int i;
if (host->sd_error) { if (host->sd_error) {
sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host); data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error); dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false; return false;
} }
BUG_ON(!data->sg->length); if (!sg_miter_next(sgm)) {
sg_miter_stop(sgm);
return false;
}
p = sgm->addr;
for (i = 0; i < host->blocksize / 4; i++) for (i = 0; i < host->blocksize / 4; i++)
sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
if (!sh_mmcif_next_block(host, p)) sgm->consumed = host->blocksize;
return false;
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);