@@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
 	struct nand_device *nand = spinand_to_nand(spinand);
 	struct mtd_info *mtd = nanddev_to_mtd(nand);
 	struct nand_page_io_req adjreq = *req;
-	unsigned int nbytes = 0;
-	void *buf = NULL;
+	void *buf = spinand->databuf;
+	unsigned int nbytes;
 	u16 column = 0;
 	int ret;
 
-	memset(spinand->databuf, 0xff,
-	       nanddev_page_size(nand) +
-	       nanddev_per_page_oobsize(nand));
+	/*
+	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
+	 * the cache content to 0xFF (depends on vendor implementation), so we
+	 * must fill the page cache entirely even if we only want to program
+	 * the data portion of the page, otherwise we might corrupt the BBM or
+	 * user data previously programmed in OOB area.
+	 */
+	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+	memset(spinand->databuf, 0xff, nbytes);
+	adjreq.dataoffs = 0;
+	adjreq.datalen = nanddev_page_size(nand);
+	adjreq.databuf.out = spinand->databuf;
+	adjreq.ooblen = nanddev_per_page_oobsize(nand);
+	adjreq.ooboffs = 0;
+	adjreq.oobbuf.out = spinand->oobbuf;
 
-	if (req->datalen) {
+	if (req->datalen)
 		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
 		       req->datalen);
-		adjreq.dataoffs = 0;
-		adjreq.datalen = nanddev_page_size(nand);
-		adjreq.databuf.out = spinand->databuf;
-		nbytes = adjreq.datalen;
-		buf = spinand->databuf;
-	}
 
 	if (req->ooblen) {
 		if (req->mode == MTD_OPS_AUTO_OOB)
@@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
 		else
 			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
 			       req->ooblen);
-
-		adjreq.ooblen = nanddev_per_page_oobsize(nand);
-		adjreq.ooboffs = 0;
-		nbytes += nanddev_per_page_oobsize(nand);
-		if (!buf) {
-			buf = spinand->oobbuf;
-			column = nanddev_page_size(nand);
-		}
 	}
 
 	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
@@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
 
 		/*
 		 * We need to use the RANDOM LOAD CACHE operation if there's
-		 * more than one iteration, because the LOAD operation resets
-		 * the cache to 0xff.
+		 * more than one iteration, because the LOAD operation might
+		 * reset the cache to 0xff.
 		 */
 		if (nbytes) {
			column = op.addr.val;
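
For context, here is a minimal standalone sketch of the bounce-buffer strategy
the patch switches to. This is not the kernel code: the page geometry and the
build_cache_image() helper are made up for illustration. The idea is that the
whole cache image (page + OOB) is preset to 0xff and only the caller's
fragments are copied in; bytes left at 0xff are not flipped by a NAND program
operation, so the BBM and previously written OOB data survive even when
PROGRAM LOAD does not clear the on-die cache.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Illustrative page geometry, not taken from any real chip. */
#define PAGE_SIZE	2048
#define OOB_SIZE	64

/*
 * Hypothetical helper mirroring the new spinand_write_to_cache_op() flow:
 * preset the full page + OOB image to 0xff, then overlay the partial
 * data/OOB fragments at their offsets.
 */
static void build_cache_image(uint8_t *cache,
			      const uint8_t *data, size_t dataoffs, size_t datalen,
			      const uint8_t *oob, size_t ooboffs, size_t ooblen)
{
	memset(cache, 0xff, PAGE_SIZE + OOB_SIZE);

	if (datalen)
		memcpy(cache + dataoffs, data, datalen);

	if (ooblen)
		memcpy(cache + PAGE_SIZE + ooboffs, oob, ooblen);
}

int main(void)
{
	static uint8_t cache[PAGE_SIZE + OOB_SIZE];
	uint8_t data[16] = { 0 };	/* partial data-only write */

	build_cache_image(cache, data, 512, sizeof(data), NULL, 0, 0);

	/* Everything outside [512, 528) is still 0xff. */
	printf("cache[0]=0x%02x cache[512]=0x%02x\n", cache[0], cache[512]);
	return 0;
}

This also shows why adjreq now unconditionally spans the full page + OOB:
nbytes always covers the whole cache image, which is what makes the old
conditional buf/column bookkeeping removable.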