summaryrefslogtreecommitdiff
path: root/common/spl/spl_nand.c
diff options
context:
space:
mode:
authorTim Harvey <tharvey@gateworks.com>2021-03-02 01:33:28 +0300
committerStefano Babic <sbabic@denx.de>2021-04-08 21:29:53 +0300
commitaa0032f67267232c6b315b5f6e1c086c217c9aae (patch)
tree19ae1e070b0bb31741cc8122bf54061889ba0ca2 /common/spl/spl_nand.c
parent39cb85043cdbc98d10b49f0b86596043d5f8e3f8 (diff)
downloadu-boot-aa0032f67267232c6b315b5f6e1c086c217c9aae.tar.xz
spl: fit: nand: allow for non-page-aligned elements
Add a weak nand_get_mtd function for nand drivers to provide mtd info and use this to set pagesize such that reading of non page-aligned elements can succeed. The spl_load_simple_fit already handles block access so all we need to do is provide the nand writesize as the block length. Further cleanup of the drivers which use nand_spl_loaders.c such as am335x_spl_bch.c, atmel_nand.c, and nand_spl_simple.c could be done using info from mtd_info instead of statically defined details. Signed-off-by: Tim Harvey <tharvey@gateworks.com> Reviewed-by: Tom Rini <trini@konsulko.com>
Diffstat (limited to 'common/spl/spl_nand.c')
-rw-r--r--common/spl/spl_nand.c20
1 files changed, 15 insertions, 5 deletions
diff --git a/common/spl/spl_nand.c b/common/spl/spl_nand.c
index 8213836df4..59f4a84a36 100644
--- a/common/spl/spl_nand.c
+++ b/common/spl/spl_nand.c
@@ -48,17 +48,27 @@ static ulong spl_nand_fit_read(struct spl_load_info *load, ulong offs,
sector = *(int *)load->priv;
offs = sector + nand_spl_adjust_offset(sector, offs - sector);
+#else
+ offs *= load->bl_len;
+ size *= load->bl_len;
#endif
err = nand_spl_load_image(offs, size, dst);
if (err)
return 0;
- return size;
+ return size / load->bl_len;
+}
+
+struct mtd_info * __weak nand_get_mtd(void)
+{
+ return NULL;
}
static int spl_nand_load_element(struct spl_image_info *spl_image,
int offset, struct image_header *header)
{
+ struct mtd_info *mtd = nand_get_mtd();
+ int bl_len = mtd ? mtd->writesize : 1;
int err;
err = nand_spl_load_image(offset, sizeof(*header), (void *)header);
@@ -73,18 +83,18 @@ static int spl_nand_load_element(struct spl_image_info *spl_image,
load.dev = NULL;
load.priv = &offset;
load.filename = NULL;
- load.bl_len = 1;
+ load.bl_len = bl_len;
load.read = spl_nand_fit_read;
- return spl_load_simple_fit(spl_image, &load, offset, header);
+ return spl_load_simple_fit(spl_image, &load, offset / bl_len, header);
} else if (IS_ENABLED(CONFIG_SPL_LOAD_IMX_CONTAINER)) {
struct spl_load_info load;
load.dev = NULL;
load.priv = NULL;
load.filename = NULL;
- load.bl_len = 1;
+ load.bl_len = bl_len;
load.read = spl_nand_fit_read;
- return spl_load_imx_container(spl_image, &load, offset);
+ return spl_load_imx_container(spl_image, &load, offset / bl_len);
} else {
err = spl_parse_image_header(spl_image, header);
if (err)