diff --git a/tools/u-boot-tools/Makefile b/tools/u-boot-tools/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..081383d7a790fde91699b532d962cc6ee9849f8f
--- /dev/null
+++ b/tools/u-boot-tools/Makefile
@@ -0,0 +1,280 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# (C) Copyright 2000-2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+
+# Enable all the config-independent tools
+ifneq ($(HOST_TOOLS_ALL),)
+CONFIG_KIRKWOOD = y
+CONFIG_LCD_LOGO = y
+CONFIG_CMD_LOADS = y
+CONFIG_CMD_NET = y
+CONFIG_XWAY_SWAP_BYTES = y
+CONFIG_NETCONSOLE = y
+CONFIG_SHA1_CHECK_UB_IMG = y
+CONFIG_ARCH_SUNXI = y
+endif
+
+subdir-$(HOST_TOOLS_ALL) += easylogo
+subdir-$(HOST_TOOLS_ALL) += gdb
+
+# Merge all the different vars for envcrc into one
+ENVCRC-$(CONFIG_ENV_IS_EMBEDDED) = y
+ENVCRC-$(CONFIG_ENV_IS_IN_EEPROM) = y
+ENVCRC-$(CONFIG_ENV_IS_IN_FLASH) = y
+ENVCRC-$(CONFIG_ENV_IS_IN_ONENAND) = y
+ENVCRC-$(CONFIG_ENV_IS_IN_NAND) = y
+ENVCRC-$(CONFIG_ENV_IS_IN_NVRAM) = y
+ENVCRC-$(CONFIG_ENV_IS_IN_SPI_FLASH) = y
+CONFIG_BUILD_ENVCRC ?= $(ENVCRC-y)
+
+hostprogs-$(CONFIG_SPL_GENERATE_ATMEL_PMECC_HEADER) += atmel_pmecc_params
+
+hostprogs-$(CONFIG_LCD_LOGO) += bmp_logo
+hostprogs-$(CONFIG_VIDEO_LOGO) += bmp_logo
+HOSTCFLAGS_bmp_logo.o := -pedantic
+
+hostprogs-$(CONFIG_BUILD_ENVCRC) += envcrc
+envcrc-objs := envcrc.o lib/crc32.o env/embedded.o lib/sha1.o
+
+hostprogs-$(CONFIG_CMD_NET) += gen_eth_addr
+HOSTCFLAGS_gen_eth_addr.o := -pedantic
+
+hostprogs-$(CONFIG_CMD_NET) += gen_ethaddr_crc
+gen_ethaddr_crc-objs := gen_ethaddr_crc.o lib/crc8.o
+HOSTCFLAGS_gen_ethaddr_crc.o := -pedantic
+
+hostprogs-$(CONFIG_CMD_LOADS) += img2srec
+HOSTCFLAGS_img2srec.o := -pedantic
+
+hostprogs-$(CONFIG_XWAY_SWAP_BYTES) += xway-swap-bytes
+HOSTCFLAGS_xway-swap-bytes.o := -pedantic
+
+hostprogs-y += mkenvimage
+mkenvimage-objs := mkenvimage.o os_support.o lib/crc32.o
+
+hostprogs-y += dumpimage mkimage
+hostprogs-$(CONFIG_FIT_SIGNATURE) += fit_info fit_check_sign
+
+hostprogs-$(CONFIG_CMD_BOOTEFI_SELFTEST) += file2include
+
+FIT_SIG_OBJS-$(CONFIG_FIT_SIGNATURE) := common/image-sig.o
+
+# The following files are synced with upstream DTC.
+# Use synced versions from scripts/dtc/libfdt/.
+LIBFDT_SRCS_SYNCED := fdt.c fdt_wip.c fdt_sw.c fdt_rw.c \
+		fdt_strerror.c fdt_empty_tree.c fdt_addresses.c fdt_overlay.c
+# The following files are locally modified for U-Boot (unfortunately).
+# Use U-Boot's own versions from lib/libfdt/.
+LIBFDT_SRCS_UNSYNCED := fdt_ro.c fdt_region.c
+
+LIBFDT_OBJS := $(addprefix libfdt/, $(patsubst %.c, %.o, $(LIBFDT_SRCS_SYNCED))) \
+	       $(addprefix lib/libfdt/, $(patsubst %.c, %.o, $(LIBFDT_SRCS_UNSYNCED)))
+
+RSA_OBJS-$(CONFIG_FIT_SIGNATURE) := $(addprefix lib/rsa/, \
+					rsa-sign.o rsa-verify.o rsa-checksum.o \
+					rsa-mod-exp.o)
+
+ROCKCHIP_OBS = lib/rc4.o rkcommon.o rkimage.o rksd.o rkspi.o
+
+# common objs for dumpimage and mkimage
+dumpimage-mkimage-objs := aisimage.o \
+			atmelimage.o \
+			$(FIT_SIG_OBJS-y) \
+			common/bootm.o \
+			lib/crc32.o \
+			default_image.o \
+			lib/fdtdec_common.o \
+			lib/fdtdec.o \
+			fit_common.o \
+			fit_image.o \
+			common/image-fit.o \
+			image-host.o \
+			common/image.o \
+			imagetool.o \
+			imximage.o \
+			imx8image.o \
+			imx8mimage.o \
+			kwbimage.o \
+			lib/md5.o \
+			lpc32xximage.o \
+			mxsimage.o \
+			omapimage.o \
+			os_support.o \
+			pblimage.o \
+			pbl_crc32.o \
+			vybridimage.o \
+			stm32image.o \
+			$(ROCKCHIP_OBS) \
+			socfpgaimage.o \
+			lib/crc16.o \
+			lib/sha1.o \
+			lib/sha256.o \
+			common/hash.o \
+			ublimage.o \
+			zynqimage.o \
+			zynqmpimage.o \
+			zynqmpbif.o \
+			$(LIBFDT_OBJS) \
+			gpimage.o \
+			gpimage-common.o \
+			mtk_image.o \
+			$(RSA_OBJS-y)
+
+dumpimage-objs := $(dumpimage-mkimage-objs) dumpimage.o
+mkimage-objs   := $(dumpimage-mkimage-objs) mkimage.o
+fit_info-objs   := $(dumpimage-mkimage-objs) fit_info.o
+fit_check_sign-objs   := $(dumpimage-mkimage-objs) fit_check_sign.o
+file2include-objs := file2include.o
+
+ifneq ($(CONFIG_MX23)$(CONFIG_MX28)$(CONFIG_FIT_SIGNATURE),)
+# Add CONFIG_MXS to the host CFLAGS, so we can check whether or not to
+# register the mxsimage support within tools/mxsimage.c.
+HOSTCFLAGS_mxsimage.o += -DCONFIG_MXS
+endif
+
+ifdef CONFIG_FIT_SIGNATURE
+# This affects include/image.h, but including the board config file
+# is tricky, so manually define these options here.
+HOST_EXTRACFLAGS	+= -DCONFIG_FIT_SIGNATURE
+HOST_EXTRACFLAGS	+= -DCONFIG_FIT_SIGNATURE_MAX_SIZE=$(CONFIG_FIT_SIGNATURE_MAX_SIZE)
+endif
+
+ifdef CONFIG_SYS_U_BOOT_OFFS
+HOSTCFLAGS_kwbimage.o += -DCONFIG_SYS_U_BOOT_OFFS=$(CONFIG_SYS_U_BOOT_OFFS)
+endif
+
+ifneq ($(CONFIG_ARMADA_38X)$(CONFIG_ARMADA_39X),)
+HOSTCFLAGS_kwbimage.o += -DCONFIG_KWB_SECURE
+endif
+
+# mxsimage, kwbimage (in secure mode) and FIT signature support need LibSSL
+ifneq ($(CONFIG_MX23)$(CONFIG_MX28)$(CONFIG_ARMADA_38X)$(CONFIG_ARMADA_39X)$(CONFIG_FIT_SIGNATURE),)
+HOSTLOADLIBES_mkimage += \
+	$(shell pkg-config --libs libssl libcrypto 2> /dev/null || echo "-lssl -lcrypto")
+
+# OS X deprecates openssl in favour of CommonCrypto, so suppress deprecation
+# warnings on those systems
+ifeq ($(HOSTOS),darwin)
+HOSTCFLAGS_mxsimage.o += -Wno-deprecated-declarations
+HOSTCFLAGS_image-sig.o += -Wno-deprecated-declarations
+HOSTCFLAGS_rsa-sign.o += -Wno-deprecated-declarations
+endif
+endif
+
+HOSTCFLAGS_fit_image.o += -DMKIMAGE_DTC=\"$(CONFIG_MKIMAGE_DTC_PATH)\"
+
+HOSTLOADLIBES_dumpimage := $(HOSTLOADLIBES_mkimage)
+HOSTLOADLIBES_fit_info := $(HOSTLOADLIBES_mkimage)
+HOSTLOADLIBES_fit_check_sign := $(HOSTLOADLIBES_mkimage)
+
+hostprogs-$(CONFIG_EXYNOS5250) += mkexynosspl
+hostprogs-$(CONFIG_EXYNOS5420) += mkexynosspl
+HOSTCFLAGS_mkexynosspl.o := -pedantic
+
+ifdtool-objs := $(LIBFDT_OBJS) ifdtool.o
+hostprogs-$(CONFIG_X86) += ifdtool
+
+hostprogs-$(CONFIG_MX23) += mxsboot
+hostprogs-$(CONFIG_MX28) += mxsboot
+HOSTCFLAGS_mxsboot.o := -pedantic
+
+hostprogs-$(CONFIG_ARCH_SUNXI) += mksunxiboot
+hostprogs-$(CONFIG_ARCH_SUNXI) += sunxi-spl-image-builder
+sunxi-spl-image-builder-objs := sunxi-spl-image-builder.o lib/bch.o
+
+hostprogs-$(CONFIG_NETCONSOLE) += ncb
+hostprogs-$(CONFIG_SHA1_CHECK_UB_IMG) += ubsha1
+
+ubsha1-objs := os_support.o ubsha1.o lib/sha1.o
+
+HOSTCFLAGS_ubsha1.o := -pedantic
+
+hostprogs-$(CONFIG_KIRKWOOD) += kwboot
+hostprogs-$(CONFIG_ARCH_MVEBU) += kwboot
+hostprogs-y += proftool
+hostprogs-$(CONFIG_STATIC_RELA) += relocate-rela
+hostprogs-$(CONFIG_RISCV) += prelink-riscv
+
+hostprogs-y += fdtgrep
+fdtgrep-objs += $(LIBFDT_OBJS) fdtgrep.o
+
+hostprogs-$(CONFIG_MIPS) += mips-relocs
+
+# We build some files with extra pedantic flags to try to minimize things
+# that won't build on some weird host compiler -- though there are lots of
+# exceptions for files that aren't compliant.
+HOSTCFLAGS_crc32.o := -pedantic
+HOSTCFLAGS_crc8.o := -pedantic
+HOSTCFLAGS_md5.o := -pedantic
+HOSTCFLAGS_sha1.o := -pedantic
+HOSTCFLAGS_sha256.o := -pedantic
+
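+# Some of the objects listed above (lib/, common/, env/) are built from
+# sources that do not live in this directory. The wrap rule below generates
+# a one-line stub in the build directory that just #includes the original
+# source file via the include path, so it is compiled with the host tool
+# settings used here.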
+quiet_cmd_wrap = WRAP    $@
+cmd_wrap = echo "\#include <../$(patsubst $(obj)/%,%,$@)>" >$@
+
+$(obj)/lib/%.c $(obj)/common/%.c $(obj)/env/%.c:
+	$(call cmd,wrap)
+
+clean-dirs := lib common
+
+always := $(hostprogs-y)
+
+# Generated LCD/video logo
+LOGO_H = $(objtree)/include/bmp_logo.h
+LOGO_DATA_H = $(objtree)/include/bmp_logo_data.h
+LOGO-$(CONFIG_LCD_LOGO) += $(LOGO_H)
+LOGO-$(CONFIG_LCD_LOGO) += $(LOGO_DATA_H)
+LOGO-$(CONFIG_VIDEO_LOGO) += $(LOGO_H)
+LOGO-$(CONFIG_VIDEO_LOGO) += $(LOGO_DATA_H)
+
+# Generic logo
+ifeq ($(LOGO_BMP),)
+LOGO_BMP= $(srctree)/$(src)/logos/denx.bmp
+
+# Use board logo and fallback to vendor
+ifneq ($(wildcard $(srctree)/$(src)/logos/$(BOARD).bmp),)
+LOGO_BMP= $(srctree)/$(src)/logos/$(BOARD).bmp
+else
+ifneq ($(wildcard $(srctree)/$(src)/logos/$(VENDOR).bmp),)
+LOGO_BMP= $(srctree)/$(src)/logos/$(VENDOR).bmp
+endif
+endif
+
+endif # !LOGO_BMP
+
+#
+# Use native tools and options
+# Define __KERNEL_STRICT_NAMES to prevent typedef overlaps
+# Define _GNU_SOURCE to obtain the getline prototype from stdio.h
+#
+HOST_EXTRACFLAGS += -include $(srctree)/include/compiler.h \
+		$(patsubst -I%,-idirafter%, $(filter -I%, $(UBOOTINCLUDE))) \
+		-I$(srctree)/scripts/dtc/libfdt \
+		-I$(srctree)/tools \
+		-DUSE_HOSTCC \
+		-D__KERNEL_STRICT_NAMES \
+		-D_GNU_SOURCE
+
+__build:	$(LOGO-y)
+
+$(LOGO_H):	$(obj)/bmp_logo $(LOGO_BMP)
+	$(obj)/bmp_logo --gen-info $(LOGO_BMP) > $@
+
+$(LOGO_DATA_H):	$(obj)/bmp_logo $(LOGO_BMP)
+	$(obj)/bmp_logo --gen-data $(LOGO_BMP) > $@
+
+# Let clean descend into subdirs
+subdir- += env
+
+ifneq ($(CROSS_BUILD_TOOLS),)
+override HOSTCC = $(CC)
+
+quiet_cmd_crosstools_strip = STRIP   $^
+      cmd_crosstools_strip = $(STRIP) $^; touch $@
+$(obj)/.strip: $(call objectify,$(filter $(always),$(hostprogs-y)))
+	$(call cmd,crosstools_strip)
+
+always += .strip
+endif
+clean-files += .strip
diff --git a/tools/u-boot-tools/aisimage.c b/tools/u-boot-tools/aisimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..4cd76ab843a22aaca4ad7579ed92ea2fa6cff2bb
--- /dev/null
+++ b/tools/u-boot-tools/aisimage.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2011
+ * Stefano Babic, DENX Software Engineering, sbabic@denx.de.
+ */
+
+#include "imagetool.h"
+#include "aisimage.h"
+#include <image.h>
+
+#define IS_FNC_EXEC(c)	(cmd_table[c].AIS_cmd == AIS_CMD_FNLOAD)
+#define WORD_ALIGN0	4
+#define WORD_ALIGN(len) (((len)+WORD_ALIGN0-1) & ~(WORD_ALIGN0-1))
+#define MAX_CMD_BUFFER	4096
+
+static uint32_t ais_img_size;
+
+/*
+ * Supported commands for configuration file
+ */
+static table_entry_t aisimage_cmds[] = {
+	{CMD_DATA,		"DATA",		"Reg Write Data"},
+	{CMD_FILL,		"FILL",		"Fill range with pattern"},
+	{CMD_CRCON,		"CRCON",	"CRC Enable"},
+	{CMD_CRCOFF,		"CRCOFF",	"CRC Disable"},
+	{CMD_CRCCHECK,		"CRCCHECK",	"CRC Validate"},
+	{CMD_JMPCLOSE,		"JMPCLOSE",	"Jump & Close"},
+	{CMD_JMP,		"JMP",		"Jump"},
+	{CMD_SEQREAD,		"SEQREAD",	"Sequential read"},
+	{CMD_PLL0,		"PLL0",		"PLL0"},
+	{CMD_PLL1,		"PLL1",		"PLL1"},
+	{CMD_CLK,		"CLK",		"Clock configuration"},
+	{CMD_DDR2,		"DDR2",		"DDR2 Configuration"},
+	{CMD_EMIFA,		"EMIFA",	"EMIFA"},
+	{CMD_EMIFA_ASYNC,	"EMIFA_ASYNC",	"EMIFA Async"},
+	{CMD_PLL,		"PLL",		"PLL & Clock configuration"},
+	{CMD_PSC,		"PSC",		"PSC setup"},
+	{CMD_PINMUX,		"PINMUX",	"Pinmux setup"},
+	{CMD_BOOTTABLE,		"BOOT_TABLE",	"Boot table command"},
+	{-1,			"",		""},
+};
+
+static struct ais_func_exec {
+	uint32_t index;
+	uint32_t argcnt;
+} ais_func_table[] = {
+	[CMD_PLL0] = {0, 2},
+	[CMD_PLL1] = {1, 2},
+	[CMD_CLK] = {2, 1},
+	[CMD_DDR2] = {3, 8},
+	[CMD_EMIFA] = {4, 5},
+	[CMD_EMIFA_ASYNC] = {5, 5},
+	[CMD_PLL] = {6, 3},
+	[CMD_PSC] = {7, 1},
+	[CMD_PINMUX] = {8, 3}
+};
+
+static struct cmd_table_t {
+	uint32_t nargs;
+	uint32_t AIS_cmd;
+} cmd_table[] = {
+	[CMD_FILL]	 =	{	4,	AIS_CMD_FILL},
+	[CMD_CRCON]	=	{	0,	AIS_CMD_ENCRC},
+	[CMD_CRCOFF]	=	{	0,	AIS_CMD_DISCRC},
+	[CMD_CRCCHECK]	=	{	2,	AIS_CMD_ENCRC},
+	[CMD_JMPCLOSE]	=	{	1,	AIS_CMD_JMPCLOSE},
+	[CMD_JMP]	=	{	1,	AIS_CMD_JMP},
+	[CMD_SEQREAD]	=	{	0,	AIS_CMD_SEQREAD},
+	[CMD_PLL0]	=	{	2,	AIS_CMD_FNLOAD},
+	[CMD_PLL1]	=	{	2,	AIS_CMD_FNLOAD},
+	[CMD_CLK]	=	{	1,	AIS_CMD_FNLOAD},
+	[CMD_DDR2]	=	{	8,	AIS_CMD_FNLOAD},
+	[CMD_EMIFA]	=	{	5,	AIS_CMD_FNLOAD},
+	[CMD_EMIFA_ASYNC] =	{	5,	AIS_CMD_FNLOAD},
+	[CMD_PLL]	=	{	3,	AIS_CMD_FNLOAD},
+	[CMD_PSC]	=	{	1,	AIS_CMD_FNLOAD},
+	[CMD_PINMUX]	=	{	3,	AIS_CMD_FNLOAD},
+	[CMD_BOOTTABLE]	=	{	4,	AIS_CMD_BOOTTBL},
+};
+
+static uint32_t get_cfg_value(char *token, char *name,  int linenr)
+{
+	char *endptr;
+	uint32_t value;
+
+	errno = 0;
+	value = strtoul(token, &endptr, 16);
+	if (errno || (token == endptr)) {
+		fprintf(stderr, "Error: %s[%d] - Invalid hex data(%s)\n",
+			name,  linenr, token);
+		exit(EXIT_FAILURE);
+	}
+	return value;
+}
+
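+/*
+ * Map a raw AIS command word back to its index in cmd_table. Several
+ * configuration commands share the AIS_CMD_FNLOAD opcode, so those are
+ * told apart by the ROM function number held in the low 16 bits of the
+ * word that follows the command.
+ */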
+static int get_ais_table_id(uint32_t *ptr)
+{
+	int i;
+	int func_no;
+
+	for (i = 0; i < ARRAY_SIZE(cmd_table); i++) {
+		if (*ptr == cmd_table[i].AIS_cmd) {
+			if (cmd_table[i].AIS_cmd != AIS_CMD_FNLOAD)
+				return i;
+
+			func_no = ((struct ais_cmd_func *)ptr)->func_args
+				& 0xFFFF;
+			if (func_no == ais_func_table[i].index)
+				return i;
+		}
+	}
+
+	return -1;
+}
+
+static void aisimage_print_header(const void *hdr)
+{
+	struct ais_header *ais_hdr = (struct ais_header *)hdr;
+	uint32_t *ptr;
+	struct ais_cmd_load *ais_load;
+	int id;
+
+	if (ais_hdr->magic != AIS_MAGIC_WORD) {
+		fprintf(stderr, "Error: - AIS Magic Number not found\n");
+		return;
+	}
+	fprintf(stdout, "Image Type:   TI Davinci AIS Boot Image\n");
+	fprintf(stdout, "AIS magic :   %08x\n", ais_hdr->magic);
+	ptr = (uint32_t *)&ais_hdr->magic;
+	ptr++;
+
+	while (*ptr != AIS_CMD_JMPCLOSE) {
+		/* Check if we find the image */
+		if (*ptr == AIS_CMD_LOAD) {
+			ais_load = (struct ais_cmd_load *)ptr;
+			fprintf(stdout, "Image at  :   0x%08x size 0x%08x\n",
+				ais_load->addr,
+				ais_load->size);
+			ptr = ais_load->data + ais_load->size / sizeof(*ptr);
+			continue;
+		}
+
+		id = get_ais_table_id(ptr);
+		if (id < 0) {
+			fprintf(stderr, "Error: -  AIS Image corrupted\n");
+			return;
+		}
+		fprintf(stdout, "AIS cmd   :   %s\n",
+			get_table_entry_name(aisimage_cmds, NULL, id));
+		ptr += cmd_table[id].nargs + IS_FNC_EXEC(id) + 1;
+		if (((void *)ptr - hdr) > ais_img_size) {
+			fprintf(stderr,
+				"AIS Image not terminated by JMPCLOSE\n");
+			return;
+		}
+	}
+}
+
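+/*
+ * Append one command to the AIS stream: the command word itself, an extra
+ * word for FNLOAD commands (argument count in the upper 16 bits, function
+ * index in the lower 16 bits), and then the parameters.
+ */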
+static uint32_t *ais_insert_cmd_header(uint32_t cmd, uint32_t nargs,
+	uint32_t *parms, struct image_type_params *tparams,
+	uint32_t *ptr)
+{
+	int i;
+
+	*ptr++ = cmd_table[cmd].AIS_cmd;
+	if (IS_FNC_EXEC(cmd))
+		*ptr++ = ((nargs & 0xFFFF) << 16) + ais_func_table[cmd].index;
+
+	/* Copy parameters */
+	for (i = 0; i < nargs; i++)
+		*ptr++ = cpu_to_le32(parms[i]);
+
+	return ptr;
+
+}
+
+static uint32_t *ais_alloc_buffer(struct image_tool_params *params)
+{
+	int dfd;
+	struct stat sbuf;
+	char *datafile = params->datafile;
+	uint32_t *ptr;
+
+	dfd = open(datafile, O_RDONLY|O_BINARY);
+	if (dfd < 0) {
+		fprintf(stderr, "%s: Can't open %s: %s\n",
+			params->cmdname, datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (fstat(dfd, &sbuf) < 0) {
+		fprintf(stderr, "%s: Can't stat %s: %s\n",
+			params->cmdname, datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	/*
+	 * Space for the header is allocated. The size is taken from
+	 * the size of the datafile, which aisimage_generate()
+	 * copies into the header. Copying the datafile
+	 * is not left to the main program, because after the datafile
+	 * the header must be terminated with the Jump & Close command.
+	 */
+	ais_img_size = WORD_ALIGN(sbuf.st_size) + MAX_CMD_BUFFER;
+	ptr = (uint32_t *)malloc(WORD_ALIGN(sbuf.st_size) + MAX_CMD_BUFFER);
+	if (!ptr) {
+		fprintf(stderr, "%s: malloc return failure: %s\n",
+			params->cmdname, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	close(dfd);
+
+	return ptr;
+}
+
+static uint32_t *ais_copy_image(struct image_tool_params *params,
+	uint32_t *aisptr)
+
+{
+	int dfd;
+	struct stat sbuf;
+	char *datafile = params->datafile;
+	void *ptr;
+
+	dfd = open(datafile, O_RDONLY|O_BINARY);
+	if (dfd < 0) {
+		fprintf(stderr, "%s: Can't open %s: %s\n",
+			params->cmdname, datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (fstat(dfd, &sbuf) < 0) {
+		fprintf(stderr, "%s: Can't stat %s: %s\n",
+			params->cmdname, datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	ptr = mmap(0, sbuf.st_size, PROT_READ, MAP_SHARED, dfd, 0);
+	if (ptr == MAP_FAILED) {
+		fprintf(stderr, "%s: Can't read %s: %s\n",
+			params->cmdname, datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	*aisptr++ = AIS_CMD_LOAD;
+	*aisptr++ = params->ep;
+	*aisptr++ = sbuf.st_size;
+	memcpy((void *)aisptr, ptr, sbuf.st_size);
+	aisptr += WORD_ALIGN(sbuf.st_size) / sizeof(uint32_t);
+
+	(void) munmap((void *)ptr, sbuf.st_size);
+	(void) close(dfd);
+
+	return aisptr;
+
+}
+
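+/*
+ * Build the AIS image: parse the configuration file (one command per line,
+ * a command name followed by hexadecimal arguments, '#' starting a comment),
+ * append the data file with a LOAD command and terminate the stream with
+ * Jump & Close.
+ */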
+static int aisimage_generate(struct image_tool_params *params,
+	struct image_type_params *tparams)
+{
+	FILE *fd = NULL;
+	char *line = NULL;
+	char *token, *saveptr1, *saveptr2;
+	int lineno = 0;
+	int fld;
+	size_t len;
+	int32_t cmd;
+	uint32_t nargs, cmd_parms[10];
+	uint32_t value, size;
+	char *name = params->imagename;
+	uint32_t *aishdr;
+
+	fd = fopen(name, "r");
+	if (fd == 0) {
+		fprintf(stderr,
+			"Error: %s - Can't open AIS configuration\n", name);
+		exit(EXIT_FAILURE);
+	}
+
+	/*
+	 * the size of the header is variable and is computed
+	 * scanning the configuration file.
+	 */
+	tparams->header_size = 0;
+
+	/*
+	 * Allocate a buffer large enough for the data file plus the
+	 * command headers (see ais_alloc_buffer()).
+	 */
+	aishdr = ais_alloc_buffer(params);
+	tparams->hdr = aishdr;
+	*aishdr++ = AIS_MAGIC_WORD;
+
+	/*
+	 * Very simple parsing: lines starting with '#' are comments
+	 * and are dropped.
+	 */
+	while ((getline(&line, &len, fd)) > 0) {
+		lineno++;
+
+		token = strtok_r(line, "\r\n", &saveptr1);
+		if (token == NULL)
+			continue;
+
+		/* Check inside the single line */
+		line = token;
+		fld = CFG_COMMAND;
+		cmd = CMD_INVALID;
+		nargs = 0;
+		while (token != NULL) {
+			token = strtok_r(line, " \t", &saveptr2);
+			if (token == NULL)
+				break;
+
+			/* Drop all text starting with '#' as comments */
+			if (token[0] == '#')
+				break;
+
+			switch (fld) {
+			case CFG_COMMAND:
+				cmd = get_table_entry_id(aisimage_cmds,
+					"aisimage commands", token);
+				if (cmd < 0) {
+					fprintf(stderr,
+					"Error: %s[%d] - Invalid command "
+					"(%s)\n", name, lineno, token);
+
+					exit(EXIT_FAILURE);
+				}
+				break;
+			case CFG_VALUE:
+				value = get_cfg_value(token, name, lineno);
+				cmd_parms[nargs++] = value;
+				if (nargs > cmd_table[cmd].nargs) {
+					fprintf(stderr,
+					 "Error: %s[%d] - too many arguments "
+						"(%s) for command %s\n", name,
+						lineno, token,
+						aisimage_cmds[cmd].sname);
+					exit(EXIT_FAILURE);
+				}
+				break;
+			}
+			line = NULL;
+			fld = CFG_VALUE;
+		}
+		if (cmd != CMD_INVALID) {
+			/* Now insert the command into the header */
+			aishdr = ais_insert_cmd_header(cmd, nargs, cmd_parms,
+				tparams, aishdr);
+		}
+
+	}
+	fclose(fd);
+
+	aishdr = ais_copy_image(params, aishdr);
+
+	/* Add Jmp & Close */
+	*aishdr++ = AIS_CMD_JMPCLOSE;
+	*aishdr++ = params->ep;
+
+	size = (aishdr - (uint32_t *)tparams->hdr) * sizeof(uint32_t);
+	tparams->header_size = size;
+
+	return 0;
+}
+
+static int aisimage_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_AISIMAGE)
+		return EXIT_SUCCESS;
+	else
+		return EXIT_FAILURE;
+}
+
+static int aisimage_verify_header(unsigned char *ptr, int image_size,
+			struct image_tool_params *params)
+{
+	struct ais_header *ais_hdr = (struct ais_header *)ptr;
+
+	if (ais_hdr->magic != AIS_MAGIC_WORD)
+		return -FDT_ERR_BADSTRUCTURE;
+
+	/* Store the total size to remember in print_hdr */
+	ais_img_size = image_size;
+
+	return 0;
+}
+
+static void aisimage_set_header(void *ptr, struct stat *sbuf, int ifd,
+				struct image_tool_params *params)
+{
+}
+
+int aisimage_check_params(struct image_tool_params *params)
+{
+	if (!params)
+		return CFG_INVALID;
+	if (!strlen(params->imagename)) {
+		fprintf(stderr, "Error: %s - Configuration file not specified, "
+			"it is needed for aisimage generation\n",
+			params->cmdname);
+		return CFG_INVALID;
+	}
+	/*
+	 * Check parameters:
+	 * XIP is not allowed, and incompatible parameters must not be
+	 * passed at the same time.
+	 * For example, if a listing is requested, a data image must not
+	 * be provided.
+	 */
+	return	(params->dflag && (params->fflag || params->lflag)) ||
+		(params->fflag && (params->dflag || params->lflag)) ||
+		(params->lflag && (params->dflag || params->fflag)) ||
+		(params->xflag) || !(strlen(params->imagename));
+}
+
+/*
+ * aisimage parameters
+ */
+U_BOOT_IMAGE_TYPE(
+	aisimage,
+	"TI Davinci AIS Boot Image support",
+	0,
+	NULL,
+	aisimage_check_params,
+	aisimage_verify_header,
+	aisimage_print_header,
+	aisimage_set_header,
+	NULL,
+	aisimage_check_image_types,
+	NULL,
+	aisimage_generate
+);
diff --git a/tools/u-boot-tools/aisimage.h b/tools/u-boot-tools/aisimage.h
new file mode 100644
index 0000000000000000000000000000000000000000..d8637a0fa5dedb658185be8d5183130274f0c8dd
--- /dev/null
+++ b/tools/u-boot-tools/aisimage.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2011
+ * Stefano Babic, DENX Software Engineering, sbabic@denx.de.
+ */
+
+#ifndef _AISIMAGE_H_
+#define _AISIMAGE_H_
+
+/* all values are for little endian systems */
+#define AIS_MAGIC_WORD	0x41504954
+#define AIS_FCN_MAX	8
+
+enum {
+	AIS_CMD_LOAD	= 0x58535901,
+	AIS_CMD_VALCRC	= 0x58535902,
+	AIS_CMD_ENCRC	= 0x58535903,
+	AIS_CMD_DISCRC	= 0x58535904,
+	AIS_CMD_JMP	= 0x58535905,
+	AIS_CMD_JMPCLOSE = 0x58535906,
+	AIS_CMD_BOOTTBL	= 0x58535907,
+	AIS_CMD_FILL	= 0x5853590A,
+	AIS_CMD_FNLOAD	= 0x5853590D,
+	AIS_CMD_SEQREAD	= 0x58535963,
+};
+
+struct ais_cmd_load {
+	uint32_t cmd;
+	uint32_t addr;
+	uint32_t size;
+	uint32_t data[1];
+};
+
+struct ais_cmd_func {
+	uint32_t cmd;
+	uint32_t func_args;
+	uint32_t parms[AIS_FCN_MAX];
+};
+
+struct ais_cmd_jmpclose {
+	uint32_t cmd;
+	uint32_t addr;
+};
+
+#define CMD_DATA_STR	"DATA"
+
+enum ais_file_cmd {
+	CMD_INVALID,
+	CMD_FILL,
+	CMD_CRCON,
+	CMD_CRCOFF,
+	CMD_CRCCHECK,
+	CMD_JMPCLOSE,
+	CMD_JMP,
+	CMD_SEQREAD,
+	CMD_DATA,
+	CMD_PLL0,
+	CMD_PLL1,
+	CMD_CLK,
+	CMD_DDR2,
+	CMD_EMIFA,
+	CMD_EMIFA_ASYNC,
+	CMD_PLL,
+	CMD_PSC,
+	CMD_PINMUX,
+	CMD_BOOTTABLE
+};
+
+enum aisimage_fld_types {
+	CFG_INVALID = -1,
+	CFG_COMMAND,
+	CFG_VALUE,
+};
+
+struct ais_header {
+	uint32_t magic;
+	char data[1];
+};
+
+#endif /* _AISIMAGE_H_ */
diff --git a/tools/u-boot-tools/aisimage.o b/tools/u-boot-tools/aisimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..c739de79f48ef20240fadf03acd268134907b40c
Binary files /dev/null and b/tools/u-boot-tools/aisimage.o differ
diff --git a/tools/u-boot-tools/atmel_pmecc_params.c b/tools/u-boot-tools/atmel_pmecc_params.c
new file mode 100644
index 0000000000000000000000000000000000000000..a4ae03f0fd42cb5edec772cbfc09684172e214d1
--- /dev/null
+++ b/tools/u-boot-tools/atmel_pmecc_params.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2014 Andreas Bießmann <andreas@biessmann.org>
+ */
+
+/*
+ * This is a host tool for generating an appropriate string out of board
+ * configuration. The string is required for correct generation of PMECC
+ * header which in turn is required for NAND flash booting of Atmel AT91 style
+ * hardware.
+ *
+ * See doc/README.atmel_pmecc for more information.
+ */
+
+#include <config.h>
+#include <stdio.h>
+#include <stdlib.h>
+
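+/*
+ * Bytes of ECC needed per sector: each correctable bit costs m bits (13 for
+ * 512-byte sectors, 14 for 1024-byte sectors, as computed below), rounded up
+ * to whole bytes.
+ */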
+static int pmecc_get_ecc_bytes(int cap, int sector_size)
+{
+	int m = 12 + sector_size / 512;
+	return (m * cap + 7) / 8;
+}
+
+int main(int argc, char *argv[])
+{
+	unsigned int use_pmecc = 0;
+	unsigned int sector_per_page;
+	unsigned int sector_size = CONFIG_PMECC_SECTOR_SIZE;
+	unsigned int oob_size = CONFIG_SYS_NAND_OOBSIZE;
+	unsigned int ecc_bits = CONFIG_PMECC_CAP;
+	unsigned int ecc_offset;
+
+#ifdef CONFIG_ATMEL_NAND_HW_PMECC
+	use_pmecc = 1;
+#endif
+
+	sector_per_page = CONFIG_SYS_NAND_PAGE_SIZE / CONFIG_PMECC_SECTOR_SIZE;
+	ecc_offset = oob_size -
+		pmecc_get_ecc_bytes(ecc_bits, sector_size) * sector_per_page;
+
+	printf("usePmecc=%d,", use_pmecc);
+	printf("sectorPerPage=%d,", sector_per_page);
+	printf("sectorSize=%d,", sector_size);
+	printf("spareSize=%d,", oob_size);
+	printf("eccBits=%d,", ecc_bits);
+	printf("eccOffset=%d", ecc_offset);
+	printf("\n");
+
+	exit(EXIT_SUCCESS);
+}
diff --git a/tools/u-boot-tools/atmelimage.c b/tools/u-boot-tools/atmelimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..7b3b243d58b7c13923e9e69065d4d74f944873eb
--- /dev/null
+++ b/tools/u-boot-tools/atmelimage.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2014
+ * Andreas Bießmann <andreas@biessmann.org>
+ */
+
+#include "imagetool.h"
+#include "mkimage.h"
+
+#include <image.h>
+
+#define pr_err(fmt, args...) fprintf(stderr, "atmelimage Error: " fmt, ##args)
+
+static int atmel_check_image_type(uint8_t type)
+{
+	if (type == IH_TYPE_ATMELIMAGE)
+		return EXIT_SUCCESS;
+	else
+		return EXIT_FAILURE;
+}
+
+static uint32_t nand_pmecc_header[52];
+
+/*
+ * A helper struct for parsing the mkimage -n parameter
+ *
+ * Keep in same order as the configs array!
+ */
+static struct pmecc_config {
+	int use_pmecc;
+	int sector_per_page;
+	int spare_size;
+	int ecc_bits;
+	int sector_size;
+	int ecc_offset;
+} pmecc;
+
+/*
+ * Strings used to configure the PMECC header via the mkimage -n switch
+ *
+ * We expect a comma-separated list of key=value pairs. The mkimage -n
+ * parameter argument should not contain any whitespace.
+ *
+ * Keep in same order as struct pmecc_config!
+ */
+static const char * const configs[] = {
+	"usePmecc",
+	"sectorPerPage",
+	"spareSize",
+	"eccBits",
+	"sectorSize",
+	"eccOffset"
+};
+
+static int atmel_find_pmecc_parameter_in_token(const char *token)
+{
+	size_t pos;
+	char *param;
+
+	debug("token: '%s'\n", token);
+
+	for (pos = 0; pos < ARRAY_SIZE(configs); pos++) {
+		if (strncmp(token, configs[pos], strlen(configs[pos])) == 0) {
+			param = strstr(token, "=");
+			if (!param)
+				goto err;
+
+			param++;
+			debug("\t%s parameter: '%s'\n", configs[pos], param);
+
+			switch (pos) {
+			case 0:
+				pmecc.use_pmecc = strtol(param, NULL, 10);
+				return EXIT_SUCCESS;
+			case 1:
+				pmecc.sector_per_page = strtol(param, NULL, 10);
+				return EXIT_SUCCESS;
+			case 2:
+				pmecc.spare_size = strtol(param, NULL, 10);
+				return EXIT_SUCCESS;
+			case 3:
+				pmecc.ecc_bits = strtol(param, NULL, 10);
+				return EXIT_SUCCESS;
+			case 4:
+				pmecc.sector_size = strtol(param, NULL, 10);
+				return EXIT_SUCCESS;
+			case 5:
+				pmecc.ecc_offset = strtol(param, NULL, 10);
+				return EXIT_SUCCESS;
+			}
+		}
+	}
+
+err:
+	pr_err("Could not find parameter in token '%s'\n", token);
+	return EXIT_FAILURE;
+}
+
+static int atmel_parse_pmecc_params(char *txt)
+{
+	char *token;
+
+	token = strtok(txt, ",");
+	while (token != NULL) {
+		if (atmel_find_pmecc_parameter_in_token(token))
+			return EXIT_FAILURE;
+
+		token = strtok(NULL, ",");
+	}
+
+	return EXIT_SUCCESS;
+}
+
+static int atmel_verify_header(unsigned char *ptr, int image_size,
+			struct image_tool_params *params)
+{
+	uint32_t *ints = (uint32_t *)ptr;
+	size_t pos;
+	size_t size = image_size;
+
+	/* check if we have a PMECC header attached */
+	for (pos = 0; pos < ARRAY_SIZE(nand_pmecc_header); pos++)
+		if (ints[pos] >> 28 != 0xC)
+			break;
+
+	if (pos == ARRAY_SIZE(nand_pmecc_header)) {
+		ints += ARRAY_SIZE(nand_pmecc_header);
+		size -= sizeof(nand_pmecc_header);
+	}
+
+	/* check the seven interrupt vectors of the binary */
+	for (pos = 0; pos < 7; pos++) {
+		debug("atmelimage: interrupt vector #%zu is 0x%08X\n", pos+1,
+		      ints[pos]);
+		/*
+		 * all vectors except the 6th one must contain a valid
+		 * LDR or B opcode
+		 */
+		if (pos == 5)
+			/* 6th vector has the image size set, check later */
+			continue;
+		if ((ints[pos] & 0xff000000) == 0xea000000)
+			/* valid B Opcode */
+			continue;
+		if ((ints[pos] & 0xfffff000) == 0xe59ff000)
+			/* valid LDR (I=0, P=1, U=1, B=0, W=0, L=1) */
+			continue;
+		/* ouch, none of the checks matched ... */
+		return 1;
+	}
+
+	return ints[5] != cpu_to_le32(size);
+}
+
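+/*
+ * Layout of one PMECC header word, as decoded here and encoded in
+ * atmel_vrec_header():
+ *   bits 31-28: 0xC key
+ *   bits 26-18: eccOffset
+ *   bits 17-16: sectorSize (0 = 512, 1 = 1024 bytes)
+ *   bits 15-13: eccBitReq (0..4 = 2/4/8/12/24 bits)
+ *   bits 12-4:  spareSize
+ *   bits  2-1:  nbSectorPerPage (0..3 = 1/2/4/8 sectors)
+ *   bit 0:      usePmecc
+ */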
+static void atmel_print_pmecc_header(const uint32_t word)
+{
+	int val;
+
+	printf("\t\tPMECC header\n");
+
+	printf("\t\t====================\n");
+
+	val = (word >> 18) & 0x1ff;
+	printf("\t\teccOffset: %9i\n", val);
+
+	val = (((word >> 16) & 0x3) == 0) ? 512 : 1024;
+	printf("\t\tsectorSize: %8i\n", val);
+
+	if (((word >> 13) & 0x7) <= 2)
+		val = (2 << ((word >> 13) & 0x7));
+	else
+		val = (12 << (((word >> 13) & 0x7) - 3));
+	printf("\t\teccBitReq: %9i\n", val);
+
+	val = (word >> 4) & 0x1ff;
+	printf("\t\tspareSize: %9i\n", val);
+
+	val = (1 << ((word >> 1) & 0x3));
+	printf("\t\tnbSectorPerPage: %3i\n", val);
+
+	printf("\t\tusePmecc: %10i\n", word & 0x1);
+	printf("\t\t====================\n");
+}
+
+static void atmel_print_header(const void *ptr)
+{
+	uint32_t *ints = (uint32_t *)ptr;
+	size_t pos;
+
+	/* check if we have a PMECC header attached */
+	for (pos = 0; pos < ARRAY_SIZE(nand_pmecc_header); pos++)
+		if (ints[pos] >> 28 != 0xC)
+			break;
+
+	if (pos == ARRAY_SIZE(nand_pmecc_header)) {
+		printf("Image Type:\tATMEL ROM-Boot Image with PMECC Header\n");
+		atmel_print_pmecc_header(ints[0]);
+		pos += 5;
+	} else {
+		printf("Image Type:\tATMEL ROM-Boot Image without PMECC Header\n");
+		pos = 5;
+	}
+	printf("\t\t6th vector has %u set\n", le32_to_cpu(ints[pos]));
+}
+
+static void atmel_set_header(void *ptr, struct stat *sbuf, int ifd,
+				struct image_tool_params *params)
+{
+	/* just save the image size into the 6th interrupt vector */
+	uint32_t *ints = (uint32_t *)ptr;
+	size_t cnt;
+	size_t pos = 5;
+	size_t size = sbuf->st_size;
+
+	for (cnt = 0; cnt < ARRAY_SIZE(nand_pmecc_header); cnt++)
+		if (ints[cnt] >> 28 != 0xC)
+			break;
+
+	if (cnt == ARRAY_SIZE(nand_pmecc_header)) {
+		pos += ARRAY_SIZE(nand_pmecc_header);
+		size -= sizeof(nand_pmecc_header);
+	}
+
+	ints[pos] = cpu_to_le32(size);
+}
+
+static int atmel_check_params(struct image_tool_params *params)
+{
+	if (strlen(params->imagename) > 0)
+		if (atmel_parse_pmecc_params(params->imagename))
+			return EXIT_FAILURE;
+
+	return !(!params->eflag &&
+		!params->fflag &&
+		!params->xflag &&
+		((params->dflag && !params->lflag) ||
+		 (params->lflag && !params->dflag)));
+}
+
+static int atmel_vrec_header(struct image_tool_params *params,
+				struct image_type_params *tparams)
+{
+	uint32_t tmp;
+	size_t pos;
+
+	if (strlen(params->imagename) == 0)
+		return EXIT_SUCCESS;
+
+	tmp = 0xC << 28;
+
+	tmp |= (pmecc.ecc_offset & 0x1ff) << 18;
+
+	switch (pmecc.sector_size) {
+	case 512:
+		tmp |= 0 << 16;
+		break;
+	case 1024:
+		tmp |= 1 << 16;
+		break;
+
+	default:
+		pr_err("Wrong sectorSize (%i) for PMECC header\n",
+		       pmecc.sector_size);
+		return EXIT_FAILURE;
+	}
+
+	switch (pmecc.ecc_bits) {
+	case 2:
+		tmp |= 0 << 13;
+		break;
+	case 4:
+		tmp |= 1 << 13;
+		break;
+	case 8:
+		tmp |= 2 << 13;
+		break;
+	case 12:
+		tmp |= 3 << 13;
+		break;
+	case 24:
+		tmp |= 4 << 13;
+		break;
+
+	default:
+		pr_err("Wrong eccBits (%i) for PMECC header\n",
+		       pmecc.ecc_bits);
+		return EXIT_FAILURE;
+	}
+
+	tmp |= (pmecc.spare_size & 0x1ff) << 4;
+
+	switch (pmecc.sector_per_page) {
+	case 1:
+		tmp |= 0 << 1;
+		break;
+	case 2:
+		tmp |= 1 << 1;
+		break;
+	case 4:
+		tmp |= 2 << 1;
+		break;
+	case 8:
+		tmp |= 3 << 1;
+		break;
+
+	default:
+		pr_err("Wrong sectorPerPage (%i) for PMECC header\n",
+		       pmecc.sector_per_page);
+		return EXIT_FAILURE;
+	}
+
+	if (pmecc.use_pmecc)
+		tmp |= 1;
+
+	for (pos = 0; pos < ARRAY_SIZE(nand_pmecc_header); pos++)
+		nand_pmecc_header[pos] = tmp;
+
+	debug("PMECC header filled 52 times with 0x%08X\n", tmp);
+
+	tparams->header_size = sizeof(nand_pmecc_header);
+	tparams->hdr = nand_pmecc_header;
+
+	return EXIT_SUCCESS;
+}
+
+U_BOOT_IMAGE_TYPE(
+	atmelimage,
+	"ATMEL ROM-Boot Image support",
+	0,
+	NULL,
+	atmel_check_params,
+	atmel_verify_header,
+	atmel_print_header,
+	atmel_set_header,
+	NULL,
+	atmel_check_image_type,
+	NULL,
+	atmel_vrec_header
+);
diff --git a/tools/u-boot-tools/atmelimage.o b/tools/u-boot-tools/atmelimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..dd1c6953ef37ae3391858a7be0bcbca8c934bc8e
Binary files /dev/null and b/tools/u-boot-tools/atmelimage.o differ
diff --git a/tools/u-boot-tools/binman/.gitignore b/tools/u-boot-tools/binman/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..0d20b6487c61e7d1bde93acf4a14b7a89083a16d
--- /dev/null
+++ b/tools/u-boot-tools/binman/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/tools/u-boot-tools/binman/README b/tools/u-boot-tools/binman/README
new file mode 100644
index 0000000000000000000000000000000000000000..04ed2b799c805ef079259fa8fb56fb4549f9f78c
--- /dev/null
+++ b/tools/u-boot-tools/binman/README
@@ -0,0 +1,780 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+
+Introduction
+------------
+
+Firmware often consists of several components which must be packaged together.
+For example, we may have SPL, U-Boot, a device tree and an environment area
+grouped together and placed in MMC flash. When the system starts, it must be
+able to find these pieces.
+
+So far U-Boot has not provided a way to handle creating such images in a
+general way. Each SoC does what it needs to build an image, often packing or
+concatenating images in the U-Boot build system.
+
+Binman aims to provide a mechanism for building images, from simple
+SPL + U-Boot combinations, to more complex arrangements with many parts.
+
+
+What it does
+------------
+
+Binman reads your board's device tree and finds a node which describes the
+required image layout. It uses this to work out what to place where. The
+output file normally contains the device tree, so it is in principle possible
+to read an image and extract its constituent parts.
+
+
+Features
+--------
+
+So far binman is pretty simple. It supports binary blobs, such as 'u-boot',
+'spl' and 'fdt'. It supports empty entries (such as setting to 0xff). It can
+place entries at a fixed location in the image, or fit them together with
+suitable padding and alignment. It provides a way to process binaries before
+they are included, by adding a Python plug-in. The device tree is available
+to U-Boot at run-time so that the images can be interpreted.
+
+Binman does not yet update the device tree with the final location of
+everything when it is done. A simple C structure could be generated for
+constrained environments like SPL (using dtoc) but this is also not
+implemented.
+
+Binman can also support incorporating filesystems in the image if required.
+For example x86 platforms may use CBFS in some cases.
+
+Binman is intended for use with U-Boot but is designed to be general enough
+to be useful in other image-packaging situations.
+
+
+Motivation
+----------
+
+Packaging of firmware is quite a different task from building the various
+parts. In many cases the various binaries which go into the image come from
+separate build systems. For example, ARM Trusted Firmware is used on ARMv8
+devices but is not built in the U-Boot tree. If a Linux kernel is included
+in the firmware image, it is built elsewhere.
+
+It is of course possible to add more and more build rules to the U-Boot
+build system to cover these cases. It can shell out to other Makefiles and
+build scripts. But it seems better to create a clear divide between building
+software and packaging it.
+
+At present this is handled by manual instructions, different for each board,
+on how to create images that will boot. By turning these instructions into a
+standard format, we can support making valid images for any board without
+manual effort, lots of READMEs, etc.
+
+Benefits:
+- Each binary can have its own build system and tool chain without creating
+any dependencies between them
+- Avoids the need for a single-shot build: individual parts can be updated
+and brought in as needed
+- Provides for a standard image description available in the build and at
+run-time
+- SoC-specific image-signing tools can be accommodated
+- Avoids cluttering the U-Boot build system with image-building code
+- The image description is automatically available at run-time in U-Boot,
+SPL. It can be made available to other software also
+- The image description is easily readable (it's a text file in device-tree
+format) and permits flexible packing of binaries
+
+
+Terminology
+-----------
+
+Binman uses the following terms:
+
+- image - an output file containing a firmware image
+- binary - an input binary that goes into the image
+
+
+Relationship to FIT
+-------------------
+
+FIT is U-Boot's official image format. It supports multiple binaries with
+load / execution addresses and compression. It also supports verification
+through hashing and RSA signatures.
+
+FIT was originally designed to support booting a Linux kernel (with an
+optional ramdisk) and device tree chosen from various options in the FIT.
+Now that U-Boot supports configuration via device tree, it is possible to
+load U-Boot from a FIT, with the device tree chosen by SPL.
+
+Binman considers FIT to be one of the binaries it can place in the image.
+
+Where possible it is best to put as much as possible in the FIT, with binman
+used to deal with cases not covered by FIT. Examples include initial
+execution (since FIT itself does not have an executable header) and dealing
+with device boundaries, such as the read-only/read-write separation in SPI
+flash.
+
+For U-Boot, binman should not be used to create ad-hoc images in place of
+FIT.
+
+
+Relationship to mkimage
+-----------------------
+
+The mkimage tool provides a means to create a FIT. Traditionally it has
+needed an image description file: a device tree, like binman, but in a
+different format. More recently it has started to support a '-f auto' mode
+which can generate that automatically.
+
+More relevant to binman, mkimage also permits creation of many SoC-specific
+image types. These can be listed by running 'mkimage -T list'. Examples
+include 'rksd', the Rockchip SD/MMC boot format. The mkimage tool is often
+called from the U-Boot build system for this reason.
+
+Binman considers the output files created by mkimage to be binary blobs
+which it can place in an image. Binman does not replace the mkimage tool for
+this purpose. It would be possible in some situations to create a new entry
+type for the images in mkimage, but this would not add functionality. It
+seems better to use the mkimage tool to generate binaries and avoid blurring
+the boundaries between building input files (mkimage) and packaging them
+into a final image (binman).
+
+
+Example use of binman in U-Boot
+-------------------------------
+
+Binman aims to replace some of the ad-hoc image creation in the U-Boot
+build system.
+
+Consider sunxi. It has the following steps:
+
+1. It uses a custom mksunxiboot tool to build an SPL image called
+sunxi-spl.bin. This should probably move into mkimage.
+
+2. It uses mkimage to package U-Boot into a legacy image file (so that it can
+hold the load and execution address) called u-boot.img.
+
+3. It builds a final output image called u-boot-sunxi-with-spl.bin which
+consists of sunxi-spl.bin, some padding and u-boot.img.
+
+Binman is intended to replace the last step. The U-Boot build system builds
+u-boot.bin and sunxi-spl.bin. Binman can then take over creation of
+sunxi-spl.bin (by calling mksunxiboot, or hopefully one day mkimage). In any
+case, it would then create the image from the component parts.
+
+This simplifies the U-Boot Makefile somewhat, since various pieces of logic
+can be replaced by a call to binman.
+
+
+Example use of binman for x86
+-----------------------------
+
+In most cases x86 images have a lot of binary blobs, 'black-box' code
+provided by Intel which must be run for the platform to work. Typically
+these blobs are not relocatable and must be placed at fixed areas in the
+firmware image.
+
+Currently this is handled by ifdtool, which places microcode, FSP, MRC, VGA
+BIOS, reference code and Intel ME binaries into a u-boot.rom file.
+
+Binman is intended to replace all of this, with ifdtool left to handle only
+the configuration of the Intel-format descriptor.
+
+
+Running binman
+--------------
+
+Type:
+
+	binman -b <board_name>
+
+to build an image for a board. The board name is the same name used when
+configuring U-Boot (e.g. for sandbox_defconfig the board name is 'sandbox').
+Binman assumes that the input files for the build are in ../b/<board_name>.
+
+Or you can specify this explicitly:
+
+	binman -I <build_path>
+
+where <build_path> is the build directory containing the output of the U-Boot
+build.
+
+(Future work will make this more configurable)
+
+In either case, binman picks up the device tree file (u-boot.dtb) and looks
+for its instructions in the 'binman' node.
+
+Binman has a few other options which you can see by running 'binman -h'.
+
+
+Enabling binman for a board
+---------------------------
+
+At present binman is invoked from a rule in the main Makefile. Typically you
+will have a rule like:
+
+ifneq ($(CONFIG_ARCH_<something>),)
+u-boot-<your_suffix>.bin: <input_file_1> <input_file_2> checkbinman FORCE
+	$(call if_changed,binman)
+endif
+
+This assumes that u-boot-<your_suffix>.bin is a target, and is the final file
+that you need to produce. You can make it a target by adding it to ALL-y
+either in the main Makefile or in a config.mk file in your arch subdirectory.
+
+Once binman is executed it will pick up its instructions from a device-tree
+file, typically <soc>-u-boot.dtsi, where <soc> is your CONFIG_SYS_SOC value.
+You can use other, more specific CONFIG options - see 'Automatic .dtsi
+inclusion' below.
+
+
+Image description format
+------------------------
+
+The binman node is called 'binman'. An example image description is shown
+below:
+
+	binman {
+		filename = "u-boot-sunxi-with-spl.bin";
+		pad-byte = <0xff>;
+		blob {
+			filename = "spl/sunxi-spl.bin";
+		};
+		u-boot {
+			offset = <CONFIG_SPL_PAD_TO>;
+		};
+	};
+
+
+This requests binman to create an image file called u-boot-sunxi-with-spl.bin
+consisting of a specially formatted SPL (spl/sunxi-spl.bin, built by the
+normal U-Boot Makefile), some 0xff padding, and a U-Boot legacy image. The
+padding comes from the fact that the second binary is placed at
+CONFIG_SPL_PAD_TO. If that line were omitted then the U-Boot binary would
+immediately follow the SPL binary.
+
+The binman node describes an image. The sub-nodes describe entries in the
+image. Each entry represents a region within the overall image. The name of
+the entry (blob, u-boot) tells binman what to put there. For 'blob' we must
+provide a filename. For 'u-boot', binman knows that this means 'u-boot.bin'.
+
+Entries are normally placed into the image sequentially, one after the other.
+The image size is the total size of all entries. As you can see, you can
+specify the start offset of an entry using the 'offset' property.
+
+Note that due to a device tree requirement, all entries must have a unique
+name. If you want to put the same binary in the image multiple times, you can
+use any unique name, with the 'type' property providing the type.
+
+The attributes supported for entries are described below.
+
+offset:
+	This sets the offset of an entry within the image or section containing
+	it. The first byte of the image is normally at offset 0. If 'offset' is
+	not provided, binman sets it to the end of the previous region, or the
+	start of the image's entry area (normally 0) if there is no previous
+	region.
+
+align:
+	This sets the alignment of the entry. The entry offset is adjusted
+	so that the entry starts on an aligned boundary within the image. For
+	example 'align = <16>' means that the entry will start on a 16-byte
+	boundary. Alignment should be a power of 2. If 'align' is not
+	provided, no alignment is performed.
+
+size:
+	This sets the size of the entry. The contents will be padded out to
+	this size. If this is not provided, it will be set to the size of the
+	contents.
+
+pad-before:
+	Padding before the contents of the entry. Normally this is 0, meaning
+	that the contents start at the beginning of the entry. This can be
+	used to offset the entry contents a little. Defaults to 0.
+
+pad-after:
+	Padding after the contents of the entry. Normally this is 0, meaning
+	that the entry ends at the last byte of content (unless adjusted by
+	other properties). This allows room to be created in the image for
+	this entry to expand later. Defaults to 0.
+
+align-size:
+	This sets the alignment of the entry size. For example, to ensure
+	that the size of an entry is a multiple of 64 bytes, set this to 64.
+	If 'align-size' is not provided, no alignment is performed.
+
+align-end:
+	This sets the alignment of the end of an entry. Some entries require
+	that they end on an alignment boundary, regardless of where they
+	start. This does not move the start of the entry, so the contents of
+	the entry will still start at the beginning. But there may be padding
+	at the end. If 'align-end' is not provided, no alignment is performed.
+
+filename:
+	For 'blob' types this provides the filename containing the binary to
+	put into the entry. If binman knows about the entry type (like
+	u-boot-bin), then there is no need to specify this.
+
+type:
+	Sets the type of an entry. This defaults to the entry name, but it is
+	possible to use any name, and then add (for example) 'type = "u-boot"'
+	to specify the type.
+
+offset-unset:
+	Indicates that the offset of this entry should not be set by placing
+	it immediately after the previous entry. Instead, it is set by another
+	entry which knows where this entry should go. When this boolean
+	property is present, binman will give an error if another entry does
+	not set the offset (with the GetOffsets() method).
+
+image-pos:
+	This cannot be set on an entry (or at least it is ignored if it is), but
+	with the -u option, binman will set it to the absolute image position
+	for each entry. This makes it easy to find out exactly where the entry
+	ended up in the image, regardless of parent sections, etc.
+
+expand-size:
+	Expand the size of this entry to fit available space. This space is only
+	limited by the size of the image/section and the position of the next
+	entry.
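+
+As an illustration only (the values are invented, not from a real board), an
+entry might combine several of these properties like this:
+
+	u-boot {
+		align = <16>;
+		align-size = <512>;
+		pad-after = <0x100>;
+	};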
+
+The attributes supported for images and sections are described below. Several
+are similar to those for entries.
+
+size:
+	Sets the image size in bytes, for example 'size = <0x100000>' for a
+	1MB image.
+
+align-size:
+	This sets the alignment of the image size. For example, to ensure
+	that the image ends on a 512-byte boundary, use 'align-size = <512>'.
+	If 'align-size' is not provided, no alignment is performed.
+
+pad-before:
+	This sets the padding before the image entries. The first entry will
+	be positioned after the padding. This defaults to 0.
+
+pad-after:
+	This sets the padding after the image entries. The padding will be
+	placed after the last entry. This defaults to 0.
+
+pad-byte:
+	This specifies the pad byte to use when padding in the image. It
+	defaults to 0. To use 0xff, you would add 'pad-byte = <0xff>'.
+
+filename:
+	This specifies the image filename. It defaults to 'image.bin'.
+
+sort-by-offset:
+	This causes binman to reorder the entries as needed to make sure they
+	are in increasing positional order. This can be used when your entry
+	order may not match the positional order. A common situation is where
+	the 'offset' properties are set by CONFIG options, so their ordering is
+	not known a priori.
+
+	This is a boolean property so needs no value. To enable it, add a
+	line 'sort-by-offset;' to your description.
+
+multiple-images:
+	Normally only a single image is generated. To create more than one
+	image, put this property in the binman node. For example, this will
+	create image1.bin containing u-boot.bin, and image2.bin containing
+	both spl/u-boot-spl.bin and u-boot.bin:
+
+	binman {
+		multiple-images;
+		image1 {
+			u-boot {
+			};
+		};
+
+		image2 {
+			spl {
+			};
+			u-boot {
+			};
+		};
+	};
+
+end-at-4gb:
+	For x86 machines the ROM offsets start just before 4GB and extend
+	up so that the image finishes at the 4GB boundary. This boolean
+	option can be enabled to support this. The image size must be
+	provided so that binman knows when the image should start. For an
+	provided so that binman knows where the image should start. For an
+	this option, instead of 0 without this option.
+
+skip-at-start:
+	This property specifies the entry offset of the first entry.
+
+	For PowerPC mpc85xx based CPUs, CONFIG_SYS_TEXT_BASE is the entry
+	offset of the first entry. It can be 0xeff40000 or 0xfff40000 for
+	NOR flash boot, 0x201000 for SD boot, etc.
+
+	The 'end-at-4gb' property is not applicable where CONFIG_SYS_TEXT_BASE +
+	image size != 4GB.
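+
+As a sketch only (the size and entry offset are invented for illustration),
+an 8MB x86-style image using 'end-at-4gb' as described above could be
+declared as:
+
+	binman {
+		end-at-4gb;
+		size = <0x800000>;
+		u-boot {
+			offset = <0xfff80000>;
+		};
+	};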
+
+Examples of the above options can be found in the tests. See the
+tools/binman/test directory.
+
+It is possible to have the same binary appear multiple times in the image,
+either by using a unit number suffix (u-boot@0, u-boot@1) or by using a
+different name for each and specifying the type with the 'type' attribute.
+
+
+Sections and hierarchical images
+--------------------------------
+
+Sometimes it is convenient to split an image into several pieces, each of which
+contains its own set of binaries. An example is a flash device where part of
+the image is read-only and part is read-write. We can set up sections for each
+of these, and place binaries in them independently. The image is still produced
+as a single output file.
+
+This feature provides a way of creating hierarchical images. For example, here
+is an image with two copies of U-Boot. One is read-only (ro), intended
+to be written only in the factory. Another is read-write (rw), so that it can be
+upgraded in the field. The sizes are fixed so that the ro/rw boundary is known
+and can be programmed:
+
+	binman {
+		section@0 {
+			read-only;
+			name-prefix = "ro-";
+			size = <0x100000>;
+			u-boot {
+			};
+		};
+		section@1 {
+			name-prefix = "rw-";
+			size = <0x100000>;
+			u-boot {
+			};
+		};
+	};
+
+This image could be placed into a SPI flash chip, with the protection boundary
+set at 1MB.
+
+A few special properties are provided for sections:
+
+read-only:
+	Indicates that this section is read-only. This has no impact on binman's
+	operation, but this property can be read at run time.
+
+name-prefix:
+	This string is prepended to all the names of the binaries in the
+	section. In the example above, the 'u-boot' binaries will actually be
+	renamed to 'ro-u-boot' and 'rw-u-boot'. This can be useful to
+	distinguish binaries with otherwise identical names.
+
+
+Entry Documentation
+-------------------
+
+For details on the various entry types supported by binman and how to use them,
+see README.entries. This is generated from the source code using:
+
+	binman -E >tools/binman/README.entries
+
+
+Hashing Entries
+---------------
+
+It is possible to ask binman to hash the contents of an entry and write that
+value back to the device-tree node. For example:
+
+	binman {
+		u-boot {
+			hash {
+				algo = "sha256";
+			};
+		};
+	};
+
+Here, a new 'value' property will be written to the 'hash' node containing
+the hash of the 'u-boot' entry. Only SHA256 is supported at present. Whole
+sections can be hashed if desired, by adding the 'hash' node to the section.
+
+The hash value can be checked at runtime by hashing the data actually read and
+comparing this hash to the value in the device tree.
+
+
+Order of image creation
+-----------------------
+
+Image creation proceeds in the following order, for each entry in the image.
+
+1. AddMissingProperties() - binman can add calculated values to the device
+tree as part of its processing, for example the offset and size of each
+entry. This method adds any properties associated with this, expanding the
+device tree as needed. These properties can have placeholder values which are
+set later by SetCalculatedProperties(). By that stage the size of sections
+cannot be changed (since it would cause the images to need to be repacked),
+but the correct values can be inserted.
+
+2. ProcessFdt() - process the device tree information as required by the
+particular entry. This may involve adding or deleting properties. If the
+processing is complete, this method should return True. If the processing
+cannot complete because it needs the ProcessFdt() method of another entry to
+run first, this method should return False, in which case it will be called
+again later.
+
+3. GetEntryContents() - the contents of each entry are obtained, normally by
+reading from a file. This calls the Entry.ObtainContents() to read the
+contents. The default version of Entry.ObtainContents() calls
+Entry.GetDefaultFilename() and then reads that file. So a common mechanism
+to select a file to read is to override that function in the subclass. The
+functions must return True when they have read the contents. Binman will
+retry calling the functions a few times if False is returned, allowing
+dependencies between the contents of different entries.
+
+4. GetEntryOffsets() - calls Entry.GetOffsets() for each entry. This can
+return a dict containing entries that need updating. The key should be the
+entry name and the value is a tuple (offset, size). This allows an entry to
+provide the offset and size for other entries. The default implementation
+of GetEntryOffsets() returns {}.
+
+5. PackEntries() - calls Entry.Pack() which figures out the offset and
+size of an entry. The 'current' image offset is passed in, and the function
+returns the offset immediately after the entry being packed. The default
+implementation of Pack() is usually sufficient.
+
+6. CheckSize() - checks that the contents of all the entries fits within
+the image size. If the image does not have a defined size, the size is set
+large enough to hold all the entries.
+
+7. CheckEntries() - checks that the entries do not overlap, nor extend
+outside the image.
+
+8. SetCalculatedProperties() - update any calculated properties in the device
+tree. This sets the correct 'offset' and 'size' values, for example.
+
+9. ProcessEntryContents() - this calls Entry.ProcessContents() on each entry.
+The default implementation does nothing. This can be overridden to adjust the
+contents of an entry in some way. For example, it would be possible to create
+an entry containing a hash of the contents of some other entries. At this
+stage the offset and size of entries should not be adjusted.
+
+10. WriteSymbols() - write the value of symbols into the U-Boot SPL binary.
+See 'Access to binman entry offsets at run time' below for a description of
+what happens in this stage.
+
+11. BuildImage() - builds the image and writes it to a file. This is the final
+step.
+
+
+Automatic .dtsi inclusion
+-------------------------
+
+It is sometimes inconvenient to add a 'binman' node to the .dts file for each
+board. This can be done by using #include to bring in a common file. Another
+approach supported by the U-Boot build system is to automatically include
+a common header. You can then put the binman node (and anything else that is
+specific to U-Boot, such as u-boot,dm-pre-reloc properties) in that header
+file.
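+
+For instance (all names and contents here are invented for illustration), such
+a header file might contain nothing but the binman node:
+
+	/ {
+		binman {
+			filename = "u-boot-with-spl.bin";
+			spl {
+			};
+			u-boot {
+			};
+		};
+	};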
+
+Binman will search for the following files in arch/<arch>/dts:
+
+   <dts>-u-boot.dtsi where <dts> is the base name of the .dts file
+   <CONFIG_SYS_SOC>-u-boot.dtsi
+   <CONFIG_SYS_CPU>-u-boot.dtsi
+   <CONFIG_SYS_VENDOR>-u-boot.dtsi
+   u-boot.dtsi
+
+U-Boot will only use the first one that it finds. If you need to include a
+more general file you can do that from the more specific file using #include.
+If you are having trouble figuring out what is going on, you can uncomment
+the 'warning' line in scripts/Makefile.lib to see what it has found:
+
+   # Uncomment for debugging
+   # This shows all the files that were considered and the one that we chose.
+   # u_boot_dtsi_options_debug = $(u_boot_dtsi_options_raw)
+
+
+Access to binman entry offsets at run time (symbols)
+----------------------------------------------------
+
+Binman assembles images and determines where each entry is placed in the image.
+This information may be useful to U-Boot at run time. For example, in SPL it
+is useful to be able to find the location of U-Boot so that it can be executed
+when SPL is finished.
+
+Binman allows you to declare symbols in the SPL image which are filled in
+with their correct values during the build. For example:
+
+    binman_sym_declare(ulong, u_boot_any, offset);
+
+declares a ulong value which will be assigned to the offset of any U-Boot
+image (u-boot.bin, u-boot.img, u-boot-nodtb.bin) that is present in the image.
+You can access this value with something like:
+
+    ulong u_boot_offset = binman_sym(ulong, u_boot_any, offset);
+
+Thus u_boot_offset will be set to the offset of U-Boot in memory, assuming that
+the whole image has been loaded, or is available in flash. You can then jump to
+that address to start U-Boot.
+
+At present this feature is only supported in SPL. In principle it is possible
+to fill in such symbols in U-Boot proper, as well.
+
+
+Access to binman entry offsets at run time (fdt)
+------------------------------------------------
+
+Binman can update the U-Boot FDT to include the final position and size of
+each entry in the images it processes. The option to enable this is -u and it
+causes binman to make sure that the 'offset', 'image-pos' and 'size' properties
+are set correctly for every entry. Since it is not necessary to specify these in
+the image definition, binman calculates the final values and writes these to
+the device tree. These can be used by U-Boot at run-time to find the location
+of each entry.
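+
+For example, after building with -u the binman node might end up looking
+something like this (the values shown are purely illustrative):
+
+    binman {
+        u-boot-spl {
+            offset = <0>;
+            size = <0x8000>;
+            image-pos = <0>;
+        };
+        u-boot {
+            offset = <0x8000>;
+            size = <0x60000>;
+            image-pos = <0x8000>;
+        };
+    };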
+
+
+Compression
+-----------
+
+Binman supports compression for 'blob' entries (those of type 'blob' and
+derivatives). To enable this for an entry, add a 'compression' property:
+
+    blob {
+        filename = "datafile";
+        compression = "lz4";
+    };
+
+The entry will then contain the compressed data, using the 'lz4' compression
+algorithm. Currently this is the only one that is supported.
+
+
+
+Map files
+---------
+
+The -m option causes binman to output a .map file for each image that it
+generates. This shows the offset and size of each entry. For example:
+
+      Offset      Size  Name
+    00000000  00000028  main-section
+     00000000  00000010  section@0
+      00000000  00000004  u-boot
+     00000010  00000010  section@1
+      00000000  00000004  u-boot
+
+This shows a hierarchical image with two sections, each with a single entry. The
+offsets of the sections are absolute hex byte offsets within the image. The
+offsets of the entries are relative to their respective sections. The size of
+each entry is also shown, in bytes (hex). The indentation shows the entries
+nested inside their sections.
+
+
+Passing command-line arguments to entries
+-----------------------------------------
+
+Sometimes it is useful to pass binman the value of an entry property from the
+command line. For example some entries need access to files and it is not
+always convenient to put these filenames in the image definition (device tree).
+
+The -a option supports this:
+
+    -a<prop>=<value>
+
+where
+
+    <prop> is the property to set
+    <value> is the value to set it to
+
+Not all properties can be provided this way. Only some entries support it,
+typically for filenames.
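+
+For example, to provide the filename for a 'cros-ec-rw' entry (see
+README.entries), something like this could be used (the filename here is
+illustrative):
+
+    binman -a cros-ec-rw-path=ec.RW.bin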
+
+
+Code coverage
+-------------
+
+Binman is a critical tool and is designed to be very testable. Entry
+implementations target 100% test coverage. Run 'binman -T' to check this.
+
+To enable Python test coverage on Debian-type distributions (e.g. Ubuntu):
+
+   $ sudo apt-get install python-coverage python-pytest
+
+
+Advanced Features / Technical docs
+----------------------------------
+
+The behaviour of entries is defined by the Entry class. All other entries are
+a subclass of this. An important subclass is Entry_blob which takes binary
+data from a file and places it in the entry. In fact most entry types are
+subclasses of Entry_blob.
+
+Each entry type is a separate file in the tools/binman/etype directory. Each
+file contains a class called Entry_<type> where <type> is the entry type.
+New entry types can be supported by adding new files in that directory.
+These will automatically be detected by binman when needed.
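+
+For example, a new entry type might look something like this (a hypothetical
+'my-blob' type, following the pattern used by the existing blob-based entries):
+
+    # tools/binman/etype/my_blob.py (hypothetical example)
+    from blob import Entry_blob
+
+    class Entry_my_blob(Entry_blob):
+        """Entry containing an example binary blob"""
+        def __init__(self, section, etype, node):
+            Entry_blob.__init__(self, section, etype, node)
+
+        def GetDefaultFilename(self):
+            return 'my-blob.bin'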
+
+Entry properties are documented in entry.py. The entry subclasses are free
+to change the values of properties to support special behaviour. For example,
+when Entry_blob loads a file, it sets content_size to the size of the file.
+Entry classes can adjust other entries. For example, an entry that knows
+where other entries should be positioned can set up those entries' offsets
+so they don't need to be set in the binman description. It can also adjust
+entry contents.
+
+Most of the time such esoteric behaviour is not needed, but it can be
+essential for complex images.
+
+If you need to specify a particular device-tree compiler to use, you can define
+the DTC environment variable. This can be useful when the system dtc is too
+old.
+
+To enable a full backtrace and other debugging features in binman, pass
+BINMAN_DEBUG=1 to your build:
+
+   make sandbox_defconfig
+   make BINMAN_DEBUG=1
+
+
+History / Credits
+-----------------
+
+Binman takes a lot of inspiration from a Chrome OS tool called
+'cros_bundle_firmware', which I wrote some years ago. That tool was based on
+a reasonably simple and sound design but has expanded greatly over the
+years. In particular its handling of x86 images is convoluted.
+
+Quite a few lessons have been learned which are hopefully applied here.
+
+
+Design notes
+------------
+
+On the face of it, a tool to create firmware images should be fairly simple:
+just find all the input binaries and place them at the right place in the
+image. The difficulty comes from the wide variety of input types (simple
+flat binaries containing code, packaged data with various headers), packing
+requirements (alignment, spacing, device boundaries) and other required
+features such as hierarchical images.
+
+The design challenge is to make it easy to create simple images, while
+allowing the more complex cases to be supported. For example, for most
+images we don't much care exactly where each binary ends up, so we should
+not have to specify that unnecessarily.
+
+New entry types should aim to provide simple usage where possible. If new
+core features are needed, they can be added in the Entry base class.
+
+
+To do
+-----
+
+Some ideas:
+- Use of-platdata to make the information available to code that is unable
+  to use device tree (such as a very small SPL image)
+- Allow easy building of images by specifying just the board name
+- Produce a full Python binding for libfdt (for upstream). This is nearing
+    completion but some work remains
+- Add an option to decode an image into the constituent binaries
+- Support building an image for a board (-b) more completely, with a
+  configurable build directory
+- Consider making binman work with buildman, although if it is used in the
+  Makefile, this will be automatic
+
+--
+Simon Glass <sjg@chromium.org>
+7/7/2016
diff --git a/tools/u-boot-tools/binman/README.entries b/tools/u-boot-tools/binman/README.entries
new file mode 100644
index 0000000000000000000000000000000000000000..9fc2f83280ef771ff1e90d9f2f0ed02d68c99781
--- /dev/null
+++ b/tools/u-boot-tools/binman/README.entries
@@ -0,0 +1,698 @@
+Binman Entry Documentation
+===========================
+
+This file describes the entry types supported by binman. These entry types can
+be placed in an image one by one to build up a final firmware image. It is
+fairly easy to create new entry types. Just add a new file to the 'etype'
+directory. You can use the existing entries as examples.
+
+Note that some entries are subclasses of others, using and extending their
+features to produce new behaviours.
+
+
+
+Entry: blob: Entry containing an arbitrary binary blob
+------------------------------------------------------
+
+Note: This should not be used by itself. It is normally used as a parent
+class by other entry types.
+
+Properties / Entry arguments:
+    - filename: Filename of file to read into entry
+    - compress: Compression algorithm to use:
+        none: No compression
+        lz4: Use lz4 compression (via 'lz4' command-line utility)
+
+This entry reads data from a file and places it in the entry. The
+default filename is often specified specified by the subclass. See for
+example the 'u_boot' entry which provides the filename 'u-boot.bin'.
+
+If compression is enabled, an extra 'uncomp-size' property is written to
+the node (if enabled with -u) which provides the uncompressed size of the
+data.
+
+
+
+Entry: blob-dtb: A blob that holds a device tree
+------------------------------------------------
+
+This is a blob containing a device tree. The contents of the blob are
+obtained from the list of available device-tree files, managed by the
+'state' module.
+
+
+
+Entry: blob-named-by-arg: A blob entry which gets its filename property from its subclass
+-----------------------------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - <xxx>-path: Filename containing the contents of this entry (optional,
+        defaults to 0)
+
+where <xxx> is the blob_fname argument to the constructor.
+
+This entry cannot be used directly. Instead, it is used as a parent class
+for another entry, which defines blob_fname. This parameter is used to
+set the entry-arg or property containing the filename. The entry-arg or
+property is in turn used to set the actual filename.
+
+See cros_ec_rw for an example of this.
+
+
+
+Entry: cros-ec-rw: A blob entry which contains a Chromium OS read-write EC image
+--------------------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - cros-ec-rw-path: Filename containing the EC image
+
+This entry holds a Chromium OS EC (embedded controller) image, for use in
+updating the EC on startup via software sync.
+
+
+
+Entry: files: Entry containing a set of files
+---------------------------------------------
+
+Properties / Entry arguments:
+    - pattern: Filename pattern to match the files to include
+    - compress: Compression algorithm to use:
+        none: No compression
+        lz4: Use lz4 compression (via 'lz4' command-line utility)
+
+This entry reads a number of files and places each in a separate sub-entry
+within this entry. To access these you need to enable device-tree updates
+at run-time so you can obtain the file positions.
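+
+For example (the pattern shown is illustrative):
+
+    files {
+        pattern = "*.dat";
+    };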
+
+
+
+Entry: fill: An entry which is filled to a particular byte value
+----------------------------------------------------------------
+
+Properties / Entry arguments:
+    - fill-byte: Byte to use to fill the entry
+
+Note that the size property must be set since otherwise this entry does not
+know how large it should be.
+
+You can often achieve the same effect using the pad-byte property of the
+overall image, in that the space between entries will then be padded with
+that byte. But this entry is sometimes useful for explicitly setting the
+byte value of a region.
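+
+For example (size and fill value are illustrative):
+
+    fill {
+        size = <0x1000>;
+        fill-byte = [ff];
+    };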
+
+
+
+Entry: fmap: An entry which contains an Fmap section
+----------------------------------------------------
+
+Properties / Entry arguments:
+    None
+
+FMAP is a simple format used by flashrom, an open-source utility for
+reading and writing the SPI flash, typically on x86 CPUs. The format
+provides flashrom with a list of areas, so it knows what is in the flash.
+It can then read or write just a single area, instead of the whole flash.
+
+The format is defined by the flashrom project, in the file lib/fmap.h -
+see www.flashrom.org/Flashrom for more information.
+
+When used, this entry will be populated with an FMAP which reflects the
+entries in the current image. Note that any hierarchy is squashed, since
+FMAP does not support this.
+
+
+
+Entry: gbb: An entry which contains a Chromium OS Google Binary Block
+---------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - hardware-id: Hardware ID to use for this build (a string)
+    - keydir: Directory containing the public keys to use
+    - bmpblk: Filename containing images used by recovery
+
+Chromium OS uses a GBB to store various pieces of information, in particular
+the root and recovery keys that are used to verify the boot process. Some
+more details are here:
+
+    https://www.chromium.org/chromium-os/firmware-porting-guide/2-concepts
+
+but note that the page dates from 2013 so is quite out of date. See
+README.chromium for how to obtain the required keys and tools.
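+
+A node might look something like this (all values are illustrative):
+
+    gbb {
+        size = <0x2180>;
+        hardware-id = "my-hwid";
+        keydir = "devkeys";
+        bmpblk = "bmpblk.bin";
+    };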
+
+
+
+Entry: intel-cmc: Entry containing an Intel Chipset Micro Code (CMC) file
+-------------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of file to read into entry
+
+This file contains microcode for some devices in a special format. An
+example filename is 'Microcode/C0_22211.BIN'.
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+Entry: intel-descriptor: Intel flash descriptor block (4KB)
+-----------------------------------------------------------
+
+Properties / Entry arguments:
+    filename: Filename of file containing the descriptor. This is typically
+        a 4KB binary file, sometimes called 'descriptor.bin'
+
+This entry is placed at the start of flash and provides information about
+the SPI flash regions. In particular it provides the base address and
+size of the ME (Management Engine) region, allowing us to place the ME
+binary in the right place.
+
+With this entry in your image, the position of the 'intel-me' entry will be
+fixed in the image, which avoids the need to specify an offset for that
+region. This is useful, because it is not possible to change the position
+of the ME region without updating the descriptor.
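+
+For example, an x86 image might start like this, with no offset given for the
+'intel-me' entry (sizes and filenames are illustrative):
+
+    binman {
+        end-at-4gb;
+        size = <0x800000>;
+        intel-descriptor {
+            filename = "descriptor.bin";
+        };
+        intel-me {
+            filename = "me.bin";
+        };
+    };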
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+Entry: intel-fsp: Entry containing an Intel Firmware Support Package (FSP) file
+-------------------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of file to read into entry
+
+This file contains binary blobs which are used on some devices to make the
+platform work. U-Boot executes this code since it is not possible to set up
+the hardware using U-Boot open-source code. Documentation is typically not
+available in sufficient detail to allow this.
+
+An example filename is 'FSP/QUEENSBAY_FSP_GOLD_001_20-DECEMBER-2013.fd'
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+Entry: intel-me: Entry containing an Intel Management Engine (ME) file
+----------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of file to read into entry
+
+This file contains code used by the SoC that is required to make it work.
+The Management Engine is like a background task that runs things that are
+not clearly documented, but may include keyboard, display and network
+access. For platforms that use ME it is not possible to disable it. U-Boot
+does not directly execute code in the ME binary.
+
+A typical filename is 'me.bin'.
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+Entry: intel-mrc: Entry containing an Intel Memory Reference Code (MRC) file
+----------------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of file to read into entry
+
+This file contains code for setting up the SDRAM on some Intel systems. This
+is executed by U-Boot when needed early during startup. A typical filename
+is 'mrc.bin'.
+
+See README.x86 for information about x86 binary blobs.
+
+
+
+Entry: intel-vbt: Entry containing an Intel Video BIOS Table (VBT) file
+-----------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of file to read into entry
+
+This file contains code that sets up the integrated graphics subsystem on
+some Intel SoCs. U-Boot executes this when the display is started up.
+
+See README.x86 for information about Intel binary blobs.
+
+
+
+Entry: intel-vga: Entry containing an Intel Video Graphics Adaptor (VGA) file
+-----------------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of file to read into entry
+
+This file contains code that sets up the integrated graphics subsystem on
+some Intel SoCs. U-Boot executes this when the display is started up.
+
+This is similar to the VBT file but in a different format.
+
+See README.x86 for information about Intel binary blobs.
+
+
+
+Entry: powerpc-mpc85xx-bootpg-resetvec: PowerPC mpc85xx bootpg + resetvec code for U-Boot
+-----------------------------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of u-boot-br.bin (default 'u-boot-br.bin')
+
+This entry is valid for PowerPC mpc85xx CPUs. It holds
+'bootpg + resetvec' code for PowerPC mpc85xx CPUs which needs to be
+placed at offset 'RESET_VECTOR_ADDRESS - 0xffc'.
+
+
+
+Entry: section: Entry that contains other entries
+-------------------------------------------------
+
+Properties / Entry arguments: (see binman README for more information)
+    - size: Size of section in bytes
+    - align-size: Align size to a particular power of two
+    - pad-before: Add padding before the entry
+    - pad-after: Add padding after the entry
+    - pad-byte: Pad byte to use when padding
+    - sort-by-offset: Reorder the entries by offset
+    - end-at-4gb: Used to build an x86 ROM which ends at 4GB (2^32)
+    - name-prefix: Adds a prefix to the name of every entry in the section
+        when writing out the map
+
+A section is an entry which can contain other entries, thus allowing
+hierarchical images to be created. See 'Sections and hierarchical images'
+in the binman README for more information.
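+
+For example, this describes an image with two sections, each containing a
+single U-Boot binary (matching the map-file example in the binman README;
+sizes are illustrative):
+
+    binman {
+        section@0 {
+            size = <0x10>;
+            u-boot {
+            };
+        };
+        section@1 {
+            size = <0x10>;
+            u-boot {
+            };
+        };
+    };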
+
+
+
+Entry: text: An entry which contains text
+-----------------------------------------
+
+The text can be provided either in the node itself or by a command-line
+argument. There is a level of indirection to allow multiple text strings
+and sharing of text.
+
+Properties / Entry arguments:
+    text-label: The value of this string indicates the property / entry-arg
+        that contains the string to place in the entry
+    <xxx> (actual name is the value of text-label): contains the string to
+        place in the entry.
+
+Example node:
+
+    text {
+        size = <50>;
+        text-label = "message";
+    };
+
+You can then use:
+
+    binman -amessage="this is my message"
+
+and binman will insert that string into the entry.
+
+It is also possible to put the string directly in the node:
+
+    text {
+        size = <8>;
+        text-label = "message";
+        message = "a message directly in the node";
+    };
+
+The text is not itself nul-terminated. This can be achieved, if required,
+by setting the size of the entry to something larger than the text.
+
+
+
+Entry: u-boot: U-Boot flat binary
+---------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of u-boot.bin (default 'u-boot.bin')
+
+This is the U-Boot binary, containing relocation information to allow it
+to relocate itself at runtime. The binary typically includes a device tree
+blob at the end of it. Use u_boot_nodtb if you want to package the device
+tree separately.
+
+U-Boot can access binman symbols at runtime. See:
+
+    'Access to binman entry offsets at run time (fdt)'
+
+in the binman README for more information.
+
+
+
+Entry: u-boot-dtb: U-Boot device tree
+-------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of u-boot.dtb (default 'u-boot.dtb')
+
+This is the U-Boot device tree, containing configuration information for
+U-Boot. U-Boot needs this to know what devices are present and which drivers
+to activate.
+
+Note: This is mostly an internal entry type, used by others. This allows
+binman to know which entries contain a device tree.
+
+
+
+Entry: u-boot-dtb-with-ucode: A U-Boot device tree file, with the microcode removed
+-----------------------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of u-boot.dtb (default 'u-boot.dtb')
+
+See Entry_u_boot_ucode for full details of the three entries involved in
+this process. This entry provides the U-Boot device-tree file, which
+contains the microcode. If the microcode is not being collated into one
+place then the offset and size of the microcode is recorded by this entry,
+for use by u_boot_with_ucode_ptr. If it is being collated, then this
+entry deletes the microcode from the device tree (to save space) and makes
+it available to u_boot_ucode.
+
+
+
+Entry: u-boot-elf: U-Boot ELF image
+-----------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of u-boot (default 'u-boot')
+
+This is the U-Boot ELF image. It does not include a device tree but can be
+relocated to any address for execution.
+
+
+
+Entry: u-boot-img: U-Boot legacy image
+--------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of u-boot.img (default 'u-boot.img')
+
+This is the U-Boot binary as a packaged image, in legacy format. It has a
+header which allows it to be loaded at the correct address for execution.
+
+You should use FIT (Flat Image Tree) instead of the legacy image for new
+applications.
+
+
+
+Entry: u-boot-nodtb: U-Boot flat binary without device tree appended
+--------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of u-boot.bin (default 'u-boot-nodtb.bin')
+
+This is the U-Boot binary, containing relocation information to allow it
+to relocate itself at runtime. It does not include a device tree blob at
+the end of it so normally cannot work without it. You can add a u_boot_dtb
+entry after this one, or use a u_boot entry instead (which contains both
+U-Boot and the device tree).
+
+
+
+Entry: u-boot-spl: U-Boot SPL binary
+------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of u-boot-spl.bin (default 'spl/u-boot-spl.bin')
+
+This is the U-Boot SPL (Secondary Program Loader) binary. This is a small
+binary which loads before U-Boot proper, typically into on-chip SRAM. It is
+responsible for locating, loading and jumping to U-Boot. Note that SPL is
+not relocatable so must be loaded to the correct address in SRAM, or written
+to run from the correct address if direct flash execution is possible (e.g.
+on x86 devices).
+
+SPL can access binman symbols at runtime. See:
+
+    'Access to binman entry offsets at run time (symbols)'
+
+in the binman README for more information.
+
+The ELF file 'spl/u-boot-spl' must also be available for this to work, since
+binman uses that to look up symbols to write into the SPL binary.
+
+
+
+Entry: u-boot-spl-bss-pad: U-Boot SPL binary padded with a BSS region
+---------------------------------------------------------------------
+
+Properties / Entry arguments:
+    None
+
+This is similar to u_boot_spl except that padding is added after the SPL
+binary to cover the BSS (Block Started by Symbol) region. This region holds
+the various variables used by SPL. It is set to 0 by SPL when it starts up.
+If you want to append data to the SPL image (such as a device tree file), you
+must pad out the BSS region to avoid the data overlapping with U-Boot
+variables. This entry is useful in that case. It automatically pads out the
+entry size to cover the code, data and BSS.
+
+The ELF file 'spl/u-boot-spl' must also be available for this to work, since
+binman uses that to look up the BSS address.
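+
+For example, to place the SPL device tree after the BSS padding (a sketch;
+the exact entry list depends on the board):
+
+    binman {
+        u-boot-spl {
+        };
+        u-boot-spl-bss-pad {
+        };
+        u-boot-spl-dtb {
+        };
+    };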
+
+
+
+Entry: u-boot-spl-dtb: U-Boot SPL device tree
+---------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of u-boot.dtb (default 'spl/u-boot-spl.dtb')
+
+This is the SPL device tree, containing configuration information for
+SPL. SPL needs this to know what devices are present and which drivers
+to activate.
+
+
+
+Entry: u-boot-spl-elf: U-Boot SPL ELF image
+-------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of SPL u-boot (default 'spl/u-boot')
+
+This is the U-Boot SPL ELF image. It does not include a device tree but can
+be relocated to any address for execution.
+
+
+
+Entry: u-boot-spl-nodtb: SPL binary without device tree appended
+----------------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of spl/u-boot-spl-nodtb.bin (default
+        'spl/u-boot-spl-nodtb.bin')
+
+This is the U-Boot SPL binary. It does not include a device tree blob at
+the end of it so may not be able to work without it, assuming SPL needs
+a device tree to operate on your platform. You can add a u_boot_spl_dtb
+entry after this one, or use a u_boot_spl entry instead (which contains
+both SPL and the device tree).
+
+
+
+Entry: u-boot-spl-with-ucode-ptr: U-Boot SPL with embedded microcode pointer
+----------------------------------------------------------------------------
+
+This is used when SPL must set up the microcode for U-Boot.
+
+See Entry_u_boot_ucode for full details of the entries involved in this
+process.
+
+
+
+Entry: u-boot-tpl: U-Boot TPL binary
+------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of u-boot-tpl.bin (default 'tpl/u-boot-tpl.bin')
+
+This is the U-Boot TPL (Tertiary Program Loader) binary. This is a small
+binary which loads before SPL, typically into on-chip SRAM. It is
+responsible for locating, loading and jumping to SPL, the next-stage
+loader. Note that TPL is not relocatable so must be loaded to the correct
+address in SRAM, or written to run from the correct address if direct
+flash execution is possible (e.g. on x86 devices).
+
+TPL can access binman symbols at runtime. See:
+
+    'Access to binman entry offsets at run time (symbols)'
+
+in the binman README for more information.
+
+The ELF file 'tpl/u-boot-tpl' must also be available for this to work, since
+binman uses that to look up symbols to write into the TPL binary.
+
+
+
+Entry: u-boot-tpl-dtb: U-Boot TPL device tree
+---------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of u-boot.dtb (default 'tpl/u-boot-tpl.dtb')
+
+This is the TPL device tree, containing configuration information for
+TPL. TPL needs this to know what devices are present and which drivers
+to activate.
+
+
+
+Entry: u-boot-tpl-dtb-with-ucode: U-Boot TPL with embedded microcode pointer
+----------------------------------------------------------------------------
+
+This is used when TPL must set up the microcode for U-Boot.
+
+See Entry_u_boot_ucode for full details of the entries involved in this
+process.
+
+
+
+Entry: u-boot-tpl-with-ucode-ptr: U-Boot TPL with embedded microcode pointer
+----------------------------------------------------------------------------
+
+See Entry_u_boot_ucode for full details of the entries involved in this
+process.
+
+
+
+Entry: u-boot-ucode: U-Boot microcode block
+-------------------------------------------
+
+Properties / Entry arguments:
+    None
+
+The contents of this entry are filled in automatically by other entries
+which must also be in the image.
+
+U-Boot on x86 needs a single block of microcode. This is collected from
+the various microcode update nodes in the device tree. It is also unable
+to read the microcode from the device tree on platforms that use FSP
+(Firmware Support Package) binaries, because the API requires that the
+microcode is supplied before there is any SRAM available to use (i.e.
+the FSP sets up the SRAM / cache-as-RAM but does so in the call that
+requires the microcode!). To keep things simple, all x86 platforms handle
+microcode the same way in U-Boot (even non-FSP platforms). This is that
+a table is placed at _dt_ucode_base_size containing the base address and
+size of the microcode. This is either passed to the FSP (for FSP
+platforms), or used to set up the microcode (for non-FSP platforms).
+This all happens in the build system since it is the only way to get
+the microcode into a single blob and accessible without SRAM.
+
+There are two cases to handle. If there is only one microcode blob in
+the device tree, then the ucode pointer is set to point to that. This
+entry (u-boot-ucode) is empty. If there is more than one update, then
+this entry holds the concatenation of all updates, and the device tree
+entry (u-boot-dtb-with-ucode) is updated to remove the microcode. This
+last step ensures that the microcode appears in one contiguous
+block in the image and is not unnecessarily duplicated in the device
+tree. It is referred to as 'collation' here.
+
+Entry types that have a part to play in handling microcode:
+
+    Entry_u_boot_with_ucode_ptr:
+        Contains u-boot-nodtb.bin (i.e. U-Boot without the device tree).
+        It updates it with the address and size of the microcode so that
+        U-Boot can find it early on start-up.
+    Entry_u_boot_dtb_with_ucode:
+        Contains u-boot.dtb. It stores the microcode in a
+        'self.ucode_data' property, which is then read by this class to
+        obtain the microcode if needed. If collation is performed, it
+        removes the microcode from the device tree.
+    Entry_u_boot_ucode:
+        This class. If collation is enabled it reads the microcode from
+        the Entry_u_boot_dtb_with_ucode entry, and uses it as the
+        contents of this entry.
+
+
+
+Entry: u-boot-with-ucode-ptr: U-Boot with embedded microcode pointer
+--------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of u-boot-nodtb.dtb (default 'u-boot-nodtb.dtb')
+    - optional-ucode: boolean property to make microcode optional. If the
+        u-boot.bin image does not include microcode, no error will
+        be generated.
+
+See Entry_u_boot_ucode for full details of the three entries involved in
+this process. This entry updates U-Boot with the offset and size of the
+microcode, to allow early x86 boot code to find it without doing anything
+complicated. Otherwise it is the same as the u_boot entry.
+
+
+
+Entry: vblock: An entry which contains a Chromium OS verified boot block
+------------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - keydir: Directory containing the public keys to use
+    - keyblock: Name of the key file to use (inside keydir)
+    - signprivate: Name of the private key file to use (inside keydir)
+    - version: Version number of the vblock (typically 1)
+    - kernelkey: Name of the kernel key to use (inside keydir)
+    - preamble-flags: Value of the vboot preamble flags (typically 0)
+
+Output files:
+    - input.<unique_name> - input file passed to futility
+    - vblock.<unique_name> - output file generated by futility (which is
+        used as the entry contents)
+
+Chromium OS signs the read-write firmware and kernel, writing the signature
+in this block. This allows U-Boot to verify that the next firmware stage
+and kernel are genuine.
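+
+A node might look something like this (the key filenames follow the usual
+Chromium OS devkeys naming and are illustrative):
+
+    vblock {
+        keydir = "devkeys";
+        keyblock = "firmware.keyblock";
+        signprivate = "firmware_data_key.vbprivk";
+        version = <1>;
+        kernelkey = "kernel_subkey.vbpubk";
+        preamble-flags = <1>;
+    };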
+
+
+
+Entry: x86-start16: x86 16-bit start-up code for U-Boot
+-------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of u-boot-x86-16bit.bin (default
+        'u-boot-x86-16bit.bin')
+
+x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
+must be placed at a particular address. This entry holds that code. It is
+typically placed at offset CONFIG_SYS_X86_START16. The code is responsible
+for changing to 32-bit mode and jumping to U-Boot's entry point, which
+requires 32-bit mode (for 32-bit U-Boot).
+
+For 64-bit U-Boot, the 'x86_start16_spl' entry type is used instead.
+
+
+
+Entry: x86-start16-spl: x86 16-bit start-up code for SPL
+--------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of spl/u-boot-x86-16bit-spl.bin (default
+        'spl/u-boot-x86-16bit-spl.bin')
+
+x86 CPUs start up in 16-bit mode, even if they are 64-bit CPUs. This code
+must be placed at a particular address. This entry holds that code. It is
+typically placed at offset CONFIG_SYS_X86_START16. The code is responsible
+for changing to 32-bit mode and starting SPL, which in turn changes to
+64-bit mode and jumps to U-Boot (for 64-bit U-Boot).
+
+For 32-bit U-Boot, the 'x86_start16' entry type is used instead.
+
+
+
+Entry: x86-start16-tpl: x86 16-bit start-up code for TPL
+--------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of tpl/u-boot-x86-16bit-tpl.bin (default
+        'tpl/u-boot-x86-16bit-tpl.bin')
+
+x86 CPUs start up in 16-bit mode, even if they are 64-bit CPUs. This code
+must be placed at a particular address. This entry holds that code. It is
+typically placed at offset CONFIG_SYS_X86_START16. The code is responsible
+for changing to 32-bit mode and starting TPL, which in turn jumps to SPL.
+
+If TPL is not being used, the 'x86_start16_spl' or 'x86_start16' entry types
+may be used instead.
+
+
+
diff --git a/tools/u-boot-tools/binman/binman b/tools/u-boot-tools/binman/binman
new file mode 120000
index 0000000000000000000000000000000000000000..979b7e4d4b8fdd0bfeda53b04edf141cbf22149c
--- /dev/null
+++ b/tools/u-boot-tools/binman/binman
@@ -0,0 +1 @@
+binman.py
\ No newline at end of file
diff --git a/tools/u-boot-tools/binman/binman.py b/tools/u-boot-tools/binman/binman.py
new file mode 100755
index 0000000000000000000000000000000000000000..439908e66508045e78563617304e3d36dffe9417
--- /dev/null
+++ b/tools/u-boot-tools/binman/binman.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python2
+# SPDX-License-Identifier: GPL-2.0+
+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Creates binary images from input files controlled by a description
+#
+
+"""See README for more information"""
+
+import glob
+import multiprocessing
+import os
+import sys
+import traceback
+import unittest
+
+# Bring in the patman and dtoc libraries
+our_path = os.path.dirname(os.path.realpath(__file__))
+for dirname in ['../patman', '../dtoc', '..', '../concurrencytest']:
+    sys.path.insert(0, os.path.join(our_path, dirname))
+
+# Bring in the libfdt module
+sys.path.insert(0, 'scripts/dtc/pylibfdt')
+sys.path.insert(0, os.path.join(our_path,
+                '../../build-sandbox_spl/scripts/dtc/pylibfdt'))
+
+import cmdline
+import command
+use_concurrent = True
+try:
+    from concurrencytest import ConcurrentTestSuite, fork_for_tests
+except:
+    use_concurrent = False
+import control
+import test_util
+
+def RunTests(debug, processes, args):
+    """Run the functional tests and any embedded doctests
+
+    Args:
+        debug: True to enable debugging, which shows a full stack trace on error
+        args: List of positional args provided to binman. This can hold a test
+            name to execute (as in 'binman -t testSections', for example)
+        processes: Number of processes to use to run tests (None=same as #CPUs)
+    """
+    import elf_test
+    import entry_test
+    import fdt_test
+    import ftest
+    import image_test
+    import test
+    import doctest
+
+    result = unittest.TestResult()
+    for module in []:
+        suite = doctest.DocTestSuite(module)
+        suite.run(result)
+
+    sys.argv = [sys.argv[0]]
+    if debug:
+        sys.argv.append('-D')
+
+    # Run the entry tests first, since these need to be the first to import the
+    # 'entry' module.
+    test_name = args and args[0] or None
+    suite = unittest.TestSuite()
+    loader = unittest.TestLoader()
+    for module in (entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
+                   elf_test.TestElf, image_test.TestImage):
+        if test_name:
+            try:
+                suite.addTests(loader.loadTestsFromName(test_name, module))
+            except AttributeError:
+                continue
+        else:
+            suite.addTests(loader.loadTestsFromTestCase(module))
+    if use_concurrent and processes != 1:
+        concurrent_suite = ConcurrentTestSuite(suite,
+                fork_for_tests(processes or multiprocessing.cpu_count()))
+        concurrent_suite.run(result)
+    else:
+        suite.run(result)
+
+    print result
+    for test, err in result.errors:
+        print test.id(), err
+    for test, err in result.failures:
+        print err, result.failures
+    if result.errors or result.failures:
+        print 'binman tests FAILED'
+        return 1
+    return 0
+
+def GetEntryModules(include_testing=True):
+    """Get a set of entry class implementations
+
+    Returns:
+        Set of paths to entry class filenames
+    """
+    glob_list = glob.glob(os.path.join(our_path, 'etype/*.py'))
+    return set([os.path.splitext(os.path.basename(item))[0]
+                for item in glob_list
+                if include_testing or '_testing' not in item])
+
+def RunTestCoverage():
+    """Run the tests and check that we get 100% coverage"""
+    glob_list = GetEntryModules(False)
+    all_set = set([os.path.splitext(os.path.basename(item))[0]
+                   for item in glob_list if '_testing' not in item])
+    test_util.RunTestCoverage('tools/binman/binman.py', None,
+            ['*test*', '*binman.py', 'tools/patman/*', 'tools/dtoc/*'],
+            options.build_dir, all_set)
+
+def RunBinman(options, args):
+    """Main entry point to binman once arguments are parsed
+
+    Args:
+        options: Command-line options
+        args: Non-option arguments
+    """
+    ret_code = 0
+
+    # For testing: This enables full exception traces.
+    #options.debug = True
+
+    if not options.debug:
+        sys.tracebacklimit = 0
+
+    if options.test:
+        ret_code = RunTests(options.debug, options.processes, args[1:])
+
+    elif options.test_coverage:
+        RunTestCoverage()
+
+    elif options.entry_docs:
+        control.WriteEntryDocs(GetEntryModules())
+
+    else:
+        try:
+            ret_code = control.Binman(options, args)
+        except Exception as e:
+            print 'binman: %s' % e
+            if options.debug:
+                print
+                traceback.print_exc()
+            ret_code = 1
+    return ret_code
+
+
+if __name__ == "__main__":
+    (options, args) = cmdline.ParseArgs(sys.argv)
+    ret_code = RunBinman(options, args)
+    sys.exit(ret_code)
diff --git a/tools/u-boot-tools/binman/bsection.py b/tools/u-boot-tools/binman/bsection.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccf2920c5bde338e9004246660049da71a67eb1a
--- /dev/null
+++ b/tools/u-boot-tools/binman/bsection.py
@@ -0,0 +1,464 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Base class for sections (collections of entries)
+#
+
+from __future__ import print_function
+
+from collections import OrderedDict
+from sets import Set
+import sys
+
+import fdt_util
+import re
+import state
+import tools
+
+class Section(object):
+    """A section which contains multiple entries
+
+    A section represents a collection of entries. There must be one or more
+    sections in an image. Sections are used to group entries together.
+
+    Attributes:
+        _node: Node object that contains the section definition in device tree
+        _parent_section: Parent Section object which created this Section
+        _size: Section size in bytes, or None if not known yet
+        _align_size: Section size alignment, or None
+        _pad_before: Number of bytes before the first entry starts. This
+            effectively changes the place where entry offset 0 starts
+        _pad_after: Number of bytes after the last entry ends. The last
+            entry will finish on or before this boundary
+        _pad_byte: Byte to use to pad the section where there is no entry
+        _sort: True if entries should be sorted by offset, False if they
+            must be in-order in the device tree description
+        _skip_at_start: Number of bytes before the first entry starts. These
+            effectively adjust the starting offset of entries. For example,
+            if _pad_before is 16, then the first entry would start at 16.
+            An entry with offset = 20 would in fact be written at offset 4
+            in the image file.
+        _end_4gb: Indicates that the section ends at the 4GB boundary. This is
+            used for x86 images, which want to use offsets such that a memory
+            address (like 0xff800000) is the first entry offset. This causes
+            _skip_at_start to be set to the starting memory address.
+        _name_prefix: Prefix to add to the name of all entries within this
+            section
+        _entries: OrderedDict() of entries
+    """
+    def __init__(self, name, parent_section, node, image, test=False):
+        global entry
+        global Entry
+        import entry
+        from entry import Entry
+
+        self._parent_section = parent_section
+        self._name = name
+        self._node = node
+        self._image = image
+        self._offset = 0
+        self._size = None
+        self._align_size = None
+        self._pad_before = 0
+        self._pad_after = 0
+        self._pad_byte = 0
+        self._sort = False
+        self._skip_at_start = None
+        self._end_4gb = False
+        self._name_prefix = ''
+        self._entries = OrderedDict()
+        self._image_pos = None
+        if not test:
+            self._ReadNode()
+            self._ReadEntries()
+
+    def _ReadNode(self):
+        """Read properties from the section node"""
+        self._size = fdt_util.GetInt(self._node, 'size')
+        self._align_size = fdt_util.GetInt(self._node, 'align-size')
+        if tools.NotPowerOfTwo(self._align_size):
+            self._Raise("Alignment size %s must be a power of two" %
+                        self._align_size)
+        self._pad_before = fdt_util.GetInt(self._node, 'pad-before', 0)
+        self._pad_after = fdt_util.GetInt(self._node, 'pad-after', 0)
+        self._pad_byte = fdt_util.GetInt(self._node, 'pad-byte', 0)
+        self._sort = fdt_util.GetBool(self._node, 'sort-by-offset')
+        self._end_4gb = fdt_util.GetBool(self._node, 'end-at-4gb')
+        self._skip_at_start = fdt_util.GetInt(self._node, 'skip-at-start')
+        if self._end_4gb:
+            if not self._size:
+                self._Raise("Section size must be provided when using end-at-4gb")
+            if self._skip_at_start is not None:
+                self._Raise("Provide either 'end-at-4gb' or 'skip-at-start'")
+            else:
+                self._skip_at_start = 0x100000000 - self._size
+        else:
+            if self._skip_at_start is None:
+                self._skip_at_start = 0
+        self._name_prefix = fdt_util.GetString(self._node, 'name-prefix')
+
+    def _ReadEntries(self):
+        for node in self._node.subnodes:
+            if node.name == 'hash':
+                continue
+            entry = Entry.Create(self, node)
+            entry.SetPrefix(self._name_prefix)
+            self._entries[node.name] = entry
+
+    def GetFdtSet(self):
+        """Get the set of device tree files used by this image"""
+        fdt_set = Set()
+        for entry in self._entries.values():
+            fdt_set.update(entry.GetFdtSet())
+        return fdt_set
+
+    def SetOffset(self, offset):
+        self._offset = offset
+
+    def ExpandEntries(self):
+        for entry in self._entries.values():
+            entry.ExpandEntries()
+
+    def AddMissingProperties(self):
+        """Add new properties to the device tree as needed for this entry"""
+        for prop in ['offset', 'size', 'image-pos']:
+            if not prop in self._node.props:
+                state.AddZeroProp(self._node, prop)
+        state.CheckAddHashProp(self._node)
+        for entry in self._entries.values():
+            entry.AddMissingProperties()
+
+    def SetCalculatedProperties(self):
+        state.SetInt(self._node, 'offset', self._offset)
+        state.SetInt(self._node, 'size', self._size)
+        image_pos = self._image_pos
+        if self._parent_section:
+            image_pos -= self._parent_section.GetRootSkipAtStart()
+        state.SetInt(self._node, 'image-pos', image_pos)
+        for entry in self._entries.values():
+            entry.SetCalculatedProperties()
+
+    def ProcessFdt(self, fdt):
+        todo = self._entries.values()
+        for passnum in range(3):
+            next_todo = []
+            for entry in todo:
+                if not entry.ProcessFdt(fdt):
+                    next_todo.append(entry)
+            todo = next_todo
+            if not todo:
+                break
+        if todo:
+            self._Raise('Internal error: Could not complete processing of Fdt: '
+                        'remaining %s' % todo)
+        return True
+
+    def CheckSize(self):
+        """Check that the section contents does not exceed its size, etc."""
+        contents_size = 0
+        for entry in self._entries.values():
+            contents_size = max(contents_size, entry.offset + entry.size)
+
+        contents_size -= self._skip_at_start
+
+        size = self._size
+        if not size:
+            size = self._pad_before + contents_size + self._pad_after
+            size = tools.Align(size, self._align_size)
+
+        if self._size and contents_size > self._size:
+            self._Raise("contents size %#x (%d) exceeds section size %#x (%d)" %
+                       (contents_size, contents_size, self._size, self._size))
+        if not self._size:
+            self._size = size
+        if self._size != tools.Align(self._size, self._align_size):
+            self._Raise("Size %#x (%d) does not match align-size %#x (%d)" %
+                  (self._size, self._size, self._align_size, self._align_size))
+        return size
+
+    def _Raise(self, msg):
+        """Raises an error for this section
+
+        Args:
+            msg: Error message to use in the raise string
+        Raises:
+            ValueError()
+        """
+        raise ValueError("Section '%s': %s" % (self._node.path, msg))
+
+    def GetPath(self):
+        """Get the path of an image (in the FDT)
+
+        Returns:
+            Full path of the node for this image
+        """
+        return self._node.path
+
+    def FindEntryType(self, etype):
+        """Find an entry type in the section
+
+        Args:
+            etype: Entry type to find
+        Returns:
+            entry matching that type, or None if not found
+        """
+        for entry in self._entries.values():
+            if entry.etype == etype:
+                return entry
+        return None
+
+    def GetEntryContents(self):
+        """Call ObtainContents() for each entry
+
+        This calls each entry's ObtainContents() a few times until they all
+        return True. We stop calling an entry's function once it returns
+        True. This allows the contents of one entry to depend on another.
+
+        After 3 rounds we give up since it's likely an error.
+        """
+        todo = self._entries.values()
+        for passnum in range(3):
+            next_todo = []
+            for entry in todo:
+                if not entry.ObtainContents():
+                    next_todo.append(entry)
+            todo = next_todo
+            if not todo:
+                break
+        if todo:
+            self._Raise('Internal error: Could not complete processing of '
+                        'contents: remaining %s' % todo)
+        return True
+
+    def _SetEntryOffsetSize(self, name, offset, size):
+        """Set the offset and size of an entry
+
+        Args:
+            name: Entry name to update
+            offset: New offset
+            size: New size
+        """
+        entry = self._entries.get(name)
+        if not entry:
+            self._Raise("Unable to set offset/size for unknown entry '%s'" %
+                        name)
+        entry.SetOffsetSize(self._skip_at_start + offset, size)
+
+    def GetEntryOffsets(self):
+        """Handle entries that want to set the offset/size of other entries
+
+        This calls each entry's GetOffsets() method. If it returns a list
+        of entries to update, it updates them.
+        """
+        for entry in self._entries.values():
+            offset_dict = entry.GetOffsets()
+            for name, info in offset_dict.iteritems():
+                self._SetEntryOffsetSize(name, *info)
+
+    def PackEntries(self):
+        """Pack all entries into the section"""
+        offset = self._skip_at_start
+        for entry in self._entries.values():
+            offset = entry.Pack(offset)
+        self._size = self.CheckSize()
+
+    def _SortEntries(self):
+        """Sort entries by offset"""
+        entries = sorted(self._entries.values(), key=lambda entry: entry.offset)
+        self._entries.clear()
+        for entry in entries:
+            self._entries[entry._node.name] = entry
+
+    def _ExpandEntries(self):
+        """Expand any entries that are permitted to"""
+        exp_entry = None
+        for entry in self._entries.values():
+            if exp_entry:
+                exp_entry.ExpandToLimit(entry.offset)
+                exp_entry = None
+            if entry.expand_size:
+                exp_entry = entry
+        if exp_entry:
+            exp_entry.ExpandToLimit(self._size)
+
+    def CheckEntries(self):
+        """Check that entries do not overlap or extend outside the section
+
+        This also sorts the entries (if enabled) and expands entries that allow it.
+        """
+        if self._sort:
+            self._SortEntries()
+        self._ExpandEntries()
+        offset = 0
+        prev_name = 'None'
+        for entry in self._entries.values():
+            entry.CheckOffset()
+            if (entry.offset < self._skip_at_start or
+                entry.offset + entry.size > self._skip_at_start + self._size):
+                entry.Raise("Offset %#x (%d) is outside the section starting "
+                            "at %#x (%d)" %
+                            (entry.offset, entry.offset, self._skip_at_start,
+                             self._skip_at_start))
+            if entry.offset < offset:
+                entry.Raise("Offset %#x (%d) overlaps with previous entry '%s' "
+                            "ending at %#x (%d)" %
+                            (entry.offset, entry.offset, prev_name, offset, offset))
+            offset = entry.offset + entry.size
+            prev_name = entry.GetPath()
+
+    def SetImagePos(self, image_pos):
+        self._image_pos = image_pos
+        for entry in self._entries.values():
+            entry.SetImagePos(image_pos)
+
+    def ProcessEntryContents(self):
+        """Call the ProcessContents() method for each entry
+
+        This is intended to adjust the contents as needed by the entry type.
+        """
+        for entry in self._entries.values():
+            entry.ProcessContents()
+
+    def WriteSymbols(self):
+        """Write symbol values into binary files for access at run time"""
+        for entry in self._entries.values():
+            entry.WriteSymbols(self)
+
+    def BuildSection(self, fd, base_offset):
+        """Write the section to a file"""
+        fd.seek(base_offset)
+        fd.write(self.GetData())
+
+    def GetData(self):
+        """Get the contents of the section"""
+        section_data = chr(self._pad_byte) * self._size
+
+        for entry in self._entries.values():
+            data = entry.GetData()
+            base = self._pad_before + entry.offset - self._skip_at_start
+            section_data = (section_data[:base] + data +
+                            section_data[base + len(data):])
+        return section_data
+
+    def LookupSymbol(self, sym_name, optional, msg):
+        """Look up a symbol in an ELF file
+
+        Looks up a symbol in an ELF file. Only entry types which come from an
+        ELF image can be used by this function.
+
+        At present the only entry property supported is offset.
+
+        Args:
+            sym_name: Symbol name in the ELF file to look up in the format
+                _binman_<entry>_prop_<property> where <entry> is the name of
+                the entry and <property> is the property to find (e.g.
+                _binman_u_boot_prop_offset). As a special case, you can append
+                _any to <entry> to have it search for any matching entry. E.g.
+                _binman_u_boot_any_prop_offset will match entries called u-boot,
+                u-boot-img and u-boot-nodtb)
+            optional: True if the symbol is optional. If False this function
+                will raise if the symbol is not found
+            msg: Message to display if an error occurs
+
+        Returns:
+            Value that should be assigned to that symbol, or None if it was
+                optional and not found
+
+        Raises:
+            ValueError if the symbol is invalid or not found, or references a
+                property which is not supported
+        """
+        m = re.match(r'^_binman_(\w+)_prop_(\w+)$', sym_name)
+        if not m:
+            raise ValueError("%s: Symbol '%s' has invalid format" %
+                             (msg, sym_name))
+        entry_name, prop_name = m.groups()
+        entry_name = entry_name.replace('_', '-')
+        entry = self._entries.get(entry_name)
+        if not entry:
+            if entry_name.endswith('-any'):
+                root = entry_name[:-4]
+                for name in self._entries:
+                    if name.startswith(root):
+                        rest = name[len(root):]
+                        if rest in ['', '-img', '-nodtb']:
+                            entry = self._entries[name]
+        if not entry:
+            err = ("%s: Entry '%s' not found in list (%s)" %
+                   (msg, entry_name, ','.join(self._entries.keys())))
+            if optional:
+                print('Warning: %s' % err, file=sys.stderr)
+                return None
+            raise ValueError(err)
+        if prop_name == 'offset':
+            return entry.offset
+        elif prop_name == 'image_pos':
+            return entry.image_pos
+        else:
+            raise ValueError("%s: No such property '%s'" % (msg, prop_name))
+
+    def GetEntries(self):
+        """Get the number of entries in a section
+
+        Returns:
+            Number of entries in a section
+        """
+        return self._entries
+
+    def GetSize(self):
+        """Get the size of a section in bytes
+
+        This is only meaningful if the section has a pre-defined size, or the
+        entries within it have been packed, so that the size has been
+        calculated.
+
+        Returns:
+            Section size in bytes
+        """
+        return self._size
+
+    def WriteMap(self, fd, indent):
+        """Write a map of the section to a .map file
+
+        Args:
+            fd: File to write the map to
+        """
+        Entry.WriteMapLine(fd, indent, self._name, self._offset, self._size,
+                           self._image_pos)
+        for entry in self._entries.values():
+            entry.WriteMap(fd, indent + 1)
+
+    def GetContentsByPhandle(self, phandle, source_entry):
+        """Get the data contents of an entry specified by a phandle
+
+        This uses a phandle to look up a node and find the entry
+        associated with it. Then it returns the contents of that entry.
+
+        Args:
+            phandle: Phandle to look up (integer)
+            source_entry: Entry containing that phandle (used for error
+                reporting)
+
+        Returns:
+            data from associated entry (as a string), or None if not found
+        """
+        node = self._node.GetFdt().LookupPhandle(phandle)
+        if not node:
+            source_entry.Raise("Cannot find node for phandle %d" % phandle)
+        for entry in self._entries.values():
+            if entry._node == node:
+                return entry.GetData()
+        source_entry.Raise("Cannot find entry for node '%s'" % node.name)
+
+    def ExpandSize(self, size):
+        if size != self._size:
+            self._size = size
+
+    def GetRootSkipAtStart(self):
+        if self._parent_section:
+            return self._parent_section.GetRootSkipAtStart()
+        return self._skip_at_start
+
+    def GetImageSize(self):
+        return self._image._size
diff --git a/tools/u-boot-tools/binman/cmdline.py b/tools/u-boot-tools/binman/cmdline.py
new file mode 100644
index 0000000000000000000000000000000000000000..3886d52b3a02c4332293dcf3cd050b9a20889fd8
--- /dev/null
+++ b/tools/u-boot-tools/binman/cmdline.py
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Command-line parser for binman
+#
+
+from optparse import OptionParser
+
+def ParseArgs(argv):
+    """Parse the binman command-line arguments
+
+    Args:
+        argv: List of string arguments
+    Returns:
+        Tuple (options, args) with the command-line options and arguments.
+            options provides access to the options (e.g. option.debug)
+            args is a list of string arguments
+    """
+    parser = OptionParser()
+    parser.add_option('-a', '--entry-arg', type='string', action='append',
+            help='Set argument value arg=value')
+    parser.add_option('-b', '--board', type='string',
+            help='Board name to build')
+    parser.add_option('-B', '--build-dir', type='string', default='b',
+            help='Directory containing the build output')
+    parser.add_option('-d', '--dt', type='string',
+            help='Configuration file (.dtb) to use')
+    parser.add_option('-D', '--debug', action='store_true',
+            help='Enable debugging (provides a full traceback on error)')
+    parser.add_option('-E', '--entry-docs', action='store_true',
+            help='Write out entry documentation (see README.entries)')
+    parser.add_option('--fake-dtb', action='store_true',
+            help='Use fake device tree contents (for testing only)')
+    parser.add_option('-i', '--image', type='string', action='append',
+            help='Image filename to build (if not specified, build all)')
+    parser.add_option('-I', '--indir', action='append',
+            help='Add a path to a directory to use for input files')
+    parser.add_option('-H', '--full-help', action='store_true',
+        default=False, help='Display the README file')
+    parser.add_option('-m', '--map', action='store_true',
+        default=False, help='Output a map file for each image')
+    parser.add_option('-O', '--outdir', type='string',
+        action='store', help='Path to directory to use for intermediate and '
+        'output files')
+    parser.add_option('-p', '--preserve', action='store_true',\
+        help='Preserve temporary output directory even if option -O is not '
+             'given')
+    parser.add_option('-P', '--processes', type=int,
+                      help='set number of processes to use for running tests')
+    parser.add_option('-t', '--test', action='store_true',
+                    default=False, help='run tests')
+    parser.add_option('-T', '--test-coverage', action='store_true',
+                    default=False, help='run tests and check for 100% coverage')
+    parser.add_option('-u', '--update-fdt', action='store_true',
+        default=False, help='Update the binman node with offset/size info')
+    parser.add_option('-v', '--verbosity', default=1,
+        type='int', help='Control verbosity: 0=silent, 1=progress, 3=full, '
+        '4=debug')
+
+    parser.usage += """
+
+Create images for a board from a set of binaries. It is controlled by a
+description in the board device tree."""
+
+    return parser.parse_args(argv)
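
As an aside, a minimal sketch of how ParseArgs() behaves (the checkout path and the argument values are hypothetical; the expected results follow from the option definitions above):

    import sys
    sys.path.insert(0, 'tools/u-boot-tools/binman')   # hypothetical checkout path

    import cmdline

    # Hypothetical invocation: build board 'sandbox', write map files and
    # update the FDT with offset/size information
    options, args = cmdline.ParseArgs(['-b', 'sandbox', '-m', '-u', 'leftover'])

    print(options.board)       # 'sandbox'
    print(options.map)         # True
    print(options.update_fdt)  # True
    print(options.build_dir)   # 'b' (the default)
    print(args)                # ['leftover']
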
diff --git a/tools/u-boot-tools/binman/control.py b/tools/u-boot-tools/binman/control.py
new file mode 100644
index 0000000000000000000000000000000000000000..3446e2e79c5b278c7d8895f2d6395321bab3d582
--- /dev/null
+++ b/tools/u-boot-tools/binman/control.py
@@ -0,0 +1,195 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Creates binary images from input files controlled by a description
+#
+
+from collections import OrderedDict
+import os
+import sys
+import tools
+
+import command
+import elf
+from image import Image
+import state
+import tout
+
+# List of images we plan to create
+# Make this global so that it can be referenced from tests
+images = OrderedDict()
+
+def _ReadImageDesc(binman_node):
+    """Read the image descriptions from the /binman node
+
+    This normally produces a single Image object called 'image'. But if
+    multiple images are present, they will all be returned.
+
+    Args:
+        binman_node: Node object of the /binman node
+    Returns:
+        OrderedDict of Image objects, each of which describes an image
+    """
+    images = OrderedDict()
+    if 'multiple-images' in binman_node.props:
+        for node in binman_node.subnodes:
+            images[node.name] = Image(node.name, node)
+    else:
+        images['image'] = Image('image', binman_node)
+    return images
+
+def _FindBinmanNode(dtb):
+    """Find the 'binman' node in the device tree
+
+    Args:
+        dtb: Fdt object to scan
+    Returns:
+        Node object of /binman node, or None if not found
+    """
+    for node in dtb.GetRoot().subnodes:
+        if node.name == 'binman':
+            return node
+    return None
+
+def WriteEntryDocs(modules, test_missing=None):
+    """Write out documentation for all entries
+
+    Args:
+        modules: List of Module objects to get docs for
+        test_missing: Used for testing only, to force an entry's documentation
+            to show as missing even if it is present. Should be set to None in
+            normal use.
+    """
+    from entry import Entry
+    Entry.WriteDocs(modules, test_missing)
+
+def Binman(options, args):
+    """The main control code for binman
+
+    This assumes that help and test options have already been dealt with. It
+    deals with the core task of building images.
+
+    Args:
+        options: Command line options object
+        args: Command line arguments (list of strings)
+    """
+    global images
+
+    if options.full_help:
+        pager = os.getenv('PAGER')
+        if not pager:
+            pager = 'more'
+        fname = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
+                            'README')
+        command.Run(pager, fname)
+        return 0
+
+    # Try to figure out which device tree contains our image description
+    if options.dt:
+        dtb_fname = options.dt
+    else:
+        board = options.board
+        if not board:
+            raise ValueError('Must provide a board to process (use -b <board>)')
+        board_pathname = os.path.join(options.build_dir, board)
+        dtb_fname = os.path.join(board_pathname, 'u-boot.dtb')
+        if not options.indir:
+            options.indir = ['.']
+        options.indir.append(board_pathname)
+
+    try:
+        # Import these here in case libfdt.py is not available, in which case
+        # the above help option still works.
+        import fdt
+        import fdt_util
+
+        tout.Init(options.verbosity)
+        elf.debug = options.debug
+        state.use_fake_dtb = options.fake_dtb
+        try:
+            tools.SetInputDirs(options.indir)
+            tools.PrepareOutputDir(options.outdir, options.preserve)
+            state.SetEntryArgs(options.entry_arg)
+
+            # Get the device tree ready by compiling it and copying the compiled
+            # output into a file in our output directory. Then scan it for use
+            # in binman.
+            dtb_fname = fdt_util.EnsureCompiled(dtb_fname)
+            fname = tools.GetOutputFilename('u-boot.dtb.out')
+            tools.WriteFile(fname, tools.ReadFile(dtb_fname))
+            dtb = fdt.FdtScan(fname)
+
+            node = _FindBinmanNode(dtb)
+            if not node:
+                raise ValueError("Device tree '%s' does not have a 'binman' "
+                                 "node" % dtb_fname)
+
+            images = _ReadImageDesc(node)
+
+            if options.image:
+                skip = []
+                for name, image in images.iteritems():
+                    if name not in options.image:
+                        del images[name]
+                        skip.append(name)
+                if skip:
+                    print 'Skipping images: %s\n' % ', '.join(skip)
+
+            state.Prepare(images, dtb)
+
+            # Prepare the device tree by making sure that any missing
+            # properties are added (e.g. 'pos' and 'size'). The values of these
+            # may not be correct yet, but we add placeholders so that the
+            # size of the device tree is correct. Later, in
+            # SetCalculatedProperties() we will insert the correct values
+            # without changing the device-tree size, thus ensuring that our
+            # entry offsets remain the same.
+            for image in images.values():
+                image.ExpandEntries()
+                if options.update_fdt:
+                    image.AddMissingProperties()
+                image.ProcessFdt(dtb)
+
+            for dtb_item in state.GetFdts():
+                dtb_item.Sync(auto_resize=True)
+                dtb_item.Pack()
+                dtb_item.Flush()
+
+            for image in images.values():
+                # Perform all steps for this image, including checking and
+                # writing it. This means that errors found with a later
+                # image will be reported after earlier images are already
+                # completed and written, but that does not seem important.
+                image.GetEntryContents()
+                image.GetEntryOffsets()
+                try:
+                    image.PackEntries()
+                    image.CheckSize()
+                    image.CheckEntries()
+                except Exception as e:
+                    if options.map:
+                        fname = image.WriteMap()
+                        print "Wrote map file '%s' to show errors"  % fname
+                    raise
+                image.SetImagePos()
+                if options.update_fdt:
+                    image.SetCalculatedProperties()
+                    for dtb_item in state.GetFdts():
+                        dtb_item.Sync()
+                image.ProcessEntryContents()
+                image.WriteSymbols()
+                image.BuildImage()
+                if options.map:
+                    image.WriteMap()
+
+            # Write the updated FDTs to our output files
+            for dtb_item in state.GetFdts():
+                tools.WriteFile(dtb_item._fname, dtb_item.GetContents())
+
+        finally:
+            tools.FinaliseOutputDir()
+    finally:
+        tout.Uninit()
+
+    return 0
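
A minimal sketch of how cmdline and control might be driven from a wrapper script (hypothetical paths and arguments; as the docstring notes, Binman() assumes the help and test options were already handled, and it needs binman's helper modules such as tools and fdt to be importable):

    import sys
    sys.path.insert(0, 'tools/u-boot-tools/binman')   # hypothetical checkout path

    import cmdline
    import control

    # Point binman at a pre-compiled device tree; -d skips the board/build-dir
    # lookup shown in Binman() above
    options, args = cmdline.ParseArgs(['-d', 'u-boot.dtb', '-m', '-u'])
    sys.exit(control.Binman(options, args))
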
diff --git a/tools/u-boot-tools/binman/elf.py b/tools/u-boot-tools/binman/elf.py
new file mode 100644
index 0000000000000000000000000000000000000000..97df8e32c5d8d60271051874c4b81a53b16914c7
--- /dev/null
+++ b/tools/u-boot-tools/binman/elf.py
@@ -0,0 +1,130 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Handle various things related to ELF images
+#
+
+from collections import namedtuple, OrderedDict
+import command
+import os
+import re
+import struct
+
+import tools
+
+# This is enabled from control.py
+debug = False
+
+Symbol = namedtuple('Symbol', ['section', 'address', 'size', 'weak'])
+
+
+def GetSymbols(fname, patterns):
+    """Get the symbols from an ELF file
+
+    Args:
+        fname: Filename of the ELF file to read
+        patterns: List of regex patterns to search for, each a string
+
+    Returns:
+        None, if the file does not exist, or Dict:
+          key: Name of symbol
+          value: Hex value of symbol
+    """
+    stdout = command.Output('objdump', '-t', fname, raise_on_error=False)
+    lines = stdout.splitlines()
+    if patterns:
+        re_syms = re.compile('|'.join(patterns))
+    else:
+        re_syms = None
+    syms = {}
+    syms_started = False
+    for line in lines:
+        if not line or not syms_started:
+            if 'SYMBOL TABLE' in line:
+                syms_started = True
+            line = None  # Otherwise code coverage complains about 'continue'
+            continue
+        if re_syms and not re_syms.search(line):
+            continue
+
+        space_pos = line.find(' ')
+        value, rest = line[:space_pos], line[space_pos + 1:]
+        flags = rest[:7]
+        parts = rest[7:].split()
+        section, size =  parts[:2]
+        if len(parts) > 2:
+            name = parts[2]
+            syms[name] = Symbol(section, int(value, 16), int(size,16),
+                                flags[1] == 'w')
+
+    # Sort dict by address
+    return OrderedDict(sorted(syms.iteritems(), key=lambda x: x[1].address))
+
+def GetSymbolAddress(fname, sym_name):
+    """Get a value of a symbol from an ELF file
+
+    Args:
+        fname: Filename of the ELF file to read
+        sym_name: Name of the symbol to look up
+
+    Returns:
+        Symbol value (as an integer) or None if not found
+    """
+    syms = GetSymbols(fname, [sym_name])
+    sym = syms.get(sym_name)
+    if not sym:
+        return None
+    return sym.address
+
+def LookupAndWriteSymbols(elf_fname, entry, section):
+    """Replace all symbols in an entry with their correct values
+
+    The entry contents are updated so that values for referenced symbols will
+    be visible at run time. This is done by finding the symbols' offsets in
+    the entry (using the ELF file) and replacing them with values from
+    binman's data structures.
+
+    Args:
+        elf_fname: Filename of ELF image containing the symbol information for
+            entry
+        entry: Entry to process
+        section: Section which can be used to lookup symbol values
+    """
+    fname = tools.GetInputFilename(elf_fname)
+    syms = GetSymbols(fname, ['image', 'binman'])
+    if not syms:
+        return
+    base = syms.get('__image_copy_start')
+    if not base:
+        return
+    for name, sym in syms.iteritems():
+        if name.startswith('_binman'):
+            msg = ("Section '%s': Symbol '%s'\n   in entry '%s'" %
+                   (section.GetPath(), name, entry.GetPath()))
+            offset = sym.address - base.address
+            if offset < 0 or offset + sym.size > entry.contents_size:
+                raise ValueError('%s has offset %x (size %x) but the contents '
+                                 'size is %x' % (entry.GetPath(), offset,
+                                                 sym.size, entry.contents_size))
+            if sym.size == 4:
+                pack_string = '<I'
+            elif sym.size == 8:
+                pack_string = '<Q'
+            else:
+                raise ValueError('%s has size %d: only 4 and 8 are supported' %
+                                 (msg, sym.size))
+
+            # Look up the symbol in our entry tables.
+            value = section.LookupSymbol(name, sym.weak, msg)
+            if value is not None:
+                value += base.address
+            else:
+                value = -1
+                pack_string = pack_string.lower()
+            value_bytes = struct.pack(pack_string, value)
+            if debug:
+                print('%s:\n   insert %s, offset %x, value %x, length %d' %
+                      (msg, name, offset, value, len(value_bytes)))
+            entry.data = (entry.data[:offset] + value_bytes +
+                        entry.data[offset + sym.size:])
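
The final patching step in LookupAndWriteSymbols() is plain struct packing; a standalone sketch of that step with made-up values (shown with Python 3 bytes for clarity, where the module above works on Python 2 strings):

    import struct

    contents = b'\x00' * 16    # entry data before patching (illustrative)
    offset = 4                 # offset of the symbol within the entry
    value = 0x1084             # resolved value (section lookup plus base address)
    sym_size = 4               # only 4- and 8-byte symbols are supported

    pack_string = '<I' if sym_size == 4 else '<Q'
    value_bytes = struct.pack(pack_string, value)
    patched = contents[:offset] + value_bytes + contents[offset + sym_size:]
    print(repr(patched))       # 0x1084 appears little-endian at offset 4
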
diff --git a/tools/u-boot-tools/binman/elf_test.py b/tools/u-boot-tools/binman/elf_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..b68530c19ba6e6c724bed6d970a5dfcc203049d3
--- /dev/null
+++ b/tools/u-boot-tools/binman/elf_test.py
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2017 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Test for the elf module
+
+import os
+import sys
+import unittest
+
+import elf
+import test_util
+import tools
+
+binman_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
+
+
+class FakeEntry:
+    """A fake Entry object, usedfor testing
+
+    This supports an entry with a given size.
+    """
+    def __init__(self, contents_size):
+        self.contents_size = contents_size
+        self.data = 'a' * contents_size
+
+    def GetPath(self):
+        return 'entry_path'
+
+
+class FakeSection:
+    """A fake Section object, used for testing
+
+    This has the minimum feature set needed to support testing elf functions.
+    A LookupSymbol() function is provided which returns a fake value for any
+    symbol requested.
+    """
+    def __init__(self, sym_value=1):
+        self.sym_value = sym_value
+
+    def GetPath(self):
+        return 'section_path'
+
+    def LookupSymbol(self, name, weak, msg):
+        """Fake implementation which returns the same value for all symbols"""
+        return self.sym_value
+
+
+class TestElf(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        tools.SetInputDirs(['.'])
+
+    def testAllSymbols(self):
+        """Test that we can obtain a symbol from the ELF file"""
+        fname = os.path.join(binman_dir, 'test', 'u_boot_ucode_ptr')
+        syms = elf.GetSymbols(fname, [])
+        self.assertIn('.ucode', syms)
+
+    def testRegexSymbols(self):
+        """Test that we can obtain from the ELF file by regular expression"""
+        fname = os.path.join(binman_dir, 'test', 'u_boot_ucode_ptr')
+        syms = elf.GetSymbols(fname, ['ucode'])
+        self.assertIn('.ucode', syms)
+        syms = elf.GetSymbols(fname, ['missing'])
+        self.assertNotIn('.ucode', syms)
+        syms = elf.GetSymbols(fname, ['missing', 'ucode'])
+        self.assertIn('.ucode', syms)
+
+    def testMissingFile(self):
+        """Test that a missing file is detected"""
+        entry = FakeEntry(10)
+        section = FakeSection()
+        with self.assertRaises(ValueError) as e:
+            syms = elf.LookupAndWriteSymbols('missing-file', entry, section)
+        self.assertIn("Filename 'missing-file' not found in input path",
+                      str(e.exception))
+
+    def testOutsideFile(self):
+        """Test a symbol which extends outside the entry area is detected"""
+        entry = FakeEntry(10)
+        section = FakeSection()
+        elf_fname = os.path.join(binman_dir, 'test', 'u_boot_binman_syms')
+        with self.assertRaises(ValueError) as e:
+            syms = elf.LookupAndWriteSymbols(elf_fname, entry, section)
+        self.assertIn('entry_path has offset 4 (size 8) but the contents size '
+                      'is a', str(e.exception))
+
+    def testMissingImageStart(self):
+        """Test that we detect a missing __image_copy_start symbol
+
+        This is needed to mark the start of the image. Without it we cannot
+        locate the offset of a binman symbol within the image.
+        """
+        entry = FakeEntry(10)
+        section = FakeSection()
+        elf_fname = os.path.join(binman_dir, 'test', 'u_boot_binman_syms_bad')
+        self.assertEqual(elf.LookupAndWriteSymbols(elf_fname, entry, section),
+                         None)
+
+    def testBadSymbolSize(self):
+        """Test that an attempt to use an 8-bit symbol are detected
+
+        Only 32 and 64 bits are supported, since we need to store an offset
+        into the image.
+        """
+        entry = FakeEntry(10)
+        section = FakeSection()
+        elf_fname = os.path.join(binman_dir, 'test', 'u_boot_binman_syms_size')
+        with self.assertRaises(ValueError) as e:
+            syms = elf.LookupAndWriteSymbols(elf_fname, entry, section)
+        self.assertIn('has size 1: only 4 and 8 are supported',
+                      str(e.exception))
+
+    def testNoValue(self):
+        """Test the case where we have no value for the symbol
+
+        This should produce -1 values for all three symbols, taking up the
+        first 16 bytes of the image.
+        """
+        entry = FakeEntry(20)
+        section = FakeSection(sym_value=None)
+        elf_fname = os.path.join(binman_dir, 'test', 'u_boot_binman_syms')
+        syms = elf.LookupAndWriteSymbols(elf_fname, entry, section)
+        self.assertEqual(chr(255) * 16 + 'a' * 4, entry.data)
+
+    def testDebug(self):
+        """Check that enabling debug in the elf module produced debug output"""
+        elf.debug = True
+        entry = FakeEntry(20)
+        section = FakeSection()
+        elf_fname = os.path.join(binman_dir, 'test', 'u_boot_binman_syms')
+        with test_util.capture_sys_output() as (stdout, stderr):
+            syms = elf.LookupAndWriteSymbols(elf_fname, entry, section)
+        elf.debug = False
+        self.assertTrue(len(stdout.getvalue()) > 0)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tools/u-boot-tools/binman/entry.py b/tools/u-boot-tools/binman/entry.py
new file mode 100644
index 0000000000000000000000000000000000000000..648cfd241f1dac53d87459f6cb1f725d1e86848a
--- /dev/null
+++ b/tools/u-boot-tools/binman/entry.py
@@ -0,0 +1,532 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+#
+# Base class for all entries
+#
+
+from __future__ import print_function
+
+from collections import namedtuple
+
+# importlib was introduced in Python 2.7 but there was a report of it not
+# working in 2.7.12, so we work around this:
+# http://lists.denx.de/pipermail/u-boot/2016-October/269729.html
+try:
+    import importlib
+    have_importlib = True
+except:
+    have_importlib = False
+
+import os
+from sets import Set
+import sys
+
+import fdt_util
+import state
+import tools
+
+modules = {}
+
+our_path = os.path.dirname(os.path.realpath(__file__))
+
+
+# An argument which can be passed to entries on the command line, in lieu of
+# device-tree properties.
+EntryArg = namedtuple('EntryArg', ['name', 'datatype'])
+
+
+class Entry(object):
+    """An Entry in the section
+
+    An entry corresponds to a single node in the device-tree description
+    of the section. Each entry ends up being a part of the final section.
+    Entries can be placed either right next to each other, or with padding
+    between them. The type of the entry determines the data that is in it.
+
+    This class is not used by itself. All entry objects are subclasses of
+    Entry.
+
+    Attributes:
+        section: Section object containing this entry
+        node: The node that created this entry
+        offset: Offset of entry within the section, None if not known yet (in
+            which case it will be calculated by Pack())
+        size: Entry size in bytes, None if not known
+        contents_size: Size of contents in bytes, 0 by default
+        align: Entry start offset alignment, or None
+        align_size: Entry size alignment, or None
+        align_end: Entry end offset alignment, or None
+        pad_before: Number of pad bytes before the contents, 0 if none
+        pad_after: Number of pad bytes after the contents, 0 if none
+        data: Contents of entry (string of bytes)
+    """
+    def __init__(self, section, etype, node, read_node=True, name_prefix=''):
+        self.section = section
+        self.etype = etype
+        self._node = node
+        self.name = node and (name_prefix + node.name) or 'none'
+        self.offset = None
+        self.size = None
+        self.data = None
+        self.contents_size = 0
+        self.align = None
+        self.align_size = None
+        self.align_end = None
+        self.pad_before = 0
+        self.pad_after = 0
+        self.offset_unset = False
+        self.image_pos = None
+        self._expand_size = False
+        if read_node:
+            self.ReadNode()
+
+    @staticmethod
+    def Lookup(section, node_path, etype):
+        """Look up the entry class for a node.
+
+        Args:
+            section:   Section object containing this node
+            node_path: Path name of the node containing information about
+                       the entry to create (used for error reporting)
+            etype:   Entry type to use
+
+        Returns:
+            The entry class object if found, else None
+        """
+        # Convert something like 'u-boot@0' to 'u_boot' since we are only
+        # interested in the type.
+        module_name = etype.replace('-', '_')
+        if '@' in module_name:
+            module_name = module_name.split('@')[0]
+        module = modules.get(module_name)
+
+        # Also allow entry-type modules to be brought in from the etype directory.
+
+        # Import the module if we have not already done so.
+        if not module:
+            old_path = sys.path
+            sys.path.insert(0, os.path.join(our_path, 'etype'))
+            try:
+                if have_importlib:
+                    module = importlib.import_module(module_name)
+                else:
+                    module = __import__(module_name)
+            except ImportError as e:
+                raise ValueError("Unknown entry type '%s' in node '%s' (expected etype/%s.py, error '%s'" %
+                                 (etype, node_path, module_name, e))
+            finally:
+                sys.path = old_path
+            modules[module_name] = module
+
+        # Look up the expected class name
+        return getattr(module, 'Entry_%s' % module_name)
+
+    @staticmethod
+    def Create(section, node, etype=None):
+        """Create a new entry for a node.
+
+        Args:
+            section: Section object containing this node
+            node:    Node object containing information about the entry to
+                     create
+            etype:   Entry type to use, or None to work it out (used for tests)
+
+        Returns:
+            A new Entry object of the correct type (a subclass of Entry)
+        """
+        if not etype:
+            etype = fdt_util.GetString(node, 'type', node.name)
+        obj = Entry.Lookup(section, node.path, etype)
+
+        # Call its constructor to get the object we want.
+        return obj(section, etype, node)
+
+    def ReadNode(self):
+        """Read entry information from the node
+
+        This reads all the fields we recognise from the node, ready for use.
+        """
+        if 'pos' in self._node.props:
+            self.Raise("Please use 'offset' instead of 'pos'")
+        self.offset = fdt_util.GetInt(self._node, 'offset')
+        self.size = fdt_util.GetInt(self._node, 'size')
+        self.align = fdt_util.GetInt(self._node, 'align')
+        if tools.NotPowerOfTwo(self.align):
+            raise ValueError("Node '%s': Alignment %s must be a power of two" %
+                             (self._node.path, self.align))
+        self.pad_before = fdt_util.GetInt(self._node, 'pad-before', 0)
+        self.pad_after = fdt_util.GetInt(self._node, 'pad-after', 0)
+        self.align_size = fdt_util.GetInt(self._node, 'align-size')
+        if tools.NotPowerOfTwo(self.align_size):
+            raise ValueError("Node '%s': Alignment size %s must be a power "
+                             "of two" % (self._node.path, self.align_size))
+        self.align_end = fdt_util.GetInt(self._node, 'align-end')
+        self.offset_unset = fdt_util.GetBool(self._node, 'offset-unset')
+        self.expand_size = fdt_util.GetBool(self._node, 'expand-size')
+
+    def GetDefaultFilename(self):
+        return None
+
+    def GetFdtSet(self):
+        """Get the set of device trees used by this entry
+
+        Returns:
+            Set containing the filename from this entry, if it is a .dtb, else
+            an empty set
+        """
+        fname = self.GetDefaultFilename()
+        # It would be better to use isinstance(self, Entry_blob_dtb) here but
+        # we cannot access Entry_blob_dtb
+        if fname and fname.endswith('.dtb'):
+            return Set([fname])
+        return Set()
+
+    def ExpandEntries(self):
+        pass
+
+    def AddMissingProperties(self):
+        """Add new properties to the device tree as needed for this entry"""
+        for prop in ['offset', 'size', 'image-pos']:
+            if not prop in self._node.props:
+                state.AddZeroProp(self._node, prop)
+        err = state.CheckAddHashProp(self._node)
+        if err:
+            self.Raise(err)
+
+    def SetCalculatedProperties(self):
+        """Set the value of device-tree properties calculated by binman"""
+        state.SetInt(self._node, 'offset', self.offset)
+        state.SetInt(self._node, 'size', self.size)
+        state.SetInt(self._node, 'image-pos',
+                       self.image_pos - self.section.GetRootSkipAtStart())
+        state.CheckSetHashValue(self._node, self.GetData)
+
+    def ProcessFdt(self, fdt):
+        """Allow entries to adjust the device tree
+
+        Some entries need to adjust the device tree for their purposes. This
+        may involve adding or deleting properties.
+
+        Returns:
+            True if processing is complete
+            False if processing could not be completed due to a dependency.
+                This will cause the entry to be retried after others have been
+                called
+        """
+        return True
+
+    def SetPrefix(self, prefix):
+        """Set the name prefix for a node
+
+        Args:
+            prefix: Prefix to set, or '' to not use a prefix
+        """
+        if prefix:
+            self.name = prefix + self.name
+
+    def SetContents(self, data):
+        """Set the contents of an entry
+
+        This sets both the data and contents_size properties
+
+        Args:
+            data: Data to set to the contents (string)
+        """
+        self.data = data
+        self.contents_size = len(self.data)
+
+    def ProcessContentsUpdate(self, data):
+        """Update the contens of an entry, after the size is fixed
+
+        This checks that the new data is the same size as the old.
+
+        Args:
+            data: Data to set to the contents (string)
+
+        Raises:
+            ValueError if the new data size is not the same as the old
+        """
+        if len(data) != self.contents_size:
+            self.Raise('Cannot update entry size from %d to %d' %
+                       (len(data), self.contents_size))
+        self.SetContents(data)
+
+    def ObtainContents(self):
+        """Figure out the contents of an entry.
+
+        Returns:
+            True if the contents were found, False if another call is needed
+            after the other entries are processed.
+        """
+        # No contents by default: subclasses can implement this
+        return True
+
+    def Pack(self, offset):
+        """Figure out how to pack the entry into the section
+
+        Most of the time the entries are not fully specified. There may be
+        an alignment but no size. In that case we take the size from the
+        contents of the entry.
+
+        If an entry has no hard-coded offset, it will be placed at @offset.
+
+        Once this function is complete, both the offset and size of the
+        entry will be known.
+
+        Args:
+            offset: Current section offset pointer
+
+        Returns:
+            New section offset pointer (after this entry)
+        """
+        if self.offset is None:
+            if self.offset_unset:
+                self.Raise('No offset set with offset-unset: should another '
+                           'entry provide this correct offset?')
+            self.offset = tools.Align(offset, self.align)
+        needed = self.pad_before + self.contents_size + self.pad_after
+        needed = tools.Align(needed, self.align_size)
+        size = self.size
+        if not size:
+            size = needed
+        new_offset = self.offset + size
+        aligned_offset = tools.Align(new_offset, self.align_end)
+        if aligned_offset != new_offset:
+            size = aligned_offset - self.offset
+            new_offset = aligned_offset
+
+        if not self.size:
+            self.size = size
+
+        if self.size < needed:
+            self.Raise("Entry contents size is %#x (%d) but entry size is "
+                       "%#x (%d)" % (needed, needed, self.size, self.size))
+        # Check that the alignment is correct. It could be wrong if the
+        # offset or size values were provided (i.e. not calculated), but
+        # conflict with the provided alignment values
+        if self.size != tools.Align(self.size, self.align_size):
+            self.Raise("Size %#x (%d) does not match align-size %#x (%d)" %
+                  (self.size, self.size, self.align_size, self.align_size))
+        if self.offset != tools.Align(self.offset, self.align):
+            self.Raise("Offset %#x (%d) does not match align %#x (%d)" %
+                  (self.offset, self.offset, self.align, self.align))
+
+        return new_offset
+
+    def Raise(self, msg):
+        """Convenience function to raise an error referencing a node"""
+        raise ValueError("Node '%s': %s" % (self._node.path, msg))
+
+    def GetEntryArgsOrProps(self, props, required=False):
+        """Return the values of a set of properties
+
+        Args:
+            props: List of EntryArg objects
+
+        Raises:
+            ValueError if a property is not found
+        """
+        values = []
+        missing = []
+        for prop in props:
+            python_prop = prop.name.replace('-', '_')
+            if hasattr(self, python_prop):
+                value = getattr(self, python_prop)
+            else:
+                value = None
+            if value is None:
+                value = self.GetArg(prop.name, prop.datatype)
+            if value is None and required:
+                missing.append(prop.name)
+            values.append(value)
+        if missing:
+            self.Raise('Missing required properties/entry args: %s' %
+                       (', '.join(missing)))
+        return values
+
+    def GetPath(self):
+        """Get the path of a node
+
+        Returns:
+            Full path of the node for this entry
+        """
+        return self._node.path
+
+    def GetData(self):
+        return self.data
+
+    def GetOffsets(self):
+        return {}
+
+    def SetOffsetSize(self, pos, size):
+        self.offset = pos
+        self.size = size
+
+    def SetImagePos(self, image_pos):
+        """Set the position in the image
+
+        Args:
+            image_pos: Position of this entry in the image
+        """
+        self.image_pos = image_pos + self.offset
+
+    def ProcessContents(self):
+        pass
+
+    def WriteSymbols(self, section):
+        """Write symbol values into binary files for access at run time
+
+        Args:
+          section: Section containing the entry
+        """
+        pass
+
+    def CheckOffset(self):
+        """Check that the entry offsets are correct
+
+        This is used for entries which have extra offset requirements (other
+        than having to be fully inside their section). Sub-classes can implement
+        this function and raise if there is a problem.
+        """
+        pass
+
+    @staticmethod
+    def GetStr(value):
+        if value is None:
+            return '<none>  '
+        return '%08x' % value
+
+    @staticmethod
+    def WriteMapLine(fd, indent, name, offset, size, image_pos):
+        print('%s  %s%s  %s  %s' % (Entry.GetStr(image_pos), ' ' * indent,
+                                    Entry.GetStr(offset), Entry.GetStr(size),
+                                    name), file=fd)
+
+    def WriteMap(self, fd, indent):
+        """Write a map of the entry to a .map file
+
+        Args:
+            fd: File to write the map to
+            indent: Current indent level of map (0=none, 1=one level, etc.)
+        """
+        self.WriteMapLine(fd, indent, self.name, self.offset, self.size,
+                          self.image_pos)
+
+    def GetEntries(self):
+        """Return a list of entries contained by this entry
+
+        Returns:
+            List of entries, or None if none. A normal entry has no entries
+                within it so will return None
+        """
+        return None
+
+    def GetArg(self, name, datatype=str):
+        """Get the value of an entry argument or device-tree-node property
+
+        Some node properties can be provided as arguments to binman. This
+        checks the entry arguments first, and falls back to the device tree
+        if the argument is not found there.
+
+        Args:
+            name: Argument name
+            datatype: Data type (str or int)
+
+        Returns:
+            Value of argument as a string or int, or None if no value
+
+        Raises:
+            ValueError if the argument cannot be converted to an integer
+        """
+        value = state.GetEntryArg(name)
+        if value is not None:
+            if datatype == int:
+                try:
+                    value = int(value)
+                except ValueError:
+                    self.Raise("Cannot convert entry arg '%s' (value '%s') to integer" %
+                               (name, value))
+            elif datatype == str:
+                pass
+            else:
+                raise ValueError("GetArg() internal error: Unknown data type '%s'" %
+                                 datatype)
+        else:
+            value = fdt_util.GetDatatype(self._node, name, datatype)
+        return value
+
+    @staticmethod
+    def WriteDocs(modules, test_missing=None):
+        """Write out documentation about the various entry types to stdout
+
+        Args:
+            modules: List of modules to include
+            test_missing: Used for testing. This is a module to report
+                as missing
+        """
+        print('''Binman Entry Documentation
+===========================
+
+This file describes the entry types supported by binman. These entry types can
+be placed in an image one by one to build up a final firmware image. It is
+fairly easy to create new entry types. Just add a new file to the 'etype'
+directory. You can use the existing entries as examples.
+
+Note that some entries are subclasses of others, using and extending their
+features to produce new behaviours.
+
+
+''')
+        modules = sorted(modules)
+
+        # Don't show the test entry
+        if '_testing' in modules:
+            modules.remove('_testing')
+        missing = []
+        for name in modules:
+            module = Entry.Lookup(name, name, name)
+            docs = getattr(module, '__doc__')
+            if test_missing == name:
+                docs = None
+            if docs:
+                lines = docs.splitlines()
+                first_line = lines[0]
+                rest = [line[4:] for line in lines[1:]]
+                hdr = 'Entry: %s: %s' % (name.replace('_', '-'), first_line)
+                print(hdr)
+                print('-' * len(hdr))
+                print('\n'.join(rest))
+                print()
+                print()
+            else:
+                missing.append(name)
+
+        if missing:
+            raise ValueError('Documentation is missing for modules: %s' %
+                             ', '.join(missing))
+
+    def GetUniqueName(self):
+        """Get a unique name for a node
+
+        Returns:
+            String containing a unique name for a node, consisting of the name
+            of all ancestors (starting from within the 'binman' node) separated
+            by a dot ('.'). This can be useful for generating unique filenames
+            in the output directory.
+        """
+        name = self.name
+        node = self._node
+        while node.parent:
+            node = node.parent
+            if node.name == 'binman':
+                break
+            name = '%s.%s' % (node.name, name)
+        return name
+
+    def ExpandToLimit(self, limit):
+        """Expand an entry so that it ends at the given offset limit"""
+        if self.offset + self.size < limit:
+            self.size = limit - self.offset
+            # Request the contents again, since changing the size requires that
+            # the data grows. This should not fail, but check it to be sure.
+            if not self.ObtainContents():
+                self.Raise('Cannot obtain contents when expanding entry')
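
As a note on Entry.Lookup() above, the etype-to-module/class mapping is purely name based; a small sketch using hypothetical etype values, mirroring the conversion done in Lookup():

    def etype_to_names(etype):
        """Mirror the naming convention used by Entry.Lookup() above"""
        module_name = etype.replace('-', '_')
        if '@' in module_name:
            module_name = module_name.split('@')[0]
        return module_name, 'Entry_%s' % module_name

    print(etype_to_names('u-boot-spl@0'))   # ('u_boot_spl', 'Entry_u_boot_spl')
    print(etype_to_names('cros-ec-rw'))     # ('cros_ec_rw', 'Entry_cros_ec_rw')
    print(etype_to_names('fill'))           # ('fill', 'Entry_fill')
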
diff --git a/tools/u-boot-tools/binman/entry_test.py b/tools/u-boot-tools/binman/entry_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f7ff5b4e419e9676771da91456047b18f95ea72
--- /dev/null
+++ b/tools/u-boot-tools/binman/entry_test.py
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Test for the Entry class
+
+import collections
+import os
+import sys
+import unittest
+
+import fdt
+import fdt_util
+import tools
+
+entry = None
+
+class TestEntry(unittest.TestCase):
+    def setUp(self):
+        tools.PrepareOutputDir(None)
+
+    def tearDown(self):
+        tools.FinaliseOutputDir()
+
+    def GetNode(self):
+        binman_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
+        fname = fdt_util.EnsureCompiled(
+            os.path.join(binman_dir,('test/005_simple.dts')))
+        dtb = fdt.FdtScan(fname)
+        return dtb.GetNode('/binman/u-boot')
+
+    def test1EntryNoImportLib(self):
+        """Test that we can import Entry subclassess successfully"""
+
+        sys.modules['importlib'] = None
+        global entry
+        import entry
+        entry.Entry.Create(None, self.GetNode(), 'u-boot')
+
+    def test2EntryImportLib(self):
+        del sys.modules['importlib']
+        global entry
+        if entry:
+            reload(entry)
+        else:
+            import entry
+        entry.Entry.Create(None, self.GetNode(), 'u-boot-spl')
+        del entry
+
+    def testEntryContents(self):
+        """Test the Entry bass class"""
+        import entry
+        base_entry = entry.Entry(None, None, None, read_node=False)
+        self.assertEqual(True, base_entry.ObtainContents())
+
+    def testUnknownEntry(self):
+        """Test that unknown entry types are detected"""
+        import entry
+        Node = collections.namedtuple('Node', ['name', 'path'])
+        node = Node('invalid-name', 'invalid-path')
+        with self.assertRaises(ValueError) as e:
+            entry.Entry.Create(None, node, node.name)
+        self.assertIn("Unknown entry type 'invalid-name' in node "
+                      "'invalid-path'", str(e.exception))
+
+    def testUniqueName(self):
+        """Test Entry.GetUniqueName"""
+        import entry
+        Node = collections.namedtuple('Node', ['name', 'parent'])
+        base_node = Node('root', None)
+        base_entry = entry.Entry(None, None, base_node, read_node=False)
+        self.assertEqual('root', base_entry.GetUniqueName())
+        sub_node = Node('subnode', base_node)
+        sub_entry = entry.Entry(None, None, sub_node, read_node=False)
+        self.assertEqual('root.subnode', sub_entry.GetUniqueName())
+
+    def testGetDefaultFilename(self):
+        """Trivial test for this base class function"""
+        import entry
+        base_entry = entry.Entry(None, None, None, read_node=False)
+        self.assertIsNone(base_entry.GetDefaultFilename())
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tools/u-boot-tools/binman/etype/_testing.py b/tools/u-boot-tools/binman/etype/_testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e345bd95265c87b203147f1b51020bbea052283
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/_testing.py
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for testing purposes. Not used in real images.
+#
+
+from collections import OrderedDict
+
+from entry import Entry, EntryArg
+import fdt_util
+import tools
+
+
+class Entry__testing(Entry):
+    """A fake entry used for testing
+
+    This entry should not be used in normal images. It is a special entry with
+    strange features used for testing.
+
+    Properties / Entry arguments
+        test-str-fdt: Test string, normally in the node
+        test-int-fdt: Test integer, normally in the node
+        test-str-arg: Test string, normally in the entry arguments
+        test-int-arg: Test integer, normally in the entry arguments
+
+    The entry has a single 'a' byte as its contents. Operation is controlled by
+    a number of properties in the node, as follows:
+
+    Properties:
+        return-invalid-entry: Return an invalid entry from GetOffsets()
+        return-unknown-contents: Refuse to provide any contents (to cause a
+            failure)
+        bad-update-contents: Implement ProcessContents() incorrectly so as to
+            cause a failure
+        return-contents-once: Return contents only for the first call to
+            ObtainContents(), returning False after that
+        never-complete-process-fdt: Refuse to process the FDT (to cause a
+            failure)
+        require-args: Require that all used args are present (generating an
+            error if not)
+        force-bad-datatype: Force a call to GetEntryArgsOrProps() with a bad
+            data type (generating an error)
+    """
+    def __init__(self, section, etype, node):
+        Entry.__init__(self, section, etype, node)
+        self.return_invalid_entry = fdt_util.GetBool(self._node,
+                                                     'return-invalid-entry')
+        self.return_unknown_contents = fdt_util.GetBool(self._node,
+                                                     'return-unknown-contents')
+        self.bad_update_contents = fdt_util.GetBool(self._node,
+                                                    'bad-update-contents')
+        self.return_contents_once = fdt_util.GetBool(self._node,
+                                                     'return-contents-once')
+
+        # Set to True when the entry is ready to process the FDT.
+        self.process_fdt_ready = False
+        self.never_complete_process_fdt = fdt_util.GetBool(self._node,
+                                                'never-complete-process-fdt')
+        self.require_args = fdt_util.GetBool(self._node, 'require-args')
+
+        # This should be picked up by GetEntryArgsOrProps()
+        self.test_existing_prop = 'existing'
+        self.force_bad_datatype = fdt_util.GetBool(self._node,
+                                                   'force-bad-datatype')
+        (self.test_str_fdt, self.test_str_arg, self.test_int_fdt,
+         self.test_int_arg, existing) = self.GetEntryArgsOrProps([
+            EntryArg('test-str-fdt', str),
+            EntryArg('test-str-arg', str),
+            EntryArg('test-int-fdt', int),
+            EntryArg('test-int-arg', int),
+            EntryArg('test-existing-prop', str)], self.require_args)
+        if self.force_bad_datatype:
+            self.GetEntryArgsOrProps([EntryArg('test-bad-datatype-arg', bool)])
+        self.return_contents = True
+
+    def ObtainContents(self):
+        if self.return_unknown_contents or not self.return_contents:
+            return False
+        self.data = 'a'
+        self.contents_size = len(self.data)
+        if self.return_contents_once:
+            self.return_contents = False
+        return True
+
+    def GetOffsets(self):
+        if self.return_invalid_entry :
+            return {'invalid-entry': [1, 2]}
+        return {}
+
+    def ProcessContents(self):
+        if self.bad_update_contents:
+            # Request to update the contents with something larger, to cause a
+            # failure.
+            self.ProcessContentsUpdate('aa')
+
+    def ProcessFdt(self, fdt):
+        """Force reprocessing the first time"""
+        ready = self.process_fdt_ready
+        if not self.never_complete_process_fdt:
+            self.process_fdt_ready = True
+        return ready
diff --git a/tools/u-boot-tools/binman/etype/blob.py b/tools/u-boot-tools/binman/etype/blob.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae80bbee530c72b51d438eebb9901714d4b5480e
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/blob.py
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for blobs, which are binary objects read from files
+#
+
+from entry import Entry
+import fdt_util
+import state
+import tools
+
+class Entry_blob(Entry):
+    """Entry containing an arbitrary binary blob
+
+    Note: This should not be used by itself. It is normally used as a parent
+    class by other entry types.
+
+    Properties / Entry arguments:
+        - filename: Filename of file to read into entry
+        - compress: Compression algorithm to use:
+            none: No compression
+            lz4: Use lz4 compression (via 'lz4' command-line utility)
+
+    This entry reads data from a file and places it in the entry. The
+    default filename is often specified by the subclass. See for
+    example the 'u_boot' entry which provides the filename 'u-boot.bin'.
+
+    If compression is enabled, an extra 'uncomp-size' property is written to
+    the node (if enabled with -u) which provides the uncompressed size of the
+    data.
+    """
+    def __init__(self, section, etype, node):
+        Entry.__init__(self, section, etype, node)
+        self._filename = fdt_util.GetString(self._node, 'filename', self.etype)
+        self._compress = fdt_util.GetString(self._node, 'compress', 'none')
+        self._uncompressed_size = None
+
+    def ObtainContents(self):
+        self._filename = self.GetDefaultFilename()
+        self._pathname = tools.GetInputFilename(self._filename)
+        self.ReadBlobContents()
+        return True
+
+    def ReadBlobContents(self):
+        # We assume the data is small enough to fit into memory. If this
+        # is used for large filesystem image that might not be true.
+        # In that case, Image.BuildImage() could be adjusted to use a
+        # new Entry method which can read in chunks. Then we could copy
+        # the data in chunks and avoid reading it all at once. For now
+        # this seems like an unnecessary complication.
+        data = tools.ReadFile(self._pathname)
+        if self._compress == 'lz4':
+            self._uncompressed_size = len(data)
+            '''
+            import lz4  # Import this only if needed (python-lz4 dependency)
+
+            try:
+                data = lz4.frame.compress(data)
+            except AttributeError:
+                data = lz4.compress(data)
+            '''
+            data = tools.Run('lz4', '-c', self._pathname)
+        self.SetContents(data)
+        return True
+
+    def GetDefaultFilename(self):
+        return self._filename
+
+    def AddMissingProperties(self):
+        Entry.AddMissingProperties(self)
+        if self._compress != 'none':
+            state.AddZeroProp(self._node, 'uncomp-size')
+
+    def SetCalculatedProperties(self):
+        Entry.SetCalculatedProperties(self)
+        if self._uncompressed_size is not None:
+            state.SetInt(self._node, 'uncomp-size', self._uncompressed_size)
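
The lz4 branch of ReadBlobContents() shells out to 'lz4 -c', which writes the compressed frame to stdout; a rough standalone equivalent, assuming the lz4 command-line utility is installed (binman itself goes through its tools.Run() helper and a hypothetical input file is used here):

    import subprocess

    def lz4_compress(pathname):
        # 'lz4 -c <file>' writes the compressed frame to stdout
        return subprocess.check_output(['lz4', '-c', pathname])

    compressed = lz4_compress('u-boot.bin')   # hypothetical input file
    print(len(compressed))
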
diff --git a/tools/u-boot-tools/binman/etype/blob_dtb.py b/tools/u-boot-tools/binman/etype/blob_dtb.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc5b4a3f7609de18462d2dfc3c8f3737a9c9d5fb
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/blob_dtb.py
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot device tree files
+#
+
+import state
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_blob_dtb(Entry_blob):
+    """A blob that holds a device tree
+
+    This is a blob containing a device tree. The contents of the blob are
+    obtained from the list of available device-tree files, managed by the
+    'state' module.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def ObtainContents(self):
+        """Get the device-tree from the list held by the 'state' module"""
+        self._filename = self.GetDefaultFilename()
+        self._pathname, data = state.GetFdtContents(self._filename)
+        self.SetContents(data)
+        return True
+
+    def ProcessContents(self):
+        """Re-read the DTB contents so that we get any calculated properties"""
+        _, data = state.GetFdtContents(self._filename)
+        self.SetContents(data)
diff --git a/tools/u-boot-tools/binman/etype/blob_named_by_arg.py b/tools/u-boot-tools/binman/etype/blob_named_by_arg.py
new file mode 100644
index 0000000000000000000000000000000000000000..344112bc42088938a97f501dcd1706ed5dcf7d81
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/blob_named_by_arg.py
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a blob where the filename comes from a property in the
+# node or an entry argument. The property is called '<blob_fname>-path' where
+# <blob_fname> is provided by the subclass using this entry type.
+
+from collections import OrderedDict
+
+from blob import Entry_blob
+from entry import EntryArg
+
+
+class Entry_blob_named_by_arg(Entry_blob):
+    """A blob entry which gets its filename property from its subclass
+
+    Properties / Entry arguments:
+        - <xxx>-path: Filename containing the contents of this entry (optional,
+            defaults to 0)
+
+    where <xxx> is the blob_fname argument to the constructor.
+
+    This entry cannot be used directly. Instead, it is used as a parent class
+    for another entry, which defines blob_fname. This parameter is used to
+    set the entry-arg or property containing the filename. The entry-arg or
+    property is in turn used to set the actual filename.
+
+    See cros_ec_rw for an example of this.
+    """
+    def __init__(self, section, etype, node, blob_fname):
+        Entry_blob.__init__(self, section, etype, node)
+        self._filename, = self.GetEntryArgsOrProps(
+            [EntryArg('%s-path' % blob_fname, str)])
diff --git a/tools/u-boot-tools/binman/etype/cros_ec_rw.py b/tools/u-boot-tools/binman/etype/cros_ec_rw.py
new file mode 100644
index 0000000000000000000000000000000000000000..261f8657a6276fca119f7b3a9437e05cf9b9d934
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/cros_ec_rw.py
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a Chromium OS EC image (read-write section)
+#
+
+from blob_named_by_arg import Entry_blob_named_by_arg
+
+
+class Entry_cros_ec_rw(Entry_blob_named_by_arg):
+    """A blob entry which contains a Chromium OS read-write EC image
+
+    Properties / Entry arguments:
+        - cros-ec-rw-path: Filename containing the EC image
+
+    This entry holds a Chromium OS EC (embedded controller) image, for use in
+    updating the EC on startup via software sync.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob_named_by_arg.__init__(self, section, etype, node,
+                                         'cros-ec-rw')
diff --git a/tools/u-boot-tools/binman/etype/files.py b/tools/u-boot-tools/binman/etype/files.py
new file mode 100644
index 0000000000000000000000000000000000000000..99f2f2f67febc96d185744611a72fa62a253da4c
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/files.py
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a set of files which are placed in individual
+# sub-entries
+#
+
+import glob
+import os
+
+from section import Entry_section
+import fdt_util
+import state
+import tools
+
+import bsection
+
+class Entry_files(Entry_section):
+    """Entry containing a set of files
+
+    Properties / Entry arguments:
+        - pattern: Filename pattern to match the files to include
+        - compress: Compression algorithm to use:
+            none: No compression
+            lz4: Use lz4 compression (via 'lz4' command-line utility)
+
+    This entry reads a number of files and places each in a separate sub-entry
+    within this entry. To access these you need to enable device-tree updates
+    at run-time so you can obtain the file positions.
+    """
+    def __init__(self, section, etype, node):
+        Entry_section.__init__(self, section, etype, node)
+        self._pattern = fdt_util.GetString(self._node, 'pattern')
+        if not self._pattern:
+            self.Raise("Missing 'pattern' property")
+        self._compress = fdt_util.GetString(self._node, 'compress', 'none')
+        self._require_matches = fdt_util.GetBool(self._node,
+                                                'require-matches')
+
+    def ExpandEntries(self):
+        files = tools.GetInputFilenameGlob(self._pattern)
+        if self._require_matches and not files:
+            self.Raise("Pattern '%s' matched no files" % self._pattern)
+        for fname in files:
+            if not os.path.isfile(fname):
+                continue
+            name = os.path.basename(fname)
+            subnode = self._node.FindNode(name)
+            if not subnode:
+                subnode = state.AddSubnode(self._node, name)
+            state.AddString(subnode, 'type', 'blob')
+            state.AddString(subnode, 'filename', fname)
+            state.AddString(subnode, 'compress', self._compress)
+
+        # Read entries again, now that we have some
+        self._section._ReadEntries()
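
A minimal sketch of what ExpandEntries() produces for a pattern, using a hypothetical directory (binman resolves the pattern through tools.GetInputFilenameGlob() rather than glob directly):

    import glob
    import os

    pattern = 'board/blobs/*.bin'   # hypothetical value of the 'pattern' property
    for fname in glob.glob(pattern):
        if not os.path.isfile(fname):
            continue
        name = os.path.basename(fname)
        # Entry_files would create a subnode <name> with these properties and
        # then re-read the section's entries:
        print(name, {'type': 'blob', 'filename': fname, 'compress': 'none'})
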
diff --git a/tools/u-boot-tools/binman/etype/fill.py b/tools/u-boot-tools/binman/etype/fill.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcfe978a5bf7a1a521ac067438c036fb49c51dac
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/fill.py
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+from entry import Entry
+import fdt_util
+
+
+class Entry_fill(Entry):
+    """An entry which is filled to a particular byte value
+
+    Properties / Entry arguments:
+        - fill-byte: Byte to use to fill the entry
+
+    Note that the size property must be set since otherwise this entry does not
+    know how large it should be.
+
+    You can often achieve the same effect using the pad-byte property of the
+    overall image, in that the space between entries will then be padded with
+    that byte. But this entry is sometimes useful for explicitly setting the
+    byte value of a region.
+    """
+    def __init__(self, section, etype, node):
+        Entry.__init__(self, section, etype, node)
+        if self.size is None:
+            self.Raise("'fill' entry must have a size property")
+        self.fill_value = fdt_util.GetByte(self._node, 'fill-byte', 0)
+
+    def ObtainContents(self):
+        self.SetContents(chr(self.fill_value) * self.size)
+        return True
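+
+# Illustrative only: a minimal 'fill' node, assuming a 4KB region filled with
+# 0xff (the size and byte value are examples, not from any real board):
+#
+#     fill {
+#         size = <0x1000>;
+#         fill-byte = [ff];
+#     };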
diff --git a/tools/u-boot-tools/binman/etype/fmap.py b/tools/u-boot-tools/binman/etype/fmap.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf35a5bbf4e2a60d797fe9a34678d5a3a1705dfc
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/fmap.py
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a Flash map, as used by the flashrom SPI flash tool
+#
+
+from entry import Entry
+import fmap_util
+
+
+class Entry_fmap(Entry):
+    """An entry which contains an Fmap section
+
+    Properties / Entry arguments:
+        None
+
+    FMAP is a simple format used by flashrom, an open-source utility for
+    reading and writing the SPI flash, typically on x86 CPUs. The format
+    provides flashrom with a list of areas, so it knows what is in the flash.
+    It can then read or write just a single area, instead of the whole flash.
+
+    The format is defined by the flashrom project, in the file lib/fmap.h -
+    see www.flashrom.org/Flashrom for more information.
+
+    When used, this entry will be populated with an FMAP which reflects the
+    entries in the current image. Note that any hierarchy is squashed, since
+    FMAP does not support this.
+    """
+    def __init__(self, section, etype, node):
+        Entry.__init__(self, section, etype, node)
+
+    def _GetFmap(self):
+        """Build an FMAP from the entries in the current image
+
+        Returns:
+            FMAP binary data
+        """
+        def _AddEntries(areas, entry):
+            entries = entry.GetEntries()
+            if entries:
+                for subentry in entries.values():
+                    _AddEntries(areas, subentry)
+            else:
+                pos = entry.image_pos
+                if pos is not None:
+                    pos -= entry.section.GetRootSkipAtStart()
+                areas.append(fmap_util.FmapArea(pos or 0, entry.size or 0,
+                                                entry.name, 0))
+
+        entries = self.section._image.GetEntries()
+        areas = []
+        for entry in entries.values():
+            _AddEntries(areas, entry)
+        return fmap_util.EncodeFmap(self.section.GetImageSize() or 0, self.name,
+                                    areas)
+
+    def ObtainContents(self):
+        """Obtain a placeholder for the fmap contents"""
+        self.SetContents(self._GetFmap())
+        return True
+
+    def ProcessContents(self):
+        self.SetContents(self._GetFmap())
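+
+# Illustrative only: a hypothetical image fragment containing an fmap entry.
+# The generated FMAP lists one area per leaf entry (hierarchy is squashed, as
+# noted above); the entry names here are examples:
+#
+#     section {
+#         u-boot {
+#         };
+#     };
+#     fmap {
+#     };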
diff --git a/tools/u-boot-tools/binman/etype/gbb.py b/tools/u-boot-tools/binman/etype/gbb.py
new file mode 100644
index 0000000000000000000000000000000000000000..8fe10f47135bebe6a6761e2cb15f1bcc0b10d4ac
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/gbb.py
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+# Support for a Chromium OS Google Binary Block, used to record read-only
+# information mostly used by firmware.
+
+from collections import OrderedDict
+
+import command
+from entry import Entry, EntryArg
+
+import fdt_util
+import tools
+
+# Build GBB flags.
+# (src/platform/vboot_reference/firmware/include/gbb_header.h)
+gbb_flag_properties = {
+  'dev-screen-short-delay': 0x1,
+  'load-option-roms': 0x2,
+  'enable-alternate-os': 0x4,
+  'force-dev-switch-on': 0x8,
+  'force-dev-boot-usb': 0x10,
+  'disable-fw-rollback-check': 0x20,
+  'enter-triggers-tonorm': 0x40,
+  'force-dev-boot-legacy': 0x80,
+  'faft-key-override': 0x100,
+  'disable-ec-software-sync': 0x200,
+  'default-dev-boot-legacy': 0x400,
+  'disable-pd-software-sync': 0x800,
+  'disable-lid-shutdown': 0x1000,
+  'force-dev-boot-fastboot-full-cap': 0x2000,
+  'enable-serial': 0x4000,
+  'disable-dwmp': 0x8000,
+}
+
+
+class Entry_gbb(Entry):
+    """An entry which contains a Chromium OS Google Binary Block
+
+    Properties / Entry arguments:
+        - hardware-id: Hardware ID to use for this build (a string)
+        - keydir: Directory containing the public keys to use
+        - bmpblk: Filename containing images used by recovery
+
+    Chromium OS uses a GBB to store various pieces of information, in particular
+    the root and recovery keys that are used to verify the boot process. Some
+    more details are here:
+
+        https://www.chromium.org/chromium-os/firmware-porting-guide/2-concepts
+
+    but note that the page dates from 2013 so is quite out of date. See
+    README.chromium for how to obtain the required keys and tools.
+    """
+    def __init__(self, section, etype, node):
+        Entry.__init__(self, section, etype, node)
+        self.hardware_id, self.keydir, self.bmpblk = self.GetEntryArgsOrProps(
+            [EntryArg('hardware-id', str),
+             EntryArg('keydir', str),
+             EntryArg('bmpblk', str)])
+
+        # Read in the GBB flags from the config
+        self.gbb_flags = 0
+        flags_node = node.FindNode('flags')
+        if flags_node:
+            for flag, value in gbb_flag_properties.iteritems():
+                if fdt_util.GetBool(flags_node, flag):
+                    self.gbb_flags |= value
+
+    def ObtainContents(self):
+        gbb = 'gbb.bin'
+        fname = tools.GetOutputFilename(gbb)
+        if not self.size:
+            self.Raise('GBB must have a fixed size')
+        gbb_size = self.size
+        bmpfv_size = gbb_size - 0x2180
+        if bmpfv_size < 0:
+            self.Raise('GBB is too small (minimum 0x2180 bytes)')
+        sizes = [0x100, 0x1000, bmpfv_size, 0x1000]
+        sizes = ['%#x' % size for size in sizes]
+        keydir = tools.GetInputFilename(self.keydir)
+        gbb_set_command = [
+            'gbb_utility', '-s',
+            '--hwid=%s' % self.hardware_id,
+            '--rootkey=%s/root_key.vbpubk' % keydir,
+            '--recoverykey=%s/recovery_key.vbpubk' % keydir,
+            '--flags=%d' % self.gbb_flags,
+            '--bmpfv=%s' % tools.GetInputFilename(self.bmpblk),
+            fname]
+
+        tools.Run('futility', 'gbb_utility', '-c', ','.join(sizes), fname)
+        tools.Run('futility', *gbb_set_command)
+
+        self.SetContents(tools.ReadFile(fname))
+        return True
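+
+# Illustrative only: a sketch of a 'gbb' node using the properties and flags
+# documented above (the size, paths and flag choices are assumptions):
+#
+#     gbb {
+#         size = <0x100000>;
+#         hardware-id = "SAMPLE";
+#         keydir = "devkeys";
+#         bmpblk = "bmpblk.bin";
+#         flags {
+#             dev-screen-short-delay;
+#             enable-serial;
+#         };
+#     };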
diff --git a/tools/u-boot-tools/binman/etype/intel_cmc.py b/tools/u-boot-tools/binman/etype/intel_cmc.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa6f7793c64ab22805ae365fe0b0ed4ec18b5da4
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/intel_cmc.py
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Chip Microcode binary blob
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_intel_cmc(Entry_blob):
+    """Entry containing an Intel Chipset Micro Code (CMC) file
+
+    Properties / Entry arguments:
+        - filename: Filename of file to read into entry
+
+    This file contains microcode for some devices in a special format. An
+    example filename is 'Microcode/C0_22211.BIN'.
+
+    See README.x86 for information about x86 binary blobs.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
diff --git a/tools/u-boot-tools/binman/etype/intel_descriptor.py b/tools/u-boot-tools/binman/etype/intel_descriptor.py
new file mode 100644
index 0000000000000000000000000000000000000000..6acbbd8b7a5ad17d5c97b1652f355e7061e1620e
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/intel_descriptor.py
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for the Intel flash descriptor
+#
+
+import struct
+
+from entry import Entry
+from blob import Entry_blob
+
+FD_SIGNATURE   = struct.pack('<L', 0x0ff0a55a)
+MAX_REGIONS    = 5
+
+# Region numbers supported by the Intel firmware format
+(REGION_DESCRIPTOR, REGION_BIOS, REGION_ME, REGION_GBE,
+        REGION_PDATA) = range(5)
+
+class Region:
+    def __init__(self, data, frba, region_num):
+        pos = frba + region_num * 4
+        val = struct.unpack('<L', data[pos:pos + 4])[0]
+        self.base = (val & 0xfff) << 12
+        self.limit = ((val & 0x0fff0000) >> 4) | 0xfff
+        self.size = self.limit - self.base + 1
+
+class Entry_intel_descriptor(Entry_blob):
+    """Intel flash descriptor block (4KB)
+
+    Properties / Entry arguments:
+        filename: Filename of file containing the descriptor. This is typically
+            a 4KB binary file, sometimes called 'descriptor.bin'
+
+    This entry is placed at the start of flash and provides information about
+    the SPI flash regions. In particular it provides the base address and
+    size of the ME (Management Engine) region, allowing us to place the ME
+    binary in the right place.
+
+    With this entry in your image, the position of the 'intel-me' entry will be
+    fixed in the image, which means you do not need to specify an offset for
+    that region. This is useful, because it is not possible to change the
+    position of the ME region without updating the descriptor.
+
+    See README.x86 for information about x86 binary blobs.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+        self._regions = []
+
+    def GetOffsets(self):
+        offset = self.data.find(FD_SIGNATURE)
+        if offset == -1:
+            self.Raise('Cannot find FD signature')
+        flvalsig, flmap0, flmap1, flmap2 = struct.unpack('<LLLL',
+                                                self.data[offset:offset + 16])
+        frba = ((flmap0 >> 16) & 0xff) << 4
+        for i in range(MAX_REGIONS):
+            self._regions.append(Region(self.data, frba, i))
+
+        # Set the offset for ME only, for now, since the others are not used
+        return {'intel-me': [self._regions[REGION_ME].base,
+                             self._regions[REGION_ME].size]}
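+
+# Worked example (hypothetical values) of the Region decoding above: for a
+# region word val = 0x0bff0003,
+#     base  = (val & 0xfff) << 12                = 0x3000
+#     limit = ((val & 0x0fff0000) >> 4) | 0xfff  = 0xbfffff
+#     size  = limit - base + 1                   = 0xbfd000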
diff --git a/tools/u-boot-tools/binman/etype/intel_fsp.py b/tools/u-boot-tools/binman/etype/intel_fsp.py
new file mode 100644
index 0000000000000000000000000000000000000000..00a78e7083221113ebf0ab7d48dffa63228d2993
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/intel_fsp.py
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Firmware Support Package binary blob
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_intel_fsp(Entry_blob):
+    """Entry containing an Intel Firmware Support Package (FSP) file
+
+    Properties / Entry arguments:
+        - filename: Filename of file to read into entry
+
+    This file contains binary blobs which are used on some devices to make the
+    platform work. U-Boot executes this code since it is not possible to set up
+    the hardware using U-Boot open-source code. Documentation is typically not
+    available in sufficient detail to allow this.
+
+    An example filename is 'FSP/QUEENSBAY_FSP_GOLD_001_20-DECEMBER-2013.fd'
+
+    See README.x86 for information about x86 binary blobs.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
diff --git a/tools/u-boot-tools/binman/etype/intel_me.py b/tools/u-boot-tools/binman/etype/intel_me.py
new file mode 100644
index 0000000000000000000000000000000000000000..247c5b33866df7c82248be22bdfb9768e074bdc3
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/intel_me.py
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Management Engine binary blob
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_intel_me(Entry_blob):
+    """Entry containing an Intel Management Engine (ME) file
+
+    Properties / Entry arguments:
+        - filename: Filename of file to read into entry
+
+    This file contains code used by the SoC that is required to make it work.
+    The Management Engine is like a background task that runs things that are
+    not clearly documented, but may include keyboard, display and network
+    access. For platforms that use the ME it is not possible to disable it.
+    U-Boot does not directly execute code in the ME binary.
+
+    A typical filename is 'me.bin'.
+
+    See README.x86 for information about x86 binary blobs.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
diff --git a/tools/u-boot-tools/binman/etype/intel_mrc.py b/tools/u-boot-tools/binman/etype/intel_mrc.py
new file mode 100644
index 0000000000000000000000000000000000000000..4dbc99a04f2ac346b6e4c9e1c8b81eb2d0be5529
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/intel_mrc.py
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Memory Reference Code binary blob
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_intel_mrc(Entry_blob):
+    """Entry containing an Intel Memory Reference Code (MRC) file
+
+    Properties / Entry arguments:
+        - filename: Filename of file to read into entry
+
+    This file contains code for setting up the SDRAM on some Intel systems. This
+    is executed by U-Boot when needed early during startup. A typical filename
+    is 'mrc.bin'.
+
+    See README.x86 for information about x86 binary blobs.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'mrc.bin'
diff --git a/tools/u-boot-tools/binman/etype/intel_refcode.py b/tools/u-boot-tools/binman/etype/intel_refcode.py
new file mode 100644
index 0000000000000000000000000000000000000000..045db589d175501abb52350a8936037caeacf356
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/intel_refcode.py
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Reference Code binary blob
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_intel_refcode(Entry_blob):
+    """Entry containing an Intel Reference Code file
+
+    Properties / Entry arguments:
+        - filename: Filename of file to read into entry
+
+    This file contains code for setting up the platform on some Intel systems.
+    This is executed by U-Boot when needed early during startup. A typical
+    filename is 'refcode.bin'.
+
+    See README.x86 for information about x86 binary blobs.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'refcode.bin'
diff --git a/tools/u-boot-tools/binman/etype/intel_vbt.py b/tools/u-boot-tools/binman/etype/intel_vbt.py
new file mode 100644
index 0000000000000000000000000000000000000000..d93dd19634146e5d514260df0802599113d9054d
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/intel_vbt.py
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2017, Bin Meng <bmeng.cn@gmail.com>
+#
+# Entry-type module for Intel Video BIOS Table binary blob
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_intel_vbt(Entry_blob):
+    """Entry containing an Intel Video BIOS Table (VBT) file
+
+    Properties / Entry arguments:
+        - filename: Filename of file to read into entry
+
+    This file contains code that sets up the integrated graphics subsystem on
+    some Intel SoCs. U-Boot executes this when the display is started up.
+
+    See README.x86 for information about Intel binary blobs.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
diff --git a/tools/u-boot-tools/binman/etype/intel_vga.py b/tools/u-boot-tools/binman/etype/intel_vga.py
new file mode 100644
index 0000000000000000000000000000000000000000..40982c8665680990d7e7e7bf91352b6c6792c51f
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/intel_vga.py
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for x86 VGA ROM binary blob
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_intel_vga(Entry_blob):
+    """Entry containing an Intel Video Graphics Adaptor (VGA) file
+
+    Properties / Entry arguments:
+        - filename: Filename of file to read into entry
+
+    This file contains code that sets up the integrated graphics subsystem on
+    some Intel SoCs. U-Boot executes this when the display is started up.
+
+    This is similar to the VBT file but in a different format.
+
+    See README.x86 for information about Intel binary blobs.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
diff --git a/tools/u-boot-tools/binman/etype/powerpc_mpc85xx_bootpg_resetvec.py b/tools/u-boot-tools/binman/etype/powerpc_mpc85xx_bootpg_resetvec.py
new file mode 100644
index 0000000000000000000000000000000000000000..59fedd2b54b969d8460ff614bc4ff09c61397417
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/powerpc_mpc85xx_bootpg_resetvec.py
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2018 NXP
+#
+# Entry-type module for the PowerPC mpc85xx bootpg and resetvec code for U-Boot
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_powerpc_mpc85xx_bootpg_resetvec(Entry_blob):
+    """PowerPC mpc85xx bootpg + resetvec code for U-Boot
+
+    Properties / Entry arguments:
+        - filename: Filename of u-boot-br.bin (default 'u-boot-br.bin')
+
+    This entry is valid for PowerPC mpc85xx CPUs. It holds the
+    'bootpg + resetvec' code, which needs to be placed at offset
+    'RESET_VECTOR_ADDRESS - 0xffc'.
+    """
+
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'u-boot-br.bin'
diff --git a/tools/u-boot-tools/binman/etype/section.py b/tools/u-boot-tools/binman/etype/section.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f1b413604947a641e560c02f01d3c96a151aec9
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/section.py
@@ -0,0 +1,106 @@
+# SPDX-License-Identifier:      GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for sections, which are entries which can contain other
+# entries.
+#
+
+from entry import Entry
+import fdt_util
+import tools
+
+import bsection
+
+class Entry_section(Entry):
+    """Entry that contains other entries
+
+    Properties / Entry arguments: (see binman README for more information)
+        - size: Size of section in bytes
+        - align-size: Align size to a particular power of two
+        - pad-before: Add padding before the entry
+        - pad-after: Add padding after the entry
+        - pad-byte: Pad byte to use when padding
+        - sort-by-offset: Reorder the entries by offset
+        - end-at-4gb: Used to build an x86 ROM which ends at 4GB (2^32)
+        - name-prefix: Adds a prefix to the name of every entry in the section
+            when writing out the map
+
+    A section is an entry which can contain other entries, thus allowing
+    hierarchical images to be created. See 'Sections and hierarchical images'
+    in the binman README for more information.
+    """
+    def __init__(self, section, etype, node):
+        Entry.__init__(self, section, etype, node)
+        self._section = bsection.Section(node.name, section, node,
+                                         section._image)
+
+    def GetFdtSet(self):
+        return self._section.GetFdtSet()
+
+    def ProcessFdt(self, fdt):
+        return self._section.ProcessFdt(fdt)
+
+    def ExpandEntries(self):
+        Entry.ExpandEntries(self)
+        self._section.ExpandEntries()
+
+    def AddMissingProperties(self):
+        Entry.AddMissingProperties(self)
+        self._section.AddMissingProperties()
+
+    def ObtainContents(self):
+        return self._section.GetEntryContents()
+
+    def GetData(self):
+        return self._section.GetData()
+
+    def GetOffsets(self):
+        """Handle entries that want to set the offset/size of other entries
+
+        This calls each entry's GetOffsets() method. If it returns a list
+        of entries to update, it updates them.
+        """
+        self._section.GetEntryOffsets()
+        return {}
+
+    def Pack(self, offset):
+        """Pack all entries into the section"""
+        self._section.PackEntries()
+        self._section.SetOffset(offset)
+        self.size = self._section.GetSize()
+        return super(Entry_section, self).Pack(offset)
+
+    def SetImagePos(self, image_pos):
+        Entry.SetImagePos(self, image_pos)
+        self._section.SetImagePos(image_pos + self.offset)
+
+    def WriteSymbols(self, section):
+        """Write symbol values into binary files for access at run time"""
+        self._section.WriteSymbols()
+
+    def SetCalculatedProperties(self):
+        Entry.SetCalculatedProperties(self)
+        self._section.SetCalculatedProperties()
+
+    def ProcessContents(self):
+        self._section.ProcessEntryContents()
+        super(Entry_section, self).ProcessContents()
+
+    def CheckOffset(self):
+        self._section.CheckEntries()
+
+    def WriteMap(self, fd, indent):
+        """Write a map of the section to a .map file
+
+        Args:
+            fd: File to write the map to
+        """
+        self._section.WriteMap(fd, indent)
+
+    def GetEntries(self):
+        return self._section.GetEntries()
+
+    def ExpandToLimit(self, limit):
+        super(Entry_section, self).ExpandToLimit(limit)
+        self._section.ExpandSize(self.size)
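+
+# Illustrative only: a hypothetical section node using some of the properties
+# documented above (sizes, names and contents are examples):
+#
+#     section@0 {
+#         size = <0x10000>;
+#         pad-byte = <0xff>;
+#         name-prefix = "ro-";
+#         u-boot {
+#         };
+#     };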
diff --git a/tools/u-boot-tools/binman/etype/text.py b/tools/u-boot-tools/binman/etype/text.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e99819487faab75b2a2d7ded198f1b48987369e
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/text.py
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+from collections import OrderedDict
+
+from entry import Entry, EntryArg
+import fdt_util
+
+
+class Entry_text(Entry):
+    """An entry which contains text
+
+    The text can be provided either in the node itself or by a command-line
+    argument. There is a level of indirection to allow multiple text strings
+    and sharing of text.
+
+    Properties / Entry arguments:
+        text-label: The value of this string indicates the property / entry-arg
+            that contains the string to place in the entry
+        <xxx> (actual name is the value of text-label): contains the string to
+            place in the entry.
+
+    Example node:
+
+        text {
+            size = <50>;
+            text-label = "message";
+        };
+
+    You can then use:
+
+        binman -amessage="this is my message"
+
+    and binman will insert that string into the entry.
+
+    It is also possible to put the string directly in the node:
+
+        text {
+            size = <8>;
+            text-label = "message";
+            message = "a message directly in the node";
+        };
+
+    The text is not itself nul-terminated. This can be achieved, if required,
+    by setting the size of the entry to something larger than the text.
+    """
+    def __init__(self, section, etype, node):
+        Entry.__init__(self, section, etype, node)
+        self.text_label, = self.GetEntryArgsOrProps(
+            [EntryArg('text-label', str)])
+        self.value, = self.GetEntryArgsOrProps([EntryArg(self.text_label, str)])
+        if not self.value:
+            self.Raise("No value provided for text label '%s'" %
+                       self.text_label)
+
+    def ObtainContents(self):
+        self.SetContents(self.value)
+        return True
diff --git a/tools/u-boot-tools/binman/etype/u_boot.py b/tools/u-boot-tools/binman/etype/u_boot.py
new file mode 100644
index 0000000000000000000000000000000000000000..23dd12ce43594bf359532d86ce99f266178097a8
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot.py
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot binary
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_u_boot(Entry_blob):
+    """U-Boot flat binary
+
+    Properties / Entry arguments:
+        - filename: Filename of u-boot.bin (default 'u-boot.bin')
+
+    This is the U-Boot binary, containing relocation information to allow it
+    to relocate itself at runtime. The binary typically includes a device tree
+    blob at the end of it. Use u_boot_nodtb if you want to package the device
+    tree separately.
+
+    U-Boot can access binman symbols at runtime. See:
+
+        'Access to binman entry offsets at run time (fdt)'
+
+    in the binman README for more information.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'u-boot.bin'
diff --git a/tools/u-boot-tools/binman/etype/u_boot_dtb.py b/tools/u-boot-tools/binman/etype/u_boot_dtb.py
new file mode 100644
index 0000000000000000000000000000000000000000..6263c4ebee31d4f069433f69fe83ff66478ec02d
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_dtb.py
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot device tree
+#
+
+from entry import Entry
+from blob_dtb import Entry_blob_dtb
+
+class Entry_u_boot_dtb(Entry_blob_dtb):
+    """U-Boot device tree
+
+    Properties / Entry arguments:
+        - filename: Filename of u-boot.dtb (default 'u-boot.dtb')
+
+    This is the U-Boot device tree, containing configuration information for
+    U-Boot. U-Boot needs this to know what devices are present and which drivers
+    to activate.
+
+    Note: This is mostly an internal entry type, used by others. This allows
+    binman to know which entries contain a device tree.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob_dtb.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'u-boot.dtb'
diff --git a/tools/u-boot-tools/binman/etype/u_boot_dtb_with_ucode.py b/tools/u-boot-tools/binman/etype/u_boot_dtb_with_ucode.py
new file mode 100644
index 0000000000000000000000000000000000000000..444c51b8b726f831dde1c1f59d6b1319fb8a0ba7
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_dtb_with_ucode.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot device tree with the microcode removed
+#
+
+from entry import Entry
+from blob_dtb import Entry_blob_dtb
+import state
+import tools
+
+class Entry_u_boot_dtb_with_ucode(Entry_blob_dtb):
+    """A U-Boot device tree file, with the microcode removed
+
+    Properties / Entry arguments:
+        - filename: Filename of u-boot.dtb (default 'u-boot.dtb')
+
+    See Entry_u_boot_ucode for full details of the three entries involved in
+    this process. This entry provides the U-Boot device-tree file, which
+    contains the microcode. If the microcode is not being collated into one
+    place then the offset and size of the microcode is recorded by this entry,
+    for use by u_boot_with_ucode_ptr. If it is being collated, then this
+    entry deletes the microcode from the device tree (to save space) and makes
+    it available to u_boot_ucode.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob_dtb.__init__(self, section, etype, node)
+        self.ucode_data = ''
+        self.collate = False
+        self.ucode_offset = None
+        self.ucode_size = None
+        self.ucode = None
+        self.ready = False
+
+    def GetDefaultFilename(self):
+        return 'u-boot.dtb'
+
+    def ProcessFdt(self, fdt):
+        # So the module can be loaded without it
+        import fdt
+
+        # If the section does not need microcode, there is nothing to do
+        ucode_dest_entry = self.section.FindEntryType(
+            'u-boot-spl-with-ucode-ptr')
+        if not ucode_dest_entry or not ucode_dest_entry.target_offset:
+            ucode_dest_entry = self.section.FindEntryType(
+                'u-boot-tpl-with-ucode-ptr')
+        if not ucode_dest_entry or not ucode_dest_entry.target_offset:
+            ucode_dest_entry = self.section.FindEntryType(
+                'u-boot-with-ucode-ptr')
+        if not ucode_dest_entry or not ucode_dest_entry.target_offset:
+            return True
+
+        # Remove the microcode
+        fname = self.GetDefaultFilename()
+        fdt = state.GetFdt(fname)
+        self.ucode = fdt.GetNode('/microcode')
+        if not self.ucode:
+            raise self.Raise("No /microcode node found in '%s'" % fname)
+
+        # There's no need to collate it (move all microcode into one place)
+        # if we only have one chunk of microcode.
+        self.collate = len(self.ucode.subnodes) > 1
+        for node in self.ucode.subnodes:
+            data_prop = node.props.get('data')
+            if data_prop:
+                self.ucode_data += ''.join(data_prop.bytes)
+                if self.collate:
+                    node.DeleteProp('data')
+        return True
+
+    def ObtainContents(self):
+        # Call the base class just in case it does something important.
+        Entry_blob_dtb.ObtainContents(self)
+        if self.ucode and not self.collate:
+            for node in self.ucode.subnodes:
+                data_prop = node.props.get('data')
+                if data_prop:
+                    # Find the offset in the device tree of the ucode data
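+                    # (The 12-byte adjustment presumably skips the FDT
+                    # property header - the tag, length and name-offset
+                    # words - so ucode_offset points at the data itself.)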
+                    self.ucode_offset = data_prop.GetOffset() + 12
+                    self.ucode_size = len(data_prop.bytes)
+                    self.ready = True
+        else:
+            self.ready = True
+        return self.ready
diff --git a/tools/u-boot-tools/binman/etype/u_boot_elf.py b/tools/u-boot-tools/binman/etype/u_boot_elf.py
new file mode 100644
index 0000000000000000000000000000000000000000..f83860dc0a84262c8faa5ad743daa535d8a0ffa5
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_elf.py
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot ELF image
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+import fdt_util
+import tools
+
+class Entry_u_boot_elf(Entry_blob):
+    """U-Boot ELF image
+
+    Properties / Entry arguments:
+        - filename: Filename of u-boot (default 'u-boot')
+
+    This is the U-Boot ELF image. It does not include a device tree but can be
+    relocated to any address for execution.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+        self._strip = fdt_util.GetBool(self._node, 'strip')
+
+    def ReadBlobContents(self):
+        if self._strip:
+            uniq = self.GetUniqueName()
+            out_fname = tools.GetOutputFilename('%s.stripped' % uniq)
+            tools.WriteFile(out_fname, tools.ReadFile(self._pathname))
+            tools.Run('strip', out_fname)
+            self._pathname = out_fname
+        Entry_blob.ReadBlobContents(self)
+        return True
+
+    def GetDefaultFilename(self):
+        return 'u-boot'
diff --git a/tools/u-boot-tools/binman/etype/u_boot_img.py b/tools/u-boot-tools/binman/etype/u_boot_img.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ec0757c7f8d10b19fc6de33a82b46b4491b14c7
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_img.py
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot binary
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_u_boot_img(Entry_blob):
+    """U-Boot legacy image
+
+    Properties / Entry arguments:
+        - filename: Filename of u-boot.img (default 'u-boot.img')
+
+    This is the U-Boot binary as a packaged image, in legacy format. It has a
+    header which allows it to be loaded at the correct address for execution.
+
+    You should use FIT (Flat Image Tree) instead of the legacy image for new
+    applications.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'u-boot.img'
diff --git a/tools/u-boot-tools/binman/etype/u_boot_nodtb.py b/tools/u-boot-tools/binman/etype/u_boot_nodtb.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4b95a4390a7a698e5779fe385e4bf409e4fb652
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_nodtb.py
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for 'u-boot-nodtb.bin'
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_u_boot_nodtb(Entry_blob):
+    """U-Boot flat binary without device tree appended
+
+    Properties / Entry arguments:
+        - filename: Filename of u-boot-nodtb.bin (default 'u-boot-nodtb.bin')
+
+    This is the U-Boot binary, containing relocation information to allow it
+    to relocate itself at runtime. It does not include a device tree blob at
+    the end of it so normally cannot work without it. You can add a u_boot_dtb
+    entry after this one, or use a u_boot entry instead (which contains both
+    U-Boot and the device tree).
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'u-boot-nodtb.bin'
diff --git a/tools/u-boot-tools/binman/etype/u_boot_spl.py b/tools/u-boot-tools/binman/etype/u_boot_spl.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab78714c8d6781125fa2f6c11099ef22dd350c90
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_spl.py
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for spl/u-boot-spl.bin
+#
+
+import elf
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_u_boot_spl(Entry_blob):
+    """U-Boot SPL binary
+
+    Properties / Entry arguments:
+        - filename: Filename of u-boot-spl.bin (default 'spl/u-boot-spl.bin')
+
+    This is the U-Boot SPL (Secondary Program Loader) binary. This is a small
+    binary which loads before U-Boot proper, typically into on-chip SRAM. It is
+    responsible for locating, loading and jumping to U-Boot. Note that SPL is
+    not relocatable so must be loaded to the correct address in SRAM, or written
+    to run from the correct address if direct flash execution is possible (e.g.
+    on x86 devices).
+
+    SPL can access binman symbols at runtime. See:
+
+        'Access to binman entry offsets at run time (symbols)'
+
+    in the binman README for more information.
+
+    The ELF file 'spl/u-boot-spl' must also be available for this to work, since
+    binman uses that to look up symbols to write into the SPL binary.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+        self.elf_fname = 'spl/u-boot-spl'
+
+    def GetDefaultFilename(self):
+        return 'spl/u-boot-spl.bin'
+
+    def WriteSymbols(self, section):
+        elf.LookupAndWriteSymbols(self.elf_fname, self, section)
diff --git a/tools/u-boot-tools/binman/etype/u_boot_spl_bss_pad.py b/tools/u-boot-tools/binman/etype/u_boot_spl_bss_pad.py
new file mode 100644
index 0000000000000000000000000000000000000000..00b7ac50040ef752a11f9705bbc493690e167127
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_spl_bss_pad.py
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for BSS padding for spl/u-boot-spl.bin. This padding
+# can be added after the SPL binary to ensure that anything concatenated
+# to it will appear to SPL to be at the end of BSS rather than the start.
+#
+
+import command
+import elf
+from entry import Entry
+from blob import Entry_blob
+import tools
+
+class Entry_u_boot_spl_bss_pad(Entry_blob):
+    """U-Boot SPL binary padded with a BSS region
+
+    Properties / Entry arguments:
+        None
+
+    This is similar to u_boot_spl except that padding is added after the SPL
+    binary to cover the BSS (Block Started by Symbol) region. This region holds
+    the variables used by SPL. It is set to 0 by SPL when it starts up. If you
+    want to append data to the SPL image (such as a device tree file), you must
+    pad out the BSS region to avoid the data overlapping with U-Boot variables.
+    This entry is useful in that case. It automatically pads out the entry size
+    to cover the code, data and BSS.
+
+    The ELF file 'spl/u-boot-spl' must also be available for this to work, since
+    binman uses that to look up the BSS address.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def ObtainContents(self):
+        fname = tools.GetInputFilename('spl/u-boot-spl')
+        bss_size = elf.GetSymbolAddress(fname, '__bss_size')
+        if not bss_size:
+            self.Raise('Expected __bss_size symbol in spl/u-boot-spl')
+        self.SetContents(chr(0) * bss_size)
+        return True
diff --git a/tools/u-boot-tools/binman/etype/u_boot_spl_dtb.py b/tools/u-boot-tools/binman/etype/u_boot_spl_dtb.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7354646f136dec5e83dbd1e53f738a6c99a65f5
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_spl_dtb.py
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot device tree in SPL (Secondary Program Loader)
+#
+
+from entry import Entry
+from blob_dtb import Entry_blob_dtb
+
+class Entry_u_boot_spl_dtb(Entry_blob_dtb):
+    """U-Boot SPL device tree
+
+    Properties / Entry arguments:
+        - filename: Filename of spl/u-boot-spl.dtb (default 'spl/u-boot-spl.dtb')
+
+    This is the SPL device tree, containing configuration information for
+    SPL. SPL needs this to know what devices are present and which drivers
+    to activate.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob_dtb.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'spl/u-boot-spl.dtb'
diff --git a/tools/u-boot-tools/binman/etype/u_boot_spl_elf.py b/tools/u-boot-tools/binman/etype/u_boot_spl_elf.py
new file mode 100644
index 0000000000000000000000000000000000000000..da328ae15e1039ec533145493cd385cc4fa92bf8
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_spl_elf.py
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot SPL ELF image
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_u_boot_spl_elf(Entry_blob):
+    """U-Boot SPL ELF image
+
+    Properties / Entry arguments:
+        - filename: Filename of the SPL ELF file (default 'spl/u-boot-spl')
+
+    This is the U-Boot SPL ELF image. It does not include a device tree but can
+    be relocated to any address for execution.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'spl/u-boot-spl'
diff --git a/tools/u-boot-tools/binman/etype/u_boot_spl_nodtb.py b/tools/u-boot-tools/binman/etype/u_boot_spl_nodtb.py
new file mode 100644
index 0000000000000000000000000000000000000000..41c17366b1d6df488cd3a369000eeda9c278fca6
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_spl_nodtb.py
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for 'spl/u-boot-spl-nodtb.bin'
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_u_boot_spl_nodtb(Entry_blob):
+    """SPL binary without device tree appended
+
+    Properties / Entry arguments:
+        - filename: Filename of spl/u-boot-spl-nodtb.bin (default
+            'spl/u-boot-spl-nodtb.bin')
+
+    This is the U-Boot SPL binary. It does not include a device tree blob at
+    the end, so it may not be able to work without one, assuming SPL needs
+    a device tree to operate on your platform. You can add a u_boot_spl_dtb
+    entry after this one, or use a u_boot_spl entry instead (which contains
+    both SPL and the device tree).
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'spl/u-boot-spl-nodtb.bin'
diff --git a/tools/u-boot-tools/binman/etype/u_boot_spl_with_ucode_ptr.py b/tools/u-boot-tools/binman/etype/u_boot_spl_with_ucode_ptr.py
new file mode 100644
index 0000000000000000000000000000000000000000..b650cf0146c8aac16d1e6ddb1d4a37ed81dcd69f
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_spl_with_ucode_ptr.py
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for an SPL binary with an embedded microcode pointer
+#
+
+import struct
+
+import command
+from entry import Entry
+from blob import Entry_blob
+from u_boot_with_ucode_ptr import Entry_u_boot_with_ucode_ptr
+import tools
+
+class Entry_u_boot_spl_with_ucode_ptr(Entry_u_boot_with_ucode_ptr):
+    """U-Boot SPL with embedded microcode pointer
+
+    This is used when SPL must set up the microcode for U-Boot.
+
+    See Entry_u_boot_ucode for full details of the entries involved in this
+    process.
+    """
+    def __init__(self, section, etype, node):
+        Entry_u_boot_with_ucode_ptr.__init__(self, section, etype, node)
+        self.elf_fname = 'spl/u-boot-spl'
+
+    def GetDefaultFilename(self):
+        return 'spl/u-boot-spl-nodtb.bin'
diff --git a/tools/u-boot-tools/binman/etype/u_boot_tpl.py b/tools/u-boot-tools/binman/etype/u_boot_tpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d4bb925961d9992f1d380623a036a6fc55a5ef7
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_tpl.py
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for tpl/u-boot-tpl.bin
+#
+
+import elf
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_u_boot_tpl(Entry_blob):
+    """U-Boot TPL binary
+
+    Properties / Entry arguments:
+        - filename: Filename of u-boot-tpl.bin (default 'tpl/u-boot-tpl.bin')
+
+    This is the U-Boot TPL (Tertiary Program Loader) binary. This is a small
+    binary which loads before SPL, typically into on-chip SRAM. It is
+    responsible for locating, loading and jumping to SPL, the next-stage
+    loader. Note that TPL is not relocatable so must be loaded to the correct
+    address in SRAM, or written to run from the correct address if direct
+    flash execution is possible (e.g. on x86 devices).
+
+    TPL can access binman symbols at runtime. See:
+
+        'Access to binman entry offsets at run time (symbols)'
+
+    in the binman README for more information.
+
+    The ELF file 'tpl/u-boot-tpl' must also be available for this to work, since
+    binman uses that to look up symbols to write into the TPL binary.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+        self.elf_fname = 'tpl/u-boot-tpl'
+
+    def GetDefaultFilename(self):
+        return 'tpl/u-boot-tpl.bin'
+
+    def WriteSymbols(self, section):
+        elf.LookupAndWriteSymbols(self.elf_fname, self, section)
diff --git a/tools/u-boot-tools/binman/etype/u_boot_tpl_dtb.py b/tools/u-boot-tools/binman/etype/u_boot_tpl_dtb.py
new file mode 100644
index 0000000000000000000000000000000000000000..bdeb0f75a24a549ff23d26077af7dd0bb1d9d25c
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_tpl_dtb.py
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot device tree in TPL (Tertiary Program Loader)
+#
+
+from entry import Entry
+from blob_dtb import Entry_blob_dtb
+
+class Entry_u_boot_tpl_dtb(Entry_blob_dtb):
+    """U-Boot TPL device tree
+
+    Properties / Entry arguments:
+        - filename: Filename of tpl/u-boot-tpl.dtb (default 'tpl/u-boot-tpl.dtb')
+
+    This is the TPL device tree, containing configuration information for
+    TPL. TPL needs this to know what devices are present and which drivers
+    to activate.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob_dtb.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'tpl/u-boot-tpl.dtb'
diff --git a/tools/u-boot-tools/binman/etype/u_boot_tpl_dtb_with_ucode.py b/tools/u-boot-tools/binman/etype/u_boot_tpl_dtb_with_ucode.py
new file mode 100644
index 0000000000000000000000000000000000000000..71e04fcd44f884684c90c7248db6d6fb66dc54b1
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_tpl_dtb_with_ucode.py
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot device tree with the microcode removed
+#
+
+import control
+from entry import Entry
+from u_boot_dtb_with_ucode import Entry_u_boot_dtb_with_ucode
+import tools
+
+class Entry_u_boot_tpl_dtb_with_ucode(Entry_u_boot_dtb_with_ucode):
+    """U-Boot TPL with embedded microcode pointer
+
+    This is used when TPL must set up the microcode for U-Boot.
+
+    See Entry_u_boot_ucode for full details of the entries involved in this
+    process.
+    """
+    def __init__(self, section, etype, node):
+        Entry_u_boot_dtb_with_ucode.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'tpl/u-boot-tpl.dtb'
diff --git a/tools/u-boot-tools/binman/etype/u_boot_tpl_with_ucode_ptr.py b/tools/u-boot-tools/binman/etype/u_boot_tpl_with_ucode_ptr.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d94dded694fb8eea147ddd1410aab7ef052d698
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_tpl_with_ucode_ptr.py
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a TPL binary with an embedded microcode pointer
+#
+
+import struct
+
+import command
+from entry import Entry
+from blob import Entry_blob
+from u_boot_with_ucode_ptr import Entry_u_boot_with_ucode_ptr
+import tools
+
+class Entry_u_boot_tpl_with_ucode_ptr(Entry_u_boot_with_ucode_ptr):
+    """U-Boot TPL with embedded microcode pointer
+
+    See Entry_u_boot_ucode for full details of the entries involved in this
+    process.
+    """
+    def __init__(self, section, etype, node):
+        Entry_u_boot_with_ucode_ptr.__init__(self, section, etype, node)
+        self.elf_fname = 'tpl/u-boot-tpl'
+
+    def GetDefaultFilename(self):
+        return 'tpl/u-boot-tpl-nodtb.bin'
diff --git a/tools/u-boot-tools/binman/etype/u_boot_ucode.py b/tools/u-boot-tools/binman/etype/u_boot_ucode.py
new file mode 100644
index 0000000000000000000000000000000000000000..a00e530295b6323b083c3b8e78635db9ee037c4f
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_ucode.py
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a U-Boot binary with an embedded microcode pointer
+#
+
+from entry import Entry
+from blob import Entry_blob
+import tools
+
+class Entry_u_boot_ucode(Entry_blob):
+    """U-Boot microcode block
+
+    Properties / Entry arguments:
+        None
+
+    The contents of this entry are filled in automatically by other entries
+    which must also be in the image.
+
+    U-Boot on x86 needs a single block of microcode. This is collected from
+    the various microcode update nodes in the device tree. It is also unable
+    to read the microcode from the device tree on platforms that use FSP
+    (Firmware Support Package) binaries, because the API requires that the
+    microcode is supplied before there is any SRAM available to use (i.e.
+    the FSP sets up the SRAM / cache-as-RAM but does so in the call that
+    requires the microcode!). To keep things simple, all x86 platforms handle
+    microcode the same way in U-Boot (even non-FSP platforms). This is that
+    a table is placed at _dt_ucode_base_size containing the base address and
+    size of the microcode. This is either passed to the FSP (for FSP
+    platforms), or used to set up the microcode (for non-FSP platforms).
+    This all happens in the build system since it is the only way to get
+    the microcode into a single blob and accessible without SRAM.
+
+    There are two cases to handle. If there is only one microcode blob in
+    the device tree, then the ucode pointer is set to point to that. This
+    entry (u-boot-ucode) is empty. If there is more than one update, then
+    this entry holds the concatenation of all updates, and the device tree
+    entry (u-boot-dtb-with-ucode) is updated to remove the microcode. This
+    last step ensures that the microcode appears in one contiguous
+    block in the image and is not unnecessarily duplicated in the device
+    tree. It is referred to as 'collation' here.
+
+    Entry types that have a part to play in handling microcode:
+
+        Entry_u_boot_with_ucode_ptr:
+            Contains u-boot-nodtb.bin (i.e. U-Boot without the device tree).
+            This entry is updated with the address and size of the microcode
+            so that U-Boot can find it early on start-up.
+        Entry_u_boot_dtb_with_ucode:
+            Contains u-boot.dtb. It stores the microcode in a
+            'self.ucode_data' property, which is then read by this class to
+            obtain the microcode if needed. If collation is performed, it
+            removes the microcode from the device tree.
+        Entry_u_boot_ucode:
+            This class. If collation is enabled it reads the microcode from
+            the Entry_u_boot_dtb_with_ucode entry, and uses it as the
+            contents of this entry.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def ObtainContents(self):
+        # If the section does not need microcode, there is nothing to do
+        found = False
+        for suffix in ['', '-spl', '-tpl']:
+            name = 'u-boot%s-with-ucode-ptr' % suffix
+            entry = self.section.FindEntryType(name)
+            if entry and entry.target_offset:
+                found = True
+        if not found:
+            self.data = ''
+            return True
+        # Get the microcode from the device tree entry. If it is not available
+        # yet, return False so we will be called later. If the section simply
+        # doesn't exist, then we may as well return True, since we are going to
+        # get an error anyway.
+        for suffix in ['', '-spl', '-tpl']:
+            name = 'u-boot%s-dtb-with-ucode' % suffix
+            fdt_entry = self.section.FindEntryType(name)
+            if fdt_entry:
+                break
+        if not fdt_entry:
+            return True
+        if not fdt_entry.ready:
+            return False
+
+        if not fdt_entry.collate:
+            # This binary can be empty
+            self.data = ''
+            return True
+
+        # Write it out to a file
+        self._pathname = tools.GetOutputFilename('u-boot-ucode.bin')
+        tools.WriteFile(self._pathname, fdt_entry.ucode_data)
+
+        self.ReadBlobContents()
+
+        return True
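+
+# Illustrative only: the three microcode-related entries described above are
+# typically used together in an x86 image, something like this hypothetical
+# fragment (offsets and other properties omitted):
+#
+#     u-boot-with-ucode-ptr {
+#     };
+#     u-boot-dtb-with-ucode {
+#     };
+#     u-boot-ucode {
+#     };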
diff --git a/tools/u-boot-tools/binman/etype/u_boot_with_ucode_ptr.py b/tools/u-boot-tools/binman/etype/u_boot_with_ucode_ptr.py
new file mode 100644
index 0000000000000000000000000000000000000000..da0e12417b5795fb52f8be595ee4baa1d962ad11
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/u_boot_with_ucode_ptr.py
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a U-Boot binary with an embedded microcode pointer
+#
+
+import struct
+
+import command
+import elf
+from entry import Entry
+from blob import Entry_blob
+import fdt_util
+import tools
+
+class Entry_u_boot_with_ucode_ptr(Entry_blob):
+    """U-Boot with embedded microcode pointer
+
+    Properties / Entry arguments:
+        - filename: Filename of u-boot-nodtb.bin (default 'u-boot-nodtb.bin')
+        - optional-ucode: boolean property to make microcode optional. If the
+            u-boot.bin image does not include microcode, no error will
+            be generated.
+
+    See Entry_u_boot_ucode for full details of the three entries involved in
+    this process. This entry updates U-Boot with the offset and size of the
+    microcode, to allow early x86 boot code to find it without doing anything
+    complicated. Otherwise it is the same as the u_boot entry.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+        self.elf_fname = 'u-boot'
+        self.target_offset = None
+
+    def GetDefaultFilename(self):
+        return 'u-boot-nodtb.bin'
+
+    def ProcessFdt(self, fdt):
+        # Figure out where to put the microcode pointer
+        fname = tools.GetInputFilename(self.elf_fname)
+        sym = elf.GetSymbolAddress(fname, '_dt_ucode_base_size')
+        if sym:
+            self.target_offset = sym
+        elif not fdt_util.GetBool(self._node, 'optional-ucode'):
+            self.Raise('Cannot locate _dt_ucode_base_size symbol in u-boot')
+        return True
+
+    def ProcessContents(self):
+        # If the image does not need microcode, there is nothing to do
+        if not self.target_offset:
+            return
+
+        # Get the offset of the microcode
+        ucode_entry = self.section.FindEntryType('u-boot-ucode')
+        if not ucode_entry:
+            self.Raise('Cannot find microcode region u-boot-ucode')
+
+        # Check the target pos is in the section. If it is not, then U-Boot is
+        # being linked incorrectly, or is being placed at the wrong offset
+        # in the section.
+        #
+        # The section must be set up so that U-Boot is placed at the
+        # flash address to which it is linked. For example, if
+        # CONFIG_SYS_TEXT_BASE is 0xfff00000, and the ROM is 8MB, then
+        # the U-Boot region must start at offset 7MB in the section. In this
+        # case the ROM starts at 0xff800000, so the offset of the first
+        # entry in the section corresponds to that.
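+        # (In that example the offset works out as 0xfff00000 - 0xff800000 =
+        # 0x700000, i.e. 7MB into the 8MB ROM.)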
+        if (self.target_offset < self.image_pos or
+                self.target_offset >= self.image_pos + self.size):
+            self.Raise('Microcode pointer _dt_ucode_base_size at %08x is outside the section ranging from %08x to %08x' %
+                (self.target_offset, self.image_pos,
+                 self.image_pos + self.size))
+
+        # Get the microcode, either from u-boot-ucode or u-boot-dtb-with-ucode.
+        # If we have left the microcode in the device tree, then it will be
+        # in the latter. If we extracted the microcode from the device tree
+        # and collated it in one place, it will be in the former.
+        if ucode_entry.size:
+            offset, size = ucode_entry.offset, ucode_entry.size
+        else:
+            dtb_entry = self.section.FindEntryType('u-boot-dtb-with-ucode')
+            if not dtb_entry:
+                dtb_entry = self.section.FindEntryType(
+                        'u-boot-tpl-dtb-with-ucode')
+            if not dtb_entry:
+                self.Raise('Cannot find microcode region u-boot-dtb-with-ucode')
+            offset = dtb_entry.offset + dtb_entry.ucode_offset
+            size = dtb_entry.ucode_size
+
+        # Write the microcode offset and size into the entry
+        offset_and_size = struct.pack('<2L', offset, size)
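+        # self.target_offset is an absolute image position; convert it to an
+        # offset within this entry's data before patching in the pointer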
+        self.target_offset -= self.image_pos
+        self.ProcessContentsUpdate(self.data[:self.target_offset] +
+                                   offset_and_size +
+                                   self.data[self.target_offset + 8:])
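+
+# A minimal sketch (illustrative only, not used by binman itself) of how the
+# 8-byte little-endian pointer written above could be read back out of this
+# entry's data; 'sym_addr', 'image_pos' and 'data' are hypothetical names:
+#
+#     ptr = sym_addr - image_pos
+#     base, size = struct.unpack('<2L', data[ptr:ptr + 8])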
diff --git a/tools/u-boot-tools/binman/etype/vblock.py b/tools/u-boot-tools/binman/etype/vblock.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4d970ed160f31d69a19e4bf8e833ed6abd3d086
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/vblock.py
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+# Support for a Chromium OS verified boot block, used to sign a read-write
+# section of the image.
+
+from collections import OrderedDict
+import os
+
+from entry import Entry, EntryArg
+
+import fdt_util
+import tools
+
+class Entry_vblock(Entry):
+    """An entry which contains a Chromium OS verified boot block
+
+    Properties / Entry arguments:
+        - keydir: Directory containing the public keys to use
+        - keyblock: Name of the key file to use (inside keydir)
+        - signprivate: Name of the private key file to use (inside keydir)
+        - version: Version number of the vblock (typically 1)
+        - kernelkey: Name of the kernel key to use (inside keydir)
+        - preamble-flags: Value of the vboot preamble flags (typically 0)
+
+    Output files:
+        - input.<unique_name> - input file passed to futility
+        - vblock.<unique_name> - output file generated by futility (which is
+            used as the entry contents)
+
+    Chromium OS signs the read-write firmware and kernel, writing the signature
+    in this block. This allows U-Boot to verify that the next firmware stage
+    and kernel are genuine.
+    """
+    def __init__(self, section, etype, node):
+        Entry.__init__(self, section, etype, node)
+        self.content = fdt_util.GetPhandleList(self._node, 'content')
+        if not self.content:
+            self.Raise("Vblock must have a 'content' property")
+        (self.keydir, self.keyblock, self.signprivate, self.version,
+         self.kernelkey, self.preamble_flags) = self.GetEntryArgsOrProps([
+            EntryArg('keydir', str),
+            EntryArg('keyblock', str),
+            EntryArg('signprivate', str),
+            EntryArg('version', int),
+            EntryArg('kernelkey', str),
+            EntryArg('preamble-flags', int)])
+
+    def ObtainContents(self):
+        # Join up the data files to be signed
+        input_data = ''
+        for entry_phandle in self.content:
+            data = self.section.GetContentsByPhandle(entry_phandle, self)
+            if data is None:
+                # Data not available yet
+                return False
+            input_data += data
+
+        uniq = self.GetUniqueName()
+        output_fname = tools.GetOutputFilename('vblock.%s' % uniq)
+        input_fname = tools.GetOutputFilename('input.%s' % uniq)
+        tools.WriteFile(input_fname, input_data)
+        prefix = self.keydir + '/'
+        args = [
+            'vbutil_firmware',
+            '--vblock', output_fname,
+            '--keyblock', prefix + self.keyblock,
+            '--signprivate', prefix + self.signprivate,
+            '--version', '%d' % self.version,
+            '--fv', input_fname,
+            '--kernelkey', prefix + self.kernelkey,
+            '--flags', '%d' % self.preamble_flags,
+        ]
+        # Run futility to sign the concatenated input; the resulting vblock
+        # file is used as the contents of this entry
+        stdout = tools.Run('futility', *args)
+        self.SetContents(tools.ReadFile(output_fname))
+        return True
diff --git a/tools/u-boot-tools/binman/etype/x86_start16.py b/tools/u-boot-tools/binman/etype/x86_start16.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d32ecd321b0935d13f6cc38af376f09bebc7d06
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/x86_start16.py
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for the 16-bit x86 start-up code for U-Boot
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_x86_start16(Entry_blob):
+    """x86 16-bit start-up code for U-Boot
+
+    Properties / Entry arguments:
+        - filename: Filename of u-boot-x86-16bit.bin (default
+            'u-boot-x86-16bit.bin')
+
+    x86 CPUs start up in 16-bit mode, even if they are 32-bit CPUs. This code
+    must be placed at a particular address. This entry holds that code. It is
+    typically placed at offset CONFIG_SYS_X86_START16. The code is responsible
+    for changing to 32-bit mode and jumping to U-Boot's entry point, which
+    requires 32-bit mode (for 32-bit U-Boot).
+
+    For 64-bit U-Boot, the 'x86_start16_spl' entry type is used instead.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'u-boot-x86-16bit.bin'
diff --git a/tools/u-boot-tools/binman/etype/x86_start16_spl.py b/tools/u-boot-tools/binman/etype/x86_start16_spl.py
new file mode 100644
index 0000000000000000000000000000000000000000..d85909e7ae8e7ac765352971e2cb52837cf08262
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/x86_start16_spl.py
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for the 16-bit x86 start-up code for U-Boot SPL
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_x86_start16_spl(Entry_blob):
+    """x86 16-bit start-up code for SPL
+
+    Properties / Entry arguments:
+        - filename: Filename of spl/u-boot-x86-16bit-spl.bin (default
+            'spl/u-boot-x86-16bit-spl.bin')
+
+    x86 CPUs start up in 16-bit mode, even if they are 64-bit CPUs. This code
+    must be placed at a particular address. This entry holds that code. It is
+    typically placed at offset CONFIG_SYS_X86_START16. The code is responsible
+    for changing to 32-bit mode and starting SPL, which in turn changes to
+    64-bit mode and jumps to U-Boot (for 64-bit U-Boot).
+
+    For 32-bit U-Boot, the 'x86_start16' entry type is used instead.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'spl/u-boot-x86-16bit-spl.bin'
diff --git a/tools/u-boot-tools/binman/etype/x86_start16_tpl.py b/tools/u-boot-tools/binman/etype/x86_start16_tpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..46ce169ae0a6b9d4f04fdfccbee1df89c4c14ad9
--- /dev/null
+++ b/tools/u-boot-tools/binman/etype/x86_start16_tpl.py
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for the 16-bit x86 start-up code for U-Boot TPL
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_x86_start16_tpl(Entry_blob):
+    """x86 16-bit start-up code for TPL
+
+    Properties / Entry arguments:
+        - filename: Filename of tpl/u-boot-x86-16bit-tpl.bin (default
+            'tpl/u-boot-x86-16bit-tpl.bin')
+
+    x86 CPUs start up in 16-bit mode, even if they are 64-bit CPUs. This code
+    must be placed at a particular address. This entry holds that code. It is
+    typically placed at offset CONFIG_SYS_X86_START16. The code is responsible
+    for changing to 32-bit mode and starting TPL, which in turn jumps to SPL.
+
+    If TPL is not being used, the 'x86_start16_spl' or 'x86_start16' entry types
+    may be used instead.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'tpl/u-boot-x86-16bit-tpl.bin'
diff --git a/tools/u-boot-tools/binman/fdt_test.py b/tools/u-boot-tools/binman/fdt_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac6f910d3c00fb4a1accf94701a22fe19f8e3db9
--- /dev/null
+++ b/tools/u-boot-tools/binman/fdt_test.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Test for the fdt modules
+
+import os
+import sys
+import tempfile
+import unittest
+
+import fdt
+from fdt import FdtScan
+import fdt_util
+import tools
+
+class TestFdt(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        self._binman_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
+        self._indir = tempfile.mkdtemp(prefix='binmant.')
+        tools.PrepareOutputDir(self._indir, True)
+
+    @classmethod
+    def tearDownClass(self):
+        tools._FinaliseForTest()
+
+    def TestFile(self, fname):
+        return os.path.join(self._binman_dir, 'test', fname)
+
+    def GetCompiled(self, fname):
+        return fdt_util.EnsureCompiled(self.TestFile(fname))
+
+    def _DeleteProp(self, dt):
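+        """Delete the 'data' property from the /microcode/update@0 node"""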
+        node = dt.GetNode('/microcode/update@0')
+        node.DeleteProp('data')
+
+    def testFdtNormal(self):
+        fname = self.GetCompiled('034_x86_ucode.dts')
+        dt = FdtScan(fname)
+        self._DeleteProp(dt)
+
+    def testFdtNormalProp(self):
+        fname = self.GetCompiled('045_prop_test.dts')
+        dt = FdtScan(fname)
+        node = dt.GetNode('/binman/intel-me')
+        self.assertEquals('intel-me', node.name)
+        val = fdt_util.GetString(node, 'filename')
+        self.assertEquals(str, type(val))
+        self.assertEquals('me.bin', val)
+
+        prop = node.props['intval']
+        self.assertEquals(fdt.TYPE_INT, prop.type)
+        self.assertEquals(3, fdt_util.GetInt(node, 'intval'))
+
+        prop = node.props['intarray']
+        self.assertEquals(fdt.TYPE_INT, prop.type)
+        self.assertEquals(list, type(prop.value))
+        self.assertEquals(2, len(prop.value))
+        self.assertEquals([5, 6],
+                          [fdt_util.fdt32_to_cpu(val) for val in prop.value])
+
+        prop = node.props['byteval']
+        self.assertEquals(fdt.TYPE_BYTE, prop.type)
+        self.assertEquals(chr(8), prop.value)
+
+        prop = node.props['bytearray']
+        self.assertEquals(fdt.TYPE_BYTE, prop.type)
+        self.assertEquals(list, type(prop.value))
+        self.assertEquals(str, type(prop.value[0]))
+        self.assertEquals(3, len(prop.value))
+        self.assertEquals([chr(1), '#', '4'], prop.value)
+
+        prop = node.props['longbytearray']
+        self.assertEquals(fdt.TYPE_INT, prop.type)
+        self.assertEquals(0x090a0b0c, fdt_util.GetInt(node, 'longbytearray'))
+
+        prop = node.props['stringval']
+        self.assertEquals(fdt.TYPE_STRING, prop.type)
+        self.assertEquals('message2', fdt_util.GetString(node, 'stringval'))
+
+        prop = node.props['stringarray']
+        self.assertEquals(fdt.TYPE_STRING, prop.type)
+        self.assertEquals(list, type(prop.value))
+        self.assertEquals(3, len(prop.value))
+        self.assertEquals(['another', 'multi-word', 'message'], prop.value)
diff --git a/tools/u-boot-tools/binman/fmap_util.py b/tools/u-boot-tools/binman/fmap_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..be3cbee87bd151e32eb3a180bd4b6584a45f74fc
--- /dev/null
+++ b/tools/u-boot-tools/binman/fmap_util.py
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Support for flashrom's FMAP format. This supports a header followed by a
+# number of 'areas', describing regions of a firmware storage device,
+# generally SPI flash.
+
+import collections
+import struct
+
+# constants imported from lib/fmap.h
+FMAP_SIGNATURE = '__FMAP__'
+FMAP_VER_MAJOR = 1
+FMAP_VER_MINOR = 0
+FMAP_STRLEN = 32
+
+FMAP_AREA_STATIC = 1 << 0
+FMAP_AREA_COMPRESSED = 1 << 1
+FMAP_AREA_RO = 1 << 2
+
+FMAP_HEADER_LEN = 56
+FMAP_AREA_LEN = 42
+
+FMAP_HEADER_FORMAT = '<8sBBQI%dsH' % FMAP_STRLEN
+FMAP_AREA_FORMAT = '<II%dsH' % FMAP_STRLEN
+
+FMAP_HEADER_NAMES = (
+    'signature',
+    'ver_major',
+    'ver_minor',
+    'base',
+    'image_size',
+    'name',
+    'nareas',
+)
+
+FMAP_AREA_NAMES = (
+    'offset',
+    'size',
+    'name',
+    'flags',
+)
+
+# These are the two data structures supported by flashrom, a header (which
+# appears once at the start) and an area (which is repeated until the end of
+# the list of areas)
+FmapHeader = collections.namedtuple('FmapHeader', FMAP_HEADER_NAMES)
+FmapArea = collections.namedtuple('FmapArea', FMAP_AREA_NAMES)
+
+
+def NameToFmap(name):
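+    """Convert a name to FMAP form: upper case, '_' for '-', nulls removed"""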
+    return name.replace('\0', '').replace('-', '_').upper()
+
+def ConvertName(field_names, fields):
+    """Convert a name to something flashrom likes
+
+    Flashrom requires upper case, underscores instead of hyphens. We remove any
+    null characters as well. This updates the 'name' value in fields.
+
+    Args:
+        field_names: List of field names for this struct
+        fields: Dict:
+            key: Field name
+            value: value of that field (string for the ones we support)
+    """
+    name_index = field_names.index('name')
+    fields[name_index] = NameToFmap(fields[name_index])
+
+def DecodeFmap(data):
+    """Decode a flashmap into a header and list of areas
+
+    Args:
+        data: Data block containing the FMAP
+
+    Returns:
+        Tuple:
+            header: FmapHeader object
+            List of FmapArea objects
+    """
+    fields = list(struct.unpack(FMAP_HEADER_FORMAT, data[:FMAP_HEADER_LEN]))
+    ConvertName(FMAP_HEADER_NAMES, fields)
+    header = FmapHeader(*fields)
+    areas = []
+    data = data[FMAP_HEADER_LEN:]
+    for area in range(header.nareas):
+        fields = list(struct.unpack(FMAP_AREA_FORMAT, data[:FMAP_AREA_LEN]))
+        ConvertName(FMAP_AREA_NAMES, fields)
+        areas.append(FmapArea(*fields))
+        data = data[FMAP_AREA_LEN:]
+    return header, areas
+
+def EncodeFmap(image_size, name, areas):
+    """Create a new FMAP from a list of areas
+
+    Args:
+        image_size: Size of image, to put in the header
+        name: Name of image, to put in the header
+        areas: List of FmapArea objects
+
+    Returns:
+        String containing the FMAP created
+    """
+    def _FormatBlob(fmt, names, obj):
+        params = [getattr(obj, name) for name in names]
+        ConvertName(names, params)
+        return struct.pack(fmt, *params)
+
+    values = FmapHeader(FMAP_SIGNATURE, 1, 0, 0, image_size, name, len(areas))
+    blob = _FormatBlob(FMAP_HEADER_FORMAT, FMAP_HEADER_NAMES, values)
+    for area in areas:
+        blob += _FormatBlob(FMAP_AREA_FORMAT, FMAP_AREA_NAMES, area)
+    return blob
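+
+
+# A minimal usage sketch (illustrative only; the sizes and names below are
+# made up): encode a single-area FMAP with EncodeFmap() and decode it again
+# with DecodeFmap().
+if __name__ == '__main__':
+    demo_area = FmapArea(offset=0x1000, size=0x2000, name='ro-section',
+                         flags=FMAP_AREA_STATIC)
+    demo_blob = EncodeFmap(0x400000, 'demo-image', [demo_area])
+    demo_header, demo_areas = DecodeFmap(demo_blob)
+    # Names come back in FMAP form (upper case, '_' instead of '-')
+    assert demo_header.name == 'DEMO_IMAGE'
+    assert demo_header.nareas == 1
+    assert demo_areas[0].name == 'RO_SECTION'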
diff --git a/tools/u-boot-tools/binman/ftest.py b/tools/u-boot-tools/binman/ftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..e77fce5a26fc4544f4f6141995013032056f1e34
--- /dev/null
+++ b/tools/u-boot-tools/binman/ftest.py
@@ -0,0 +1,1776 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# To run a single test, change to this directory, and:
+#
+#    python -m unittest func_test.TestFunctional.testHelp
+
+import hashlib
+from optparse import OptionParser
+import os
+import shutil
+import struct
+import sys
+import tempfile
+import unittest
+
+import binman
+import cmdline
+import command
+import control
+import elf
+import fdt
+import fdt_util
+import fmap_util
+import test_util
+import state
+import tools
+import tout
+
+# Contents of test files, corresponding to different entry types
+U_BOOT_DATA           = '1234'
+U_BOOT_IMG_DATA       = 'img'
+U_BOOT_SPL_DATA       = '56780123456789abcde'
+U_BOOT_TPL_DATA       = 'tpl'
+BLOB_DATA             = '89'
+ME_DATA               = '0abcd'
+VGA_DATA              = 'vga'
+U_BOOT_DTB_DATA       = 'udtb'
+U_BOOT_SPL_DTB_DATA   = 'spldtb'
+U_BOOT_TPL_DTB_DATA   = 'tpldtb'
+X86_START16_DATA      = 'start16'
+X86_START16_SPL_DATA  = 'start16spl'
+X86_START16_TPL_DATA  = 'start16tpl'
+PPC_MPC85XX_BR_DATA   = 'ppcmpc85xxbr'
+U_BOOT_NODTB_DATA     = 'nodtb with microcode pointer somewhere in here'
+U_BOOT_SPL_NODTB_DATA = 'splnodtb with microcode pointer somewhere in here'
+U_BOOT_TPL_NODTB_DATA = 'tplnodtb with microcode pointer somewhere in here'
+FSP_DATA              = 'fsp'
+CMC_DATA              = 'cmc'
+VBT_DATA              = 'vbt'
+MRC_DATA              = 'mrc'
+TEXT_DATA             = 'text'
+TEXT_DATA2            = 'text2'
+TEXT_DATA3            = 'text3'
+CROS_EC_RW_DATA       = 'ecrw'
+GBB_DATA              = 'gbbd'
+BMPBLK_DATA           = 'bmp'
+VBLOCK_DATA           = 'vblk'
+FILES_DATA            = ("sorry I'm late\nOh, don't bother apologising, I'm " +
+                         "sorry you're alive\n")
+COMPRESS_DATA         = 'data to compress'
+REFCODE_DATA          = 'refcode'
+
+
+class TestFunctional(unittest.TestCase):
+    """Functional tests for binman
+
+    Most of these use a sample .dts file to build an image and then check
+    that it looks correct. The sample files are in the test/ subdirectory
+    and are numbered.
+
+    For each entry type a very small test file is created using fixed
+    string contents. This makes it easy to test that things look right, and
+    debug problems.
+
+    In some cases a 'real' file must be used - these are also supplied in
+    the test/ directory.
+    """
+    @classmethod
+    def setUpClass(self):
+        global entry
+        import entry
+
+        # Handle the case where argv[0] is 'python'
+        self._binman_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
+        self._binman_pathname = os.path.join(self._binman_dir, 'binman')
+
+        # Create a temporary directory for input files
+        self._indir = tempfile.mkdtemp(prefix='binmant.')
+
+        # Create some test files
+        TestFunctional._MakeInputFile('u-boot.bin', U_BOOT_DATA)
+        TestFunctional._MakeInputFile('u-boot.img', U_BOOT_IMG_DATA)
+        TestFunctional._MakeInputFile('spl/u-boot-spl.bin', U_BOOT_SPL_DATA)
+        TestFunctional._MakeInputFile('tpl/u-boot-tpl.bin', U_BOOT_TPL_DATA)
+        TestFunctional._MakeInputFile('blobfile', BLOB_DATA)
+        TestFunctional._MakeInputFile('me.bin', ME_DATA)
+        TestFunctional._MakeInputFile('vga.bin', VGA_DATA)
+        self._ResetDtbs()
+        TestFunctional._MakeInputFile('u-boot-x86-16bit.bin', X86_START16_DATA)
+        TestFunctional._MakeInputFile('u-boot-br.bin', PPC_MPC85XX_BR_DATA)
+        TestFunctional._MakeInputFile('spl/u-boot-x86-16bit-spl.bin',
+                                      X86_START16_SPL_DATA)
+        TestFunctional._MakeInputFile('tpl/u-boot-x86-16bit-tpl.bin',
+                                      X86_START16_TPL_DATA)
+        TestFunctional._MakeInputFile('u-boot-nodtb.bin', U_BOOT_NODTB_DATA)
+        TestFunctional._MakeInputFile('spl/u-boot-spl-nodtb.bin',
+                                      U_BOOT_SPL_NODTB_DATA)
+        TestFunctional._MakeInputFile('tpl/u-boot-tpl-nodtb.bin',
+                                      U_BOOT_TPL_NODTB_DATA)
+        TestFunctional._MakeInputFile('fsp.bin', FSP_DATA)
+        TestFunctional._MakeInputFile('cmc.bin', CMC_DATA)
+        TestFunctional._MakeInputFile('vbt.bin', VBT_DATA)
+        TestFunctional._MakeInputFile('mrc.bin', MRC_DATA)
+        TestFunctional._MakeInputFile('ecrw.bin', CROS_EC_RW_DATA)
+        TestFunctional._MakeInputDir('devkeys')
+        TestFunctional._MakeInputFile('bmpblk.bin', BMPBLK_DATA)
+        TestFunctional._MakeInputFile('refcode.bin', REFCODE_DATA)
+
+        # ELF file with a '_dt_ucode_base_size' symbol
+        with open(self.TestFile('u_boot_ucode_ptr')) as fd:
+            TestFunctional._MakeInputFile('u-boot', fd.read())
+
+        # Intel flash descriptor file
+        with open(self.TestFile('descriptor.bin')) as fd:
+            TestFunctional._MakeInputFile('descriptor.bin', fd.read())
+
+        shutil.copytree(self.TestFile('files'),
+                        os.path.join(self._indir, 'files'))
+
+        TestFunctional._MakeInputFile('compress', COMPRESS_DATA)
+
+    @classmethod
+    def tearDownClass(self):
+        """Remove the temporary input directory and its contents"""
+        if self._indir:
+            shutil.rmtree(self._indir)
+        self._indir = None
+
+    def setUp(self):
+        # Enable this to turn on debugging output
+        # tout.Init(tout.DEBUG)
+        command.test_result = None
+
+    def tearDown(self):
+        """Remove the temporary output directory"""
+        tools._FinaliseForTest()
+
+    @classmethod
+    def _ResetDtbs(self):
+        TestFunctional._MakeInputFile('u-boot.dtb', U_BOOT_DTB_DATA)
+        TestFunctional._MakeInputFile('spl/u-boot-spl.dtb', U_BOOT_SPL_DTB_DATA)
+        TestFunctional._MakeInputFile('tpl/u-boot-tpl.dtb', U_BOOT_TPL_DTB_DATA)
+
+    def _RunBinman(self, *args, **kwargs):
+        """Run binman using the command line
+
+        Args:
+            Arguments to pass, as a list of strings
+            kwargs: Arguments to pass to Command.RunPipe()
+        """
+        result = command.RunPipe([[self._binman_pathname] + list(args)],
+                capture=True, capture_stderr=True, raise_on_error=False)
+        if result.return_code and kwargs.get('raise_on_error', True):
+            raise Exception("Error running '%s': %s" % (' '.join(args),
+                            result.stdout + result.stderr))
+        return result
+
+    def _DoBinman(self, *args):
+        """Run binman using directly (in the same process)
+
+        Args:
+            Arguments to pass, as a list of strings
+        Returns:
+            Return value (0 for success)
+        """
+        args = list(args)
+        if '-D' in sys.argv:
+            args = args + ['-D']
+        (options, args) = cmdline.ParseArgs(args)
+        options.pager = 'binman-invalid-pager'
+        options.build_dir = self._indir
+
+        # For testing, you can force an increase in verbosity here
+        # options.verbosity = tout.DEBUG
+        return control.Binman(options, args)
+
+    def _DoTestFile(self, fname, debug=False, map=False, update_dtb=False,
+                    entry_args=None, images=None, use_real_dtb=False):
+        """Run binman with a given test file
+
+        Args:
+            fname: Device-tree source filename to use (e.g. 005_simple.dts)
+            debug: True to enable debugging output
+            map: True to output map files for the images
+            update_dtb: Update the offset and size of each entry in the device
+                tree before packing it into the image
+            entry_args: Dict of entry args to supply to binman
+                key: arg name
+                value: value of that arg
+            images: List of image names to build
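+            use_real_dtb: True to pass the real device tree to binman
+                (otherwise binman is run with --fake-dtb)
+
+        Returns:
+            Return code from binman (0 for success)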
+        """
+        args = ['-p', '-I', self._indir, '-d', self.TestFile(fname)]
+        if debug:
+            args.append('-D')
+        if map:
+            args.append('-m')
+        if update_dtb:
+            args.append('-up')
+        if not use_real_dtb:
+            args.append('--fake-dtb')
+        if entry_args:
+            for arg, value in entry_args.iteritems():
+                args.append('-a%s=%s' % (arg, value))
+        if images:
+            for image in images:
+                args += ['-i', image]
+        return self._DoBinman(*args)
+
+    def _SetupDtb(self, fname, outfile='u-boot.dtb'):
+        """Set up a new test device-tree file
+
+        The given file is compiled and set up as the device tree to be used
+        for this test.
+
+        Args:
+            fname: Filename of .dts file to read
+            outfile: Output filename for compiled device-tree binary
+
+        Returns:
+            Contents of device-tree binary
+        """
+        tools.PrepareOutputDir(None)
+        dtb = fdt_util.EnsureCompiled(self.TestFile(fname))
+        with open(dtb) as fd:
+            data = fd.read()
+            TestFunctional._MakeInputFile(outfile, data)
+        tools.FinaliseOutputDir()
+        return data
+
+    def _GetDtbContentsForSplTpl(self, dtb_data, name):
+        """Create a version of the main DTB for SPL or SPL
+
+        For testing we don't actually have different versions of the DTB. With
+        U-Boot we normally run fdtgrep to remove unwanted nodes, but for tests
+        we don't normally have any unwanted nodes.
+
+        We still want the DTBs for SPL and TPL to be different though, since
+        otherwise it is confusing to know which one we are looking at. So add
+        an 'spl' or 'tpl' property to the top-level node.
+        """
+        dtb = fdt.Fdt.FromData(dtb_data)
+        dtb.Scan()
+        dtb.GetNode('/binman').AddZeroProp(name)
+        dtb.Sync(auto_resize=True)
+        dtb.Pack()
+        return dtb.GetContents()
+
+    def _DoReadFileDtb(self, fname, use_real_dtb=False, map=False,
+                       update_dtb=False, entry_args=None, reset_dtbs=True):
+        """Run binman and return the resulting image
+
+        This runs binman with a given test file and then reads the resulting
+        output file. It is a shortcut function since most tests need to do
+        these steps.
+
+        Raises an assertion failure if binman returns a non-zero exit code.
+
+        Args:
+            fname: Device-tree source filename to use (e.g. 005_simple.dts)
+            use_real_dtb: True to use the test file as the contents of
+                the u-boot-dtb entry. Normally this is not needed and the
+                test contents (the U_BOOT_DTB_DATA string) can be used.
+                But in some tests we need the real contents.
+            map: True to output map files for the images
+            update_dtb: Update the offset and size of each entry in the device
+                tree before packing it into the image
+
+        Returns:
+            Tuple:
+                Resulting image contents
+                Device tree contents
+                Map data showing contents of image (or None if none)
+                Output device tree binary filename ('u-boot.dtb' path)
+        """
+        dtb_data = None
+        # Use the compiled test file as the u-boot-dtb input
+        if use_real_dtb:
+            dtb_data = self._SetupDtb(fname)
+            infile = os.path.join(self._indir, 'u-boot.dtb')
+
+            # For testing purposes, make a copy of the DT for SPL and TPL. Add
+            # a property indicating which it is, to aid verification.
+            for name in ['spl', 'tpl']:
+                dtb_fname = '%s/u-boot-%s.dtb' % (name, name)
+                outfile = os.path.join(self._indir, dtb_fname)
+                TestFunctional._MakeInputFile(dtb_fname,
+                        self._GetDtbContentsForSplTpl(dtb_data, name))
+
+        try:
+            retcode = self._DoTestFile(fname, map=map, update_dtb=update_dtb,
+                    entry_args=entry_args, use_real_dtb=use_real_dtb)
+            self.assertEqual(0, retcode)
+            out_dtb_fname = tools.GetOutputFilename('u-boot.dtb.out')
+
+            # Find the (only) image, read it and return its contents
+            image = control.images['image']
+            image_fname = tools.GetOutputFilename('image.bin')
+            self.assertTrue(os.path.exists(image_fname))
+            if map:
+                map_fname = tools.GetOutputFilename('image.map')
+                with open(map_fname) as fd:
+                    map_data = fd.read()
+            else:
+                map_data = None
+            with open(image_fname) as fd:
+                return fd.read(), dtb_data, map_data, out_dtb_fname
+        finally:
+            # Put the test file back
+            if reset_dtbs and use_real_dtb:
+                self._ResetDtbs()
+
+    def _DoReadFile(self, fname, use_real_dtb=False):
+        """Helper function which discards the device-tree binary
+
+        Args:
+            fname: Device-tree source filename to use (e.g. 005_simple.dts)
+            use_real_dtb: True to use the test file as the contents of
+                the u-boot-dtb entry. Normally this is not needed and the
+                test contents (the U_BOOT_DTB_DATA string) can be used.
+                But in some tests we need the real contents.
+
+        Returns:
+            Resulting image contents
+        """
+        return self._DoReadFileDtb(fname, use_real_dtb)[0]
+
+    @classmethod
+    def _MakeInputFile(self, fname, contents):
+        """Create a new test input file, creating directories as needed
+
+        Args:
+            fname: Filename to create
+            contents: File contents to write in to the file
+        Returns:
+            Full pathname of file created
+        """
+        pathname = os.path.join(self._indir, fname)
+        dirname = os.path.dirname(pathname)
+        if dirname and not os.path.exists(dirname):
+            os.makedirs(dirname)
+        with open(pathname, 'wb') as fd:
+            fd.write(contents)
+        return pathname
+
+    @classmethod
+    def _MakeInputDir(self, dirname):
+        """Create a new test input directory, creating directories as needed
+
+        Args:
+            dirname: Directory name to create
+
+        Returns:
+            Full pathname of directory created
+        """
+        pathname = os.path.join(self._indir, dirname)
+        if not os.path.exists(pathname):
+            os.makedirs(pathname)
+        return pathname
+
+    @classmethod
+    def _SetupSplElf(self, src_fname='bss_data'):
+        """Set up an ELF file with a '_dt_ucode_base_size' symbol
+
+        Args:
+            Filename of ELF file to use as SPL
+        """
+        with open(self.TestFile(src_fname)) as fd:
+            TestFunctional._MakeInputFile('spl/u-boot-spl', fd.read())
+
+    @classmethod
+    def TestFile(self, fname):
+        return os.path.join(self._binman_dir, 'test', fname)
+
+    def AssertInList(self, grep_list, target):
+        """Assert that at least one of a list of things is in a target
+
+        Args:
+            grep_list: List of strings to check
+            target: Target string
+        """
+        for grep in grep_list:
+            if grep in target:
+                return
+        self.fail("Error: '%' not found in '%s'" % (grep_list, target))
+
+    def CheckNoGaps(self, entries):
+        """Check that all entries fit together without gaps
+
+        Args:
+            entries: List of entries to check
+        """
+        offset = 0
+        for entry in entries.values():
+            self.assertEqual(offset, entry.offset)
+            offset += entry.size
+
+    def GetFdtLen(self, dtb):
+        """Get the totalsize field from a device-tree binary
+
+        Args:
+            dtb: Device-tree binary contents
+
+        Returns:
+            Total size of device-tree binary, from the header
+        """
+        return struct.unpack('>L', dtb[4:8])[0]
+
+    def _GetPropTree(self, dtb, prop_names):
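+        """Collect the values of selected properties from the device tree
+
+        Args:
+            dtb: Fdt object to scan
+            prop_names: List of property names to include
+
+        Returns:
+            Dict keyed by '<node path>:<property name>' (with the leading
+                '/binman/' removed), with values decoded with fdt32_to_cpu()
+        """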
+        def AddNode(node, path):
+            if node.name != '/':
+                path += '/' + node.name
+            for subnode in node.subnodes:
+                for prop in subnode.props.values():
+                    if prop.name in prop_names:
+                        prop_path = path + '/' + subnode.name + ':' + prop.name
+                        tree[prop_path[len('/binman/'):]] = fdt_util.fdt32_to_cpu(
+                            prop.value)
+                AddNode(subnode, path)
+
+        tree = {}
+        AddNode(dtb.GetRoot(), '')
+        return tree
+
+    def testRun(self):
+        """Test a basic run with valid args"""
+        result = self._RunBinman('-h')
+
+    def testFullHelp(self):
+        """Test that the full help is displayed with -H"""
+        result = self._RunBinman('-H')
+        help_file = os.path.join(self._binman_dir, 'README')
+        # Remove possible extraneous strings
+        extra = '::::::::::::::\n' + help_file + '\n::::::::::::::\n'
+        gothelp = result.stdout.replace(extra, '')
+        self.assertEqual(len(gothelp), os.path.getsize(help_file))
+        self.assertEqual(0, len(result.stderr))
+        self.assertEqual(0, result.return_code)
+
+    def testFullHelpInternal(self):
+        """Test that the full help is displayed with -H"""
+        try:
+            command.test_result = command.CommandResult()
+            result = self._DoBinman('-H')
+            help_file = os.path.join(self._binman_dir, 'README')
+        finally:
+            command.test_result = None
+
+    def testHelp(self):
+        """Test that the basic help is displayed with -h"""
+        result = self._RunBinman('-h')
+        self.assertTrue(len(result.stdout) > 200)
+        self.assertEqual(0, len(result.stderr))
+        self.assertEqual(0, result.return_code)
+
+    def testBoard(self):
+        """Test that we can run it with a specific board"""
+        self._SetupDtb('005_simple.dts', 'sandbox/u-boot.dtb')
+        TestFunctional._MakeInputFile('sandbox/u-boot.bin', U_BOOT_DATA)
+        result = self._DoBinman('-b', 'sandbox')
+        self.assertEqual(0, result)
+
+    def testNeedBoard(self):
+        """Test that we get an error when no board ius supplied"""
+        with self.assertRaises(ValueError) as e:
+            result = self._DoBinman()
+        self.assertIn("Must provide a board to process (use -b <board>)",
+                str(e.exception))
+
+    def testMissingDt(self):
+        """Test that an invalid device-tree file generates an error"""
+        with self.assertRaises(Exception) as e:
+            self._RunBinman('-d', 'missing_file')
+        # We get one error from libfdt, and a different one from fdtget.
+        self.AssertInList(["Couldn't open blob from 'missing_file'",
+                           'No such file or directory'], str(e.exception))
+
+    def testBrokenDt(self):
+        """Test that an invalid device-tree source file generates an error
+
+        Since this is a source file it should be compiled and the error
+        will come from the device-tree compiler (dtc).
+        """
+        with self.assertRaises(Exception) as e:
+            self._RunBinman('-d', self.TestFile('001_invalid.dts'))
+        self.assertIn("FATAL ERROR: Unable to parse input tree",
+                str(e.exception))
+
+    def testMissingNode(self):
+        """Test that a device tree without a 'binman' node generates an error"""
+        with self.assertRaises(Exception) as e:
+            self._DoBinman('-d', self.TestFile('002_missing_node.dts'))
+        self.assertIn("does not have a 'binman' node", str(e.exception))
+
+    def testEmpty(self):
+        """Test that an empty binman node works OK (i.e. does nothing)"""
+        result = self._RunBinman('-d', self.TestFile('003_empty.dts'))
+        self.assertEqual(0, len(result.stderr))
+        self.assertEqual(0, result.return_code)
+
+    def testInvalidEntry(self):
+        """Test that an invalid entry is flagged"""
+        with self.assertRaises(Exception) as e:
+            result = self._RunBinman('-d',
+                                     self.TestFile('004_invalid_entry.dts'))
+        self.assertIn("Unknown entry type 'not-a-valid-type' in node "
+                "'/binman/not-a-valid-type'", str(e.exception))
+
+    def testSimple(self):
+        """Test a simple binman with a single file"""
+        data = self._DoReadFile('005_simple.dts')
+        self.assertEqual(U_BOOT_DATA, data)
+
+    def testSimpleDebug(self):
+        """Test a simple binman run with debugging enabled"""
+        data = self._DoTestFile('005_simple.dts', debug=True)
+
+    def testDual(self):
+        """Test that we can handle creating two images
+
+        This also tests image padding.
+        """
+        retcode = self._DoTestFile('006_dual_image.dts')
+        self.assertEqual(0, retcode)
+
+        image = control.images['image1']
+        self.assertEqual(len(U_BOOT_DATA), image._size)
+        fname = tools.GetOutputFilename('image1.bin')
+        self.assertTrue(os.path.exists(fname))
+        with open(fname) as fd:
+            data = fd.read()
+            self.assertEqual(U_BOOT_DATA, data)
+
+        image = control.images['image2']
+        self.assertEqual(3 + len(U_BOOT_DATA) + 5, image._size)
+        fname = tools.GetOutputFilename('image2.bin')
+        self.assertTrue(os.path.exists(fname))
+        with open(fname) as fd:
+            data = fd.read()
+            self.assertEqual(U_BOOT_DATA, data[3:7])
+            self.assertEqual(chr(0) * 3, data[:3])
+            self.assertEqual(chr(0) * 5, data[7:])
+
+    def testBadAlign(self):
+        """Test that an invalid alignment value is detected"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('007_bad_align.dts')
+        self.assertIn("Node '/binman/u-boot': Alignment 23 must be a power "
+                      "of two", str(e.exception))
+
+    def testPackSimple(self):
+        """Test that packing works as expected"""
+        retcode = self._DoTestFile('008_pack.dts')
+        self.assertEqual(0, retcode)
+        self.assertIn('image', control.images)
+        image = control.images['image']
+        entries = image.GetEntries()
+        self.assertEqual(5, len(entries))
+
+        # First u-boot
+        self.assertIn('u-boot', entries)
+        entry = entries['u-boot']
+        self.assertEqual(0, entry.offset)
+        self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+        # Second u-boot, aligned to 16-byte boundary
+        self.assertIn('u-boot-align', entries)
+        entry = entries['u-boot-align']
+        self.assertEqual(16, entry.offset)
+        self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+        # Third u-boot, size 23 bytes
+        self.assertIn('u-boot-size', entries)
+        entry = entries['u-boot-size']
+        self.assertEqual(20, entry.offset)
+        self.assertEqual(len(U_BOOT_DATA), entry.contents_size)
+        self.assertEqual(23, entry.size)
+
+        # Fourth u-boot, placed immediately after the above
+        self.assertIn('u-boot-next', entries)
+        entry = entries['u-boot-next']
+        self.assertEqual(43, entry.offset)
+        self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+        # Fifth u-boot, placed at a fixed offset
+        self.assertIn('u-boot-fixed', entries)
+        entry = entries['u-boot-fixed']
+        self.assertEqual(61, entry.offset)
+        self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+        self.assertEqual(65, image._size)
+
+    def testPackExtra(self):
+        """Test that extra packing feature works as expected"""
+        retcode = self._DoTestFile('009_pack_extra.dts')
+
+        self.assertEqual(0, retcode)
+        self.assertIn('image', control.images)
+        image = control.images['image']
+        entries = image.GetEntries()
+        self.assertEqual(5, len(entries))
+
+        # First u-boot with padding before and after
+        self.assertIn('u-boot', entries)
+        entry = entries['u-boot']
+        self.assertEqual(0, entry.offset)
+        self.assertEqual(3, entry.pad_before)
+        self.assertEqual(3 + 5 + len(U_BOOT_DATA), entry.size)
+
+        # Second u-boot has an aligned size, but it has no effect
+        self.assertIn('u-boot-align-size-nop', entries)
+        entry = entries['u-boot-align-size-nop']
+        self.assertEqual(12, entry.offset)
+        self.assertEqual(4, entry.size)
+
+        # Third u-boot has an aligned size too
+        self.assertIn('u-boot-align-size', entries)
+        entry = entries['u-boot-align-size']
+        self.assertEqual(16, entry.offset)
+        self.assertEqual(32, entry.size)
+
+        # Fourth u-boot has an aligned end
+        self.assertIn('u-boot-align-end', entries)
+        entry = entries['u-boot-align-end']
+        self.assertEqual(48, entry.offset)
+        self.assertEqual(16, entry.size)
+
+        # Fifth u-boot immediately afterwards
+        self.assertIn('u-boot-align-both', entries)
+        entry = entries['u-boot-align-both']
+        self.assertEqual(64, entry.offset)
+        self.assertEqual(64, entry.size)
+
+        self.CheckNoGaps(entries)
+        self.assertEqual(128, image._size)
+
+    def testPackAlignPowerOf2(self):
+        """Test that invalid entry alignment is detected"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('010_pack_align_power2.dts')
+        self.assertIn("Node '/binman/u-boot': Alignment 5 must be a power "
+                      "of two", str(e.exception))
+
+    def testPackAlignSizePowerOf2(self):
+        """Test that invalid entry size alignment is detected"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('011_pack_align_size_power2.dts')
+        self.assertIn("Node '/binman/u-boot': Alignment size 55 must be a "
+                      "power of two", str(e.exception))
+
+    def testPackInvalidAlign(self):
+        """Test detection of an offset that does not match its alignment"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('012_pack_inv_align.dts')
+        self.assertIn("Node '/binman/u-boot': Offset 0x5 (5) does not match "
+                      "align 0x4 (4)", str(e.exception))
+
+    def testPackInvalidSizeAlign(self):
+        """Test that invalid entry size alignment is detected"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('013_pack_inv_size_align.dts')
+        self.assertIn("Node '/binman/u-boot': Size 0x5 (5) does not match "
+                      "align-size 0x4 (4)", str(e.exception))
+
+    def testPackOverlap(self):
+        """Test that overlapping regions are detected"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('014_pack_overlap.dts')
+        self.assertIn("Node '/binman/u-boot-align': Offset 0x3 (3) overlaps "
+                      "with previous entry '/binman/u-boot' ending at 0x4 (4)",
+                      str(e.exception))
+
+    def testPackEntryOverflow(self):
+        """Test that entries that overflow their size are detected"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('015_pack_overflow.dts')
+        self.assertIn("Node '/binman/u-boot': Entry contents size is 0x4 (4) "
+                      "but entry size is 0x3 (3)", str(e.exception))
+
+    def testPackImageOverflow(self):
+        """Test that entries which overflow the image size are detected"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('016_pack_image_overflow.dts')
+        self.assertIn("Section '/binman': contents size 0x4 (4) exceeds section "
+                      "size 0x3 (3)", str(e.exception))
+
+    def testPackImageSize(self):
+        """Test that the image size can be set"""
+        retcode = self._DoTestFile('017_pack_image_size.dts')
+        self.assertEqual(0, retcode)
+        self.assertIn('image', control.images)
+        image = control.images['image']
+        self.assertEqual(7, image._size)
+
+    def testPackImageSizeAlign(self):
+        """Test that image size alignemnt works as expected"""
+        retcode = self._DoTestFile('018_pack_image_align.dts')
+        self.assertEqual(0, retcode)
+        self.assertIn('image', control.images)
+        image = control.images['image']
+        self.assertEqual(16, image._size)
+
+    def testPackInvalidImageAlign(self):
+        """Test that invalid image alignment is detected"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('019_pack_inv_image_align.dts')
+        self.assertIn("Section '/binman': Size 0x7 (7) does not match "
+                      "align-size 0x8 (8)", str(e.exception))
+
+    def testPackImageAlignPowerOf2(self):
+        """Test that an image align-size must be a power of two"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('020_pack_inv_image_align_power2.dts')
+        self.assertIn("Section '/binman': Alignment size 131 must be a power of "
+                      "two", str(e.exception))
+
+    def testImagePadByte(self):
+        """Test that the image pad byte can be specified"""
+        self._SetupSplElf()
+        data = self._DoReadFile('021_image_pad.dts')
+        self.assertEqual(U_BOOT_SPL_DATA + (chr(0xff) * 1) + U_BOOT_DATA, data)
+
+    def testImageName(self):
+        """Test that image files can be named"""
+        retcode = self._DoTestFile('022_image_name.dts')
+        self.assertEqual(0, retcode)
+        image = control.images['image1']
+        fname = tools.GetOutputFilename('test-name')
+        self.assertTrue(os.path.exists(fname))
+
+        image = control.images['image2']
+        fname = tools.GetOutputFilename('test-name.xx')
+        self.assertTrue(os.path.exists(fname))
+
+    def testBlobFilename(self):
+        """Test that generic blobs can be provided by filename"""
+        data = self._DoReadFile('023_blob.dts')
+        self.assertEqual(BLOB_DATA, data)
+
+    def testPackSorted(self):
+        """Test that entries can be sorted"""
+        self._SetupSplElf()
+        data = self._DoReadFile('024_sorted.dts')
+        self.assertEqual(chr(0) * 1 + U_BOOT_SPL_DATA + chr(0) * 2 +
+                         U_BOOT_DATA, data)
+
+    def testPackZeroOffset(self):
+        """Test that an entry at offset 0 is not given a new offset"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('025_pack_zero_size.dts')
+        self.assertIn("Node '/binman/u-boot-spl': Offset 0x0 (0) overlaps "
+                      "with previous entry '/binman/u-boot' ending at 0x4 (4)",
+                      str(e.exception))
+
+    def testPackUbootDtb(self):
+        """Test that a device tree can be added to U-Boot"""
+        data = self._DoReadFile('026_pack_u_boot_dtb.dts')
+        self.assertEqual(U_BOOT_NODTB_DATA + U_BOOT_DTB_DATA, data)
+
+    def testPackX86RomNoSize(self):
+        """Test that the end-at-4gb property requires a size property"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('027_pack_4gb_no_size.dts')
+        self.assertIn("Section '/binman': Section size must be provided when "
+                      "using end-at-4gb", str(e.exception))
+
+    def test4gbAndSkipAtStartTogether(self):
+        """Test that the end-at-4gb and skip-at-size property can't be used
+        together"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('80_4gb_and_skip_at_start_together.dts')
+        self.assertIn("Section '/binman': Provide either 'end-at-4gb' or "
+                      "'skip-at-start'", str(e.exception))
+
+    def testPackX86RomOutside(self):
+        """Test that the end-at-4gb property checks for offset boundaries"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('028_pack_4gb_outside.dts')
+        self.assertIn("Node '/binman/u-boot': Offset 0x0 (0) is outside "
+                      "the section starting at 0xffffffe0 (4294967264)",
+                      str(e.exception))
+
+    def testPackX86Rom(self):
+        """Test that a basic x86 ROM can be created"""
+        self._SetupSplElf()
+        data = self._DoReadFile('029_x86-rom.dts')
+        self.assertEqual(U_BOOT_DATA + chr(0) * 7 + U_BOOT_SPL_DATA +
+                         chr(0) * 2, data)
+
+    def testPackX86RomMeNoDesc(self):
+        """Test that an invalid Intel descriptor entry is detected"""
+        TestFunctional._MakeInputFile('descriptor.bin', '')
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('031_x86-rom-me.dts')
+        self.assertIn("Node '/binman/intel-descriptor': Cannot find FD "
+                      "signature", str(e.exception))
+
+    def testPackX86RomBadDesc(self):
+        """Test that the Intel requires a descriptor entry"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('030_x86-rom-me-no-desc.dts')
+        self.assertIn("Node '/binman/intel-me': No offset set with "
+                      "offset-unset: should another entry provide this correct "
+                      "offset?", str(e.exception))
+
+    def testPackX86RomMe(self):
+        """Test that an x86 ROM with an ME region can be created"""
+        data = self._DoReadFile('031_x86-rom-me.dts')
+        self.assertEqual(ME_DATA, data[0x1000:0x1000 + len(ME_DATA)])
+
+    def testPackVga(self):
+        """Test that an image with a VGA binary can be created"""
+        data = self._DoReadFile('032_intel-vga.dts')
+        self.assertEqual(VGA_DATA, data[:len(VGA_DATA)])
+
+    def testPackStart16(self):
+        """Test that an image with an x86 start16 region can be created"""
+        data = self._DoReadFile('033_x86-start16.dts')
+        self.assertEqual(X86_START16_DATA, data[:len(X86_START16_DATA)])
+
+    def testPackPowerpcMpc85xxBootpgResetvec(self):
+        """Test that an image with powerpc-mpc85xx-bootpg-resetvec can be
+        created"""
+        data = self._DoReadFile('81_powerpc_mpc85xx_bootpg_resetvec.dts')
+        self.assertEqual(PPC_MPC85XX_BR_DATA, data[:len(PPC_MPC85XX_BR_DATA)])
+
+    def _RunMicrocodeTest(self, dts_fname, nodtb_data, ucode_second=False):
+        """Handle running a test for insertion of microcode
+
+        Args:
+            dts_fname: Name of test .dts file
+            nodtb_data: Data that we expect in the first section
+            ucode_second: True if the microcode entry is second instead of
+                third
+
+        Returns:
+            Tuple:
+                Contents of first region (U-Boot or SPL)
+                Offset and size components of microcode pointer, as inserted
+                    in the above (two 4-byte words)
+        """
+        data = self._DoReadFile(dts_fname, True)
+
+        # Now check the device tree has no microcode
+        if ucode_second:
+            ucode_content = data[len(nodtb_data):]
+            ucode_pos = len(nodtb_data)
+            dtb_with_ucode = ucode_content[16:]
+            fdt_len = self.GetFdtLen(dtb_with_ucode)
+        else:
+            dtb_with_ucode = data[len(nodtb_data):]
+            fdt_len = self.GetFdtLen(dtb_with_ucode)
+            ucode_content = dtb_with_ucode[fdt_len:]
+            ucode_pos = len(nodtb_data) + fdt_len
+        fname = tools.GetOutputFilename('test.dtb')
+        with open(fname, 'wb') as fd:
+            fd.write(dtb_with_ucode)
+        dtb = fdt.FdtScan(fname)
+        ucode = dtb.GetNode('/microcode')
+        self.assertTrue(ucode)
+        for node in ucode.subnodes:
+            self.assertFalse(node.props.get('data'))
+
+        # Check that the microcode appears immediately after the Fdt
+        # This matches the concatenation of the data properties in
+        # the /microcode/update@xxx nodes in 034_x86_ucode.dts.
+        ucode_data = struct.pack('>4L', 0x12345678, 0x12345679, 0xabcd0000,
+                                 0x78235609)
+        self.assertEqual(ucode_data, ucode_content[:len(ucode_data)])
+
+        # Check that the microcode pointer was inserted. It should match the
+        # expected offset and size
+        pos_and_size = struct.pack('<2L', 0xfffffe00 + ucode_pos,
+                                   len(ucode_data))
+        u_boot = data[:len(nodtb_data)]
+        return u_boot, pos_and_size
+
+    def testPackUbootMicrocode(self):
+        """Test that x86 microcode can be handled correctly
+
+        We expect to see the following in the image, in order:
+            u-boot-nodtb.bin with a microcode pointer inserted at the correct
+                place
+            u-boot.dtb with the microcode removed
+            the microcode
+        """
+        first, pos_and_size = self._RunMicrocodeTest('034_x86_ucode.dts',
+                                                     U_BOOT_NODTB_DATA)
+        self.assertEqual('nodtb with microcode' + pos_and_size +
+                         ' somewhere in here', first)
+
+    def _RunPackUbootSingleMicrocode(self):
+        """Test that x86 microcode can be handled correctly
+
+        We expect to see the following in the image, in order:
+            u-boot-nodtb.bin with a microcode pointer inserted at the correct
+                place
+            u-boot.dtb with the microcode
+            an empty microcode region
+        """
+        # We need the libfdt library to run this test since only that allows
+        # finding the offset of a property. This is required by
+        # Entry_u_boot_dtb_with_ucode.ObtainContents().
+        data = self._DoReadFile('035_x86_single_ucode.dts', True)
+
+        second = data[len(U_BOOT_NODTB_DATA):]
+
+        fdt_len = self.GetFdtLen(second)
+        third = second[fdt_len:]
+        second = second[:fdt_len]
+
+        ucode_data = struct.pack('>2L', 0x12345678, 0x12345679)
+        self.assertIn(ucode_data, second)
+        ucode_pos = second.find(ucode_data) + len(U_BOOT_NODTB_DATA)
+
+        # Check that the microcode pointer was inserted. It should match the
+        # expected offset and size
+        pos_and_size = struct.pack('<2L', 0xfffffe00 + ucode_pos,
+                                   len(ucode_data))
+        first = data[:len(U_BOOT_NODTB_DATA)]
+        self.assertEqual('nodtb with microcode' + pos_and_size +
+                         ' somewhere in here', first)
+
+    def testPackUbootSingleMicrocode(self):
+        """Test that x86 microcode can be handled correctly with fdt_normal.
+        """
+        self._RunPackUbootSingleMicrocode()
+
+    def testUBootImg(self):
+        """Test that u-boot.img can be put in a file"""
+        data = self._DoReadFile('036_u_boot_img.dts')
+        self.assertEqual(U_BOOT_IMG_DATA, data)
+
+    def testNoMicrocode(self):
+        """Test that a missing microcode region is detected"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('037_x86_no_ucode.dts', True)
+        self.assertIn("Node '/binman/u-boot-dtb-with-ucode': No /microcode "
+                      "node found in ", str(e.exception))
+
+    def testMicrocodeWithoutNode(self):
+        """Test that a missing u-boot-dtb-with-ucode node is detected"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('038_x86_ucode_missing_node.dts', True)
+        self.assertIn("Node '/binman/u-boot-with-ucode-ptr': Cannot find "
+                "microcode region u-boot-dtb-with-ucode", str(e.exception))
+
+    def testMicrocodeWithoutNode2(self):
+        """Test that a missing u-boot-ucode node is detected"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('039_x86_ucode_missing_node2.dts', True)
+        self.assertIn("Node '/binman/u-boot-with-ucode-ptr': Cannot find "
+            "microcode region u-boot-ucode", str(e.exception))
+
+    def testMicrocodeWithoutPtrInElf(self):
+        """Test that a U-Boot binary without the microcode symbol is detected"""
+        # ELF file without a '_dt_ucode_base_size' symbol
+        try:
+            with open(self.TestFile('u_boot_no_ucode_ptr')) as fd:
+                TestFunctional._MakeInputFile('u-boot', fd.read())
+
+            with self.assertRaises(ValueError) as e:
+                self._RunPackUbootSingleMicrocode()
+            self.assertIn("Node '/binman/u-boot-with-ucode-ptr': Cannot locate "
+                    "_dt_ucode_base_size symbol in u-boot", str(e.exception))
+
+        finally:
+            # Put the original file back
+            with open(self.TestFile('u_boot_ucode_ptr')) as fd:
+                TestFunctional._MakeInputFile('u-boot', fd.read())
+
+    def testMicrocodeNotInImage(self):
+        """Test that microcode must be placed within the image"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('040_x86_ucode_not_in_image.dts', True)
+        self.assertIn("Node '/binman/u-boot-with-ucode-ptr': Microcode "
+                "pointer _dt_ucode_base_size at fffffe14 is outside the "
+                "section ranging from 00000000 to 0000002e", str(e.exception))
+
+    def testWithoutMicrocode(self):
+        """Test that we can cope with an image without microcode (e.g. qemu)"""
+        with open(self.TestFile('u_boot_no_ucode_ptr')) as fd:
+            TestFunctional._MakeInputFile('u-boot', fd.read())
+        data, dtb, _, _ = self._DoReadFileDtb('044_x86_optional_ucode.dts', True)
+
+        # Now check the device tree has no microcode
+        self.assertEqual(U_BOOT_NODTB_DATA, data[:len(U_BOOT_NODTB_DATA)])
+        second = data[len(U_BOOT_NODTB_DATA):]
+
+        fdt_len = self.GetFdtLen(second)
+        self.assertEqual(dtb, second[:fdt_len])
+
+        used_len = len(U_BOOT_NODTB_DATA) + fdt_len
+        third = data[used_len:]
+        self.assertEqual(chr(0) * (0x200 - used_len), third)
+
+    def testUnknownPosSize(self):
+        """Test that microcode must be placed within the image"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('041_unknown_pos_size.dts', True)
+        self.assertIn("Section '/binman': Unable to set offset/size for unknown "
+                "entry 'invalid-entry'", str(e.exception))
+
+    def testPackFsp(self):
+        """Test that an image with a FSP binary can be created"""
+        data = self._DoReadFile('042_intel-fsp.dts')
+        self.assertEqual(FSP_DATA, data[:len(FSP_DATA)])
+
+    def testPackCmc(self):
+        """Test that an image with a CMC binary can be created"""
+        data = self._DoReadFile('043_intel-cmc.dts')
+        self.assertEqual(CMC_DATA, data[:len(CMC_DATA)])
+
+    def testPackVbt(self):
+        """Test that an image with a VBT binary can be created"""
+        data = self._DoReadFile('046_intel-vbt.dts')
+        self.assertEqual(VBT_DATA, data[:len(VBT_DATA)])
+
+    def testSplBssPad(self):
+        """Test that we can pad SPL's BSS with zeros"""
+        # ELF file with a '__bss_size' symbol
+        self._SetupSplElf()
+        data = self._DoReadFile('047_spl_bss_pad.dts')
+        self.assertEqual(U_BOOT_SPL_DATA + (chr(0) * 10) + U_BOOT_DATA, data)
+
+    def testSplBssPadMissing(self):
+        """Test that a missing symbol is detected"""
+        self._SetupSplElf('u_boot_ucode_ptr')
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('047_spl_bss_pad.dts')
+        self.assertIn('Expected __bss_size symbol in spl/u-boot-spl',
+                      str(e.exception))
+
+    def testPackStart16Spl(self):
+        """Test that an image with an x86 start16 SPL region can be created"""
+        data = self._DoReadFile('048_x86-start16-spl.dts')
+        self.assertEqual(X86_START16_SPL_DATA, data[:len(X86_START16_SPL_DATA)])
+
+    def _PackUbootSplMicrocode(self, dts, ucode_second=False):
+        """Helper function for microcode tests
+
+        We expect to see the following in the image, in order:
+            u-boot-spl-nodtb.bin with a microcode pointer inserted at the
+                correct place
+            u-boot.dtb with the microcode removed
+            the microcode
+
+        Args:
+            dts: Device tree file to use for test
+            ucode_second: True if the microcode entry is second instead of
+                third
+        """
+        self._SetupSplElf('u_boot_ucode_ptr')
+        first, pos_and_size = self._RunMicrocodeTest(dts, U_BOOT_SPL_NODTB_DATA,
+                                                     ucode_second=ucode_second)
+        self.assertEqual('splnodtb with microc' + pos_and_size +
+                         'ter somewhere in here', first)
+
+    def testPackUbootSplMicrocode(self):
+        """Test that x86 microcode can be handled correctly in SPL"""
+        self._PackUbootSplMicrocode('049_x86_ucode_spl.dts')
+
+    def testPackUbootSplMicrocodeReorder(self):
+        """Test that order doesn't matter for microcode entries
+
+        This is the same as testPackUbootSplMicrocode but when we process the
+        u-boot-ucode entry we have not yet seen the u-boot-dtb-with-ucode
+        entry, so we rely on binman to retry it later.
+        """
+        self._PackUbootSplMicrocode('058_x86_ucode_spl_needs_retry.dts',
+                                    ucode_second=True)
+
+    def testPackMrc(self):
+        """Test that an image with an MRC binary can be created"""
+        data = self._DoReadFile('050_intel_mrc.dts')
+        self.assertEqual(MRC_DATA, data[:len(MRC_DATA)])
+
+    def testSplDtb(self):
+        """Test that an image with spl/u-boot-spl.dtb can be created"""
+        data = self._DoReadFile('051_u_boot_spl_dtb.dts')
+        self.assertEqual(U_BOOT_SPL_DTB_DATA, data[:len(U_BOOT_SPL_DTB_DATA)])
+
+    def testSplNoDtb(self):
+        """Test that an image with spl/u-boot-spl-nodtb.bin can be created"""
+        data = self._DoReadFile('052_u_boot_spl_nodtb.dts')
+        self.assertEqual(U_BOOT_SPL_NODTB_DATA, data[:len(U_BOOT_SPL_NODTB_DATA)])
+
+    def testSymbols(self):
+        """Test binman can assign symbols embedded in U-Boot"""
+        elf_fname = self.TestFile('u_boot_binman_syms')
+        syms = elf.GetSymbols(elf_fname, ['binman', 'image'])
+        addr = elf.GetSymbolAddress(elf_fname, '__image_copy_start')
+        self.assertEqual(syms['_binman_u_boot_spl_prop_offset'].address, addr)
+
+        self._SetupSplElf('u_boot_binman_syms')
+        data = self._DoReadFile('053_symbols.dts')
+        sym_values = struct.pack('<LQL', 0x24 + 0, 0x24 + 24, 0x24 + 20)
+        expected = (sym_values + U_BOOT_SPL_DATA[16:] + chr(0xff) +
+                    U_BOOT_DATA +
+                    sym_values + U_BOOT_SPL_DATA[16:])
+        self.assertEqual(expected, data)
+
+    def testPackUnitAddress(self):
+        """Test that we support multiple binaries with the same name"""
+        data = self._DoReadFile('054_unit_address.dts')
+        self.assertEqual(U_BOOT_DATA + U_BOOT_DATA, data)
+
+    def testSections(self):
+        """Basic test of sections"""
+        data = self._DoReadFile('055_sections.dts')
+        expected = (U_BOOT_DATA + '!' * 12 + U_BOOT_DATA + 'a' * 12 +
+                    U_BOOT_DATA + '&' * 4)
+        self.assertEqual(expected, data)
+
+    def testMap(self):
+        """Tests outputting a map of the images"""
+        _, _, map_data, _ = self._DoReadFileDtb('055_sections.dts', map=True)
+        self.assertEqual('''ImagePos    Offset      Size  Name
+00000000  00000000  00000028  main-section
+00000000   00000000  00000010  section@0
+00000000    00000000  00000004  u-boot
+00000010   00000010  00000010  section@1
+00000010    00000000  00000004  u-boot
+00000020   00000020  00000004  section@2
+00000020    00000000  00000004  u-boot
+''', map_data)
+
+    def testNamePrefix(self):
+        """Tests that name prefixes are used"""
+        _, _, map_data, _ = self._DoReadFileDtb('056_name_prefix.dts', map=True)
+        self.assertEqual('''ImagePos    Offset      Size  Name
+00000000  00000000  00000028  main-section
+00000000   00000000  00000010  section@0
+00000000    00000000  00000004  ro-u-boot
+00000010   00000010  00000010  section@1
+00000010    00000000  00000004  rw-u-boot
+''', map_data)
+
+    def testUnknownContents(self):
+        """Test that obtaining the contents works as expected"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('057_unknown_contents.dts', True)
+        self.assertIn("Section '/binman': Internal error: Could not complete "
+                "processing of contents: remaining [<_testing.Entry__testing ",
+                str(e.exception))
+
+    def testBadChangeSize(self):
+        """Test that trying to change the size of an entry fails"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('059_change_size.dts', True)
+        self.assertIn("Node '/binman/_testing': Cannot update entry size from "
+                      '2 to 1', str(e.exception))
+
+    def testUpdateFdt(self):
+        """Test that we can update the device tree with offset/size info"""
+        _, _, _, out_dtb_fname = self._DoReadFileDtb('060_fdt_update.dts',
+                                                     update_dtb=True)
+        dtb = fdt.Fdt(out_dtb_fname)
+        dtb.Scan()
+        props = self._GetPropTree(dtb, ['offset', 'size', 'image-pos'])
+        self.assertEqual({
+            'image-pos': 0,
+            'offset': 0,
+            '_testing:offset': 32,
+            '_testing:size': 1,
+            '_testing:image-pos': 32,
+            'section@0/u-boot:offset': 0,
+            'section@0/u-boot:size': len(U_BOOT_DATA),
+            'section@0/u-boot:image-pos': 0,
+            'section@0:offset': 0,
+            'section@0:size': 16,
+            'section@0:image-pos': 0,
+
+            'section@1/u-boot:offset': 0,
+            'section@1/u-boot:size': len(U_BOOT_DATA),
+            'section@1/u-boot:image-pos': 16,
+            'section@1:offset': 16,
+            'section@1:size': 16,
+            'section@1:image-pos': 16,
+            'size': 40
+        }, props)
+
+    def testUpdateFdtBad(self):
+        """Test that we detect when ProcessFdt never completes"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFileDtb('061_fdt_update_bad.dts', update_dtb=True)
+        self.assertIn('Could not complete processing of Fdt: remaining '
+                      '[<_testing.Entry__testing', str(e.exception))
+
+    def testEntryArgs(self):
+        """Test passing arguments to entries from the command line"""
+        entry_args = {
+            'test-str-arg': 'test1',
+            'test-int-arg': '456',
+        }
+        self._DoReadFileDtb('062_entry_args.dts', entry_args=entry_args)
+        self.assertIn('image', control.images)
+        entry = control.images['image'].GetEntries()['_testing']
+        self.assertEqual('test0', entry.test_str_fdt)
+        self.assertEqual('test1', entry.test_str_arg)
+        self.assertEqual(123, entry.test_int_fdt)
+        self.assertEqual(456, entry.test_int_arg)
+
+    def testEntryArgsMissing(self):
+        """Test missing arguments and properties"""
+        entry_args = {
+            'test-int-arg': '456',
+        }
+        self._DoReadFileDtb('063_entry_args_missing.dts', entry_args=entry_args)
+        entry = control.images['image'].GetEntries()['_testing']
+        self.assertEqual('test0', entry.test_str_fdt)
+        self.assertEqual(None, entry.test_str_arg)
+        self.assertEqual(None, entry.test_int_fdt)
+        self.assertEqual(456, entry.test_int_arg)
+
+    def testEntryArgsRequired(self):
+        """Test missing arguments and properties"""
+        entry_args = {
+            'test-int-arg': '456',
+        }
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFileDtb('064_entry_args_required.dts')
+        self.assertIn("Node '/binman/_testing': Missing required "
+            'properties/entry args: test-str-arg, test-int-fdt, test-int-arg',
+            str(e.exception))
+
+    def testEntryArgsInvalidFormat(self):
+        """Test that an invalid entry-argument format is detected"""
+        args = ['-d', self.TestFile('064_entry_args_required.dts'), '-ano-value']
+        with self.assertRaises(ValueError) as e:
+            self._DoBinman(*args)
+        self.assertIn("Invalid entry arguemnt 'no-value'", str(e.exception))
+
+    def testEntryArgsInvalidInteger(self):
+        """Test that an invalid entry-argument integer is detected"""
+        entry_args = {
+            'test-int-arg': 'abc',
+        }
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFileDtb('062_entry_args.dts', entry_args=entry_args)
+        self.assertIn("Node '/binman/_testing': Cannot convert entry arg "
+                      "'test-int-arg' (value 'abc') to integer",
+            str(e.exception))
+
+    def testEntryArgsInvalidDatatype(self):
+        """Test that an invalid entry-argument datatype is detected
+
+        This test could be written in entry_test.py except that it needs
+        access to control.entry_args, which seems more than that module should
+        be able to see.
+        """
+        entry_args = {
+            'test-bad-datatype-arg': '12',
+        }
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFileDtb('065_entry_args_unknown_datatype.dts',
+                                entry_args=entry_args)
+        self.assertIn('GetArg() internal error: Unknown data type ',
+                      str(e.exception))
+
+    def testText(self):
+        """Test for a text entry type"""
+        entry_args = {
+            'test-id': TEXT_DATA,
+            'test-id2': TEXT_DATA2,
+            'test-id3': TEXT_DATA3,
+        }
+        data, _, _, _ = self._DoReadFileDtb('066_text.dts',
+                                            entry_args=entry_args)
+        expected = (TEXT_DATA + chr(0) * (8 - len(TEXT_DATA)) + TEXT_DATA2 +
+                    TEXT_DATA3 + 'some text')
+        self.assertEqual(expected, data)
+
+    def testEntryDocs(self):
+        """Test for creation of entry documentation"""
+        with test_util.capture_sys_output() as (stdout, stderr):
+            control.WriteEntryDocs(binman.GetEntryModules())
+        self.assertTrue(len(stdout.getvalue()) > 0)
+
+    def testEntryDocsMissing(self):
+        """Test handling of missing entry documentation"""
+        with self.assertRaises(ValueError) as e:
+            with test_util.capture_sys_output() as (stdout, stderr):
+                control.WriteEntryDocs(binman.GetEntryModules(), 'u_boot')
+        self.assertIn('Documentation is missing for modules: u_boot',
+                      str(e.exception))
+
+    def testFmap(self):
+        """Basic test of generation of a flashrom fmap"""
+        data = self._DoReadFile('067_fmap.dts')
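+        # The first 32 bytes hold the two U-Boot copies and their padding; the
+        # flashrom fmap itself starts at offset 32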
+        fhdr, fentries = fmap_util.DecodeFmap(data[32:])
+        expected = U_BOOT_DATA + '!' * 12 + U_BOOT_DATA + 'a' * 12
+        self.assertEqual(expected, data[:32])
+        self.assertEqual('__FMAP__', fhdr.signature)
+        self.assertEqual(1, fhdr.ver_major)
+        self.assertEqual(0, fhdr.ver_minor)
+        self.assertEqual(0, fhdr.base)
+        self.assertEqual(16 + 16 +
+                         fmap_util.FMAP_HEADER_LEN +
+                         fmap_util.FMAP_AREA_LEN * 3, fhdr.image_size)
+        self.assertEqual('FMAP', fhdr.name)
+        self.assertEqual(3, fhdr.nareas)
+        for fentry in fentries:
+            self.assertEqual(0, fentry.flags)
+
+        self.assertEqual(0, fentries[0].offset)
+        self.assertEqual(4, fentries[0].size)
+        self.assertEqual('RO_U_BOOT', fentries[0].name)
+
+        self.assertEqual(16, fentries[1].offset)
+        self.assertEqual(4, fentries[1].size)
+        self.assertEqual('RW_U_BOOT', fentries[1].name)
+
+        self.assertEqual(32, fentries[2].offset)
+        self.assertEqual(fmap_util.FMAP_HEADER_LEN +
+                         fmap_util.FMAP_AREA_LEN * 3, fentries[2].size)
+        self.assertEqual('FMAP', fentries[2].name)
+
+    def testBlobNamedByArg(self):
+        """Test we can add a blob with the filename coming from an entry arg"""
+        entry_args = {
+            'cros-ec-rw-path': 'ecrw.bin',
+        }
+        data, _, _, _ = self._DoReadFileDtb('068_blob_named_by_arg.dts',
+                                            entry_args=entry_args)
+
+    def testFill(self):
+        """Test for an fill entry type"""
+        data = self._DoReadFile('069_fill.dts')
+        expected = 8 * chr(0xff) + 8 * chr(0)
+        self.assertEqual(expected, data)
+
+    def testFillNoSize(self):
+        """Test for an fill entry type with no size"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('070_fill_no_size.dts')
+        self.assertIn("'fill' entry must have a size property",
+                      str(e.exception))
+
+    def _HandleGbbCommand(self, pipe_list):
+        """Fake calls to the futility utility"""
+        if pipe_list[0][0] == 'futility':
+            fname = pipe_list[0][-1]
+            # Append our GBB data to the file, which will happen every time the
+            # futility command is called.
+            with open(fname, 'a') as fd:
+                fd.write(GBB_DATA)
+            return command.CommandResult()
+
+    def testGbb(self):
+        """Test for the Chromium OS Google Binary Block"""
+        command.test_result = self._HandleGbbCommand
+        entry_args = {
+            'keydir': 'devkeys',
+            'bmpblk': 'bmpblk.bin',
+        }
+        data, _, _, _ = self._DoReadFileDtb('071_gbb.dts', entry_args=entry_args)
+
+        # Since the fake futility appends GBB_DATA to the file each time it is
+        # run, and it is run twice here, we expect two copies of it, followed
+        # by zero padding up to the full GBB size.
+        expected = GBB_DATA + GBB_DATA + 8 * chr(0) + (0x2180 - 16) * chr(0)
+        self.assertEqual(expected, data)
+
+    def testGbbTooSmall(self):
+        """Test for the Chromium OS Google Binary Block being large enough"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFileDtb('072_gbb_too_small.dts')
+        self.assertIn("Node '/binman/gbb': GBB is too small",
+                      str(e.exception))
+
+    def testGbbNoSize(self):
+        """Test for the Chromium OS Google Binary Block having a size"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFileDtb('073_gbb_no_size.dts')
+        self.assertIn("Node '/binman/gbb': GBB must have a fixed size",
+                      str(e.exception))
+
+    def _HandleVblockCommand(self, pipe_list):
+        """Fake calls to the futility utility"""
+        if pipe_list[0][0] == 'futility':
+            fname = pipe_list[0][3]
+            with open(fname, 'wb') as fd:
+                fd.write(VBLOCK_DATA)
+            return command.CommandResult()
+
+    def testVblock(self):
+        """Test for the Chromium OS Verified Boot Block"""
+        command.test_result = self._HandleVblockCommand
+        entry_args = {
+            'keydir': 'devkeys',
+        }
+        data, _, _, _ = self._DoReadFileDtb('074_vblock.dts',
+                                            entry_args=entry_args)
+        expected = U_BOOT_DATA + VBLOCK_DATA + U_BOOT_DTB_DATA
+        self.assertEqual(expected, data)
+
+    def testVblockNoContent(self):
+        """Test we detect a vblock which has no content to sign"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('075_vblock_no_content.dts')
+        self.assertIn("Node '/binman/vblock': Vblock must have a 'content' "
+                      'property', str(e.exception))
+
+    def testVblockBadPhandle(self):
+        """Test that we detect a vblock with an invalid phandle in contents"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('076_vblock_bad_phandle.dts')
+        self.assertIn("Node '/binman/vblock': Cannot find node for phandle "
+                      '1000', str(e.exception))
+
+    def testVblockBadEntry(self):
+        """Test that we detect an entry that points to a non-entry"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('077_vblock_bad_entry.dts')
+        self.assertIn("Node '/binman/vblock': Cannot find entry for node "
+                      "'other'", str(e.exception))
+
+    def testTpl(self):
+        """Test that an image with TPL and ots device tree can be created"""
+        # ELF file with a '__bss_size' symbol
+        with open(self.TestFile('bss_data')) as fd:
+            TestFunctional._MakeInputFile('tpl/u-boot-tpl', fd.read())
+        data = self._DoReadFile('078_u_boot_tpl.dts')
+        self.assertEqual(U_BOOT_TPL_DATA + U_BOOT_TPL_DTB_DATA, data)
+
+    def testUsesPos(self):
+        """Test that the 'pos' property cannot be used anymore"""
+        with self.assertRaises(ValueError) as e:
+            data = self._DoReadFile('079_uses_pos.dts')
+        self.assertIn("Node '/binman/u-boot': Please use 'offset' instead of "
+                      "'pos'", str(e.exception))
+
+    def testFillZero(self):
+        """Test for an fill entry type with a size of 0"""
+        data = self._DoReadFile('080_fill_empty.dts')
+        self.assertEqual(chr(0) * 16, data)
+
+    def testTextMissing(self):
+        """Test for a text entry type where there is no text"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFileDtb('066_text.dts')
+        self.assertIn("Node '/binman/text': No value provided for text label "
+                      "'test-id'", str(e.exception))
+
+    def testPackStart16Tpl(self):
+        """Test that an image with an x86 start16 TPL region can be created"""
+        data = self._DoReadFile('081_x86-start16-tpl.dts')
+        self.assertEqual(X86_START16_TPL_DATA, data[:len(X86_START16_TPL_DATA)])
+
+    def testSelectImage(self):
+        """Test that we can select which images to build"""
+        with test_util.capture_sys_output() as (stdout, stderr):
+            retcode = self._DoTestFile('006_dual_image.dts', images=['image2'])
+        self.assertEqual(0, retcode)
+        self.assertIn('Skipping images: image1', stdout.getvalue())
+
+        self.assertFalse(os.path.exists(tools.GetOutputFilename('image1.bin')))
+        self.assertTrue(os.path.exists(tools.GetOutputFilename('image2.bin')))
+
+    def testUpdateFdtAll(self):
+        """Test that all device trees are updated with offset/size info"""
+        data, _, _, _ = self._DoReadFileDtb('082_fdt_update_all.dts',
+                                            use_real_dtb=True, update_dtb=True)
+
+        base_expected = {
+            'section:image-pos': 0,
+            'u-boot-tpl-dtb:size': 513,
+            'u-boot-spl-dtb:size': 513,
+            'u-boot-spl-dtb:offset': 493,
+            'image-pos': 0,
+            'section/u-boot-dtb:image-pos': 0,
+            'u-boot-spl-dtb:image-pos': 493,
+            'section/u-boot-dtb:size': 493,
+            'u-boot-tpl-dtb:image-pos': 1006,
+            'section/u-boot-dtb:offset': 0,
+            'section:size': 493,
+            'offset': 0,
+            'section:offset': 0,
+            'u-boot-tpl-dtb:offset': 1006,
+            'size': 1519
+        }
+
+        # We expect three device-tree files in the output, one after the other.
+        # Read them in sequence. We look for an 'spl' property in the SPL tree,
+        # and 'tpl' in the TPL tree, to make sure they are distinct from the
+        # main U-Boot tree. All three should have the same positions and offsets.
+        start = 0
+        for item in ['', 'spl', 'tpl']:
+            dtb = fdt.Fdt.FromData(data[start:])
+            dtb.Scan()
+            props = self._GetPropTree(dtb, ['offset', 'size', 'image-pos',
+                                            'spl', 'tpl'])
+            expected = dict(base_expected)
+            if item:
+                expected[item] = 0
+            self.assertEqual(expected, props)
+            start += dtb._fdt_obj.totalsize()
+
+    def testUpdateFdtOutput(self):
+        """Test that output DTB files are updated"""
+        try:
+            data, dtb_data, _, _ = self._DoReadFileDtb('082_fdt_update_all.dts',
+                    use_real_dtb=True, update_dtb=True, reset_dtbs=False)
+
+            # Unfortunately, compiling a source file always results in a file
+            # called source.dtb (see fdt_util.EnsureCompiled()). The test
+            # source file (e.g. test/082_fdt_update_all.dts) thus does not enter
+            # binman as a file called u-boot.dtb. To fix this, copy the file
+            # over to the expected place.
+            #tools.WriteFile(os.path.join(self._indir, 'u-boot.dtb'),
+                    #tools.ReadFile(tools.GetOutputFilename('source.dtb')))
+            start = 0
+            for fname in ['u-boot.dtb.out', 'spl/u-boot-spl.dtb.out',
+                          'tpl/u-boot-tpl.dtb.out']:
+                dtb = fdt.Fdt.FromData(data[start:])
+                size = dtb._fdt_obj.totalsize()
+                pathname = tools.GetOutputFilename(os.path.split(fname)[1])
+                outdata = tools.ReadFile(pathname)
+                name = os.path.split(fname)[0]
+
+                if name:
+                    orig_indata = self._GetDtbContentsForSplTpl(dtb_data, name)
+                else:
+                    orig_indata = dtb_data
+                self.assertNotEqual(outdata, orig_indata,
+                        "Expected output file '%s' be updated" % pathname)
+                self.assertEqual(outdata, data[start:start + size],
+                        "Expected output file '%s' to match output image" %
+                        pathname)
+                start += size
+        finally:
+            self._ResetDtbs()
+
+    def _decompress(self, data):
+        """Decompress lz4 data using the lz4 command-line tool"""
+        out = os.path.join(self._indir, 'lz4.tmp')
+        with open(out, 'wb') as fd:
+            fd.write(data)
+        return tools.Run('lz4', '-dc', out)
+        # An alternative, using the python lz4 module instead of the tool:
+        #   try:
+        #       orig = lz4.frame.decompress(data)
+        #   except AttributeError:
+        #       orig = lz4.decompress(data)
+
+    def testCompress(self):
+        """Test compression of blobs"""
+        data, _, _, out_dtb_fname = self._DoReadFileDtb('083_compress.dts',
+                                            use_real_dtb=True, update_dtb=True)
+        dtb = fdt.Fdt(out_dtb_fname)
+        dtb.Scan()
+        props = self._GetPropTree(dtb, ['size', 'uncomp-size'])
+        orig = self._decompress(data)
+        self.assertEqual(COMPRESS_DATA, orig)
+        expected = {
+            'blob:uncomp-size': len(COMPRESS_DATA),
+            'blob:size': len(data),
+            'size': len(data),
+            }
+        self.assertEqual(expected, props)
+
+    def testFiles(self):
+        """Test bringing in multiple files"""
+        data = self._DoReadFile('084_files.dts')
+        self.assertEqual(FILES_DATA, data)
+
+    def testFilesCompress(self):
+        """Test bringing in multiple files and compressing them"""
+        data = self._DoReadFile('085_files_compress.dts')
+
+        image = control.images['image']
+        entries = image.GetEntries()
+        files = entries['files']
+        entries = files._section._entries
+
+        orig = ''
+        for i in range(1, 3):
+            key = '%d.dat' % i
+            start = entries[key].image_pos
+            length = entries[key].size
+            chunk = data[start:start + length]
+            orig += self._decompress(chunk)
+
+        self.assertEqual(FILES_DATA, orig)
+
+    def testFilesMissing(self):
+        """Test missing files"""
+        with self.assertRaises(ValueError) as e:
+            data = self._DoReadFile('086_files_none.dts')
+        self.assertIn("Node '/binman/files': Pattern \'files/*.none\' matched "
+                      'no files', str(e.exception))
+
+    def testFilesNoPattern(self):
+        """Test missing files"""
+        with self.assertRaises(ValueError) as e:
+            data = self._DoReadFile('087_files_no_pattern.dts')
+        self.assertIn("Node '/binman/files': Missing 'pattern' property",
+                      str(e.exception))
+
+    def testExpandSize(self):
+        """Test an expanding entry"""
+        data, _, map_data, _ = self._DoReadFileDtb('088_expand_size.dts',
+                                                   map=True)
+        expect = ('a' * 8 + U_BOOT_DATA +
+                  MRC_DATA + 'b' * 1 + U_BOOT_DATA +
+                  'c' * 8 + U_BOOT_DATA +
+                  'd' * 8)
+        self.assertEqual(expect, data)
+        self.assertEqual('''ImagePos    Offset      Size  Name
+00000000  00000000  00000028  main-section
+00000000   00000000  00000008  fill
+00000008   00000008  00000004  u-boot
+0000000c   0000000c  00000004  section
+0000000c    00000000  00000003  intel-mrc
+00000010   00000010  00000004  u-boot2
+00000014   00000014  0000000c  section2
+00000014    00000000  00000008  fill
+0000001c    00000008  00000004  u-boot
+00000020   00000020  00000008  fill2
+''', map_data)
+
+    def testExpandSizeBad(self):
+        """Test an expanding entry which fails to provide contents"""
+        with test_util.capture_sys_output() as (stdout, stderr):
+            with self.assertRaises(ValueError) as e:
+                self._DoReadFileDtb('089_expand_size_bad.dts', map=True)
+        self.assertIn("Node '/binman/_testing': Cannot obtain contents when "
+                      'expanding entry', str(e.exception))
+
+    def testHash(self):
+        """Test hashing of the contents of an entry"""
+        _, _, _, out_dtb_fname = self._DoReadFileDtb('090_hash.dts',
+                use_real_dtb=True, update_dtb=True)
+        dtb = fdt.Fdt(out_dtb_fname)
+        dtb.Scan()
+        hash_node = dtb.GetNode('/binman/u-boot/hash').props['value']
+        m = hashlib.sha256()
+        m.update(U_BOOT_DATA)
+        self.assertEqual(m.digest(), ''.join(hash_node.value))
+
+    def testHashNoAlgo(self):
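+        """Test that a hash node without an 'algo' property is detected"""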
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFileDtb('091_hash_no_algo.dts', update_dtb=True)
+        self.assertIn("Node \'/binman/u-boot\': Missing \'algo\' property for "
+                      'hash node', str(e.exception))
+
+    def testHashBadAlgo(self):
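+        """Test that an unknown hash algorithm is detected"""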
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFileDtb('092_hash_bad_algo.dts', update_dtb=True)
+        self.assertIn("Node '/binman/u-boot': Unknown hash algorithm",
+                      str(e.exception))
+
+    def testHashSection(self):
+        """Test hashing of the contents of an entry"""
+        _, _, _, out_dtb_fname = self._DoReadFileDtb('099_hash_section.dts',
+                use_real_dtb=True, update_dtb=True)
+        dtb = fdt.Fdt(out_dtb_fname)
+        dtb.Scan()
+        hash_node = dtb.GetNode('/binman/section/hash').props['value']
+        m = hashlib.sha256()
+        m.update(U_BOOT_DATA)
+        m.update(16 * 'a')
+        self.assertEqual(m.digest(), ''.join(hash_node.value))
+
+    def testPackUBootTplMicrocode(self):
+        """Test that x86 microcode can be handled correctly in TPL
+
+        We expect to see the following in the image, in order:
+            u-boot-tpl-nodtb.bin with a microcode pointer inserted at the correct
+                place
+            u-boot-tpl.dtb with the microcode removed
+            the microcode
+        """
+        with open(self.TestFile('u_boot_ucode_ptr')) as fd:
+            TestFunctional._MakeInputFile('tpl/u-boot-tpl', fd.read())
+        first, pos_and_size = self._RunMicrocodeTest('093_x86_tpl_ucode.dts',
+                                                     U_BOOT_TPL_NODTB_DATA)
+        self.assertEqual('tplnodtb with microc' + pos_and_size +
+                         'ter somewhere in here', first)
+
+    def testFmapX86(self):
+        """Basic test of generation of a flashrom fmap"""
+        data = self._DoReadFile('094_fmap_x86.dts')
+        fhdr, fentries = fmap_util.DecodeFmap(data[32:])
+        expected = U_BOOT_DATA + MRC_DATA + 'a' * (32 - 7)
+        self.assertEqual(expected, data[:32])
+
+        self.assertEqual(0x100, fhdr.image_size)
+
+        self.assertEqual(0, fentries[0].offset)
+        self.assertEqual(4, fentries[0].size)
+        self.assertEqual('U_BOOT', fentries[0].name)
+
+        self.assertEqual(4, fentries[1].offset)
+        self.assertEqual(3, fentries[1].size)
+        self.assertEqual('INTEL_MRC', fentries[1].name)
+
+        self.assertEqual(32, fentries[2].offset)
+        self.assertEqual(fmap_util.FMAP_HEADER_LEN +
+                         fmap_util.FMAP_AREA_LEN * 3, fentries[2].size)
+        self.assertEqual('FMAP', fentries[2].name)
+
+    def testFmapX86Section(self):
+        """Basic test of generation of a flashrom fmap"""
+        data = self._DoReadFile('095_fmap_x86_section.dts')
+        expected = U_BOOT_DATA + MRC_DATA + 'b' * (32 - 7)
+        self.assertEqual(expected, data[:32])
+        fhdr, fentries = fmap_util.DecodeFmap(data[36:])
+
+        self.assertEqual(0x100, fhdr.image_size)
+
+        self.assertEqual(0, fentries[0].offset)
+        self.assertEqual(4, fentries[0].size)
+        self.assertEqual('U_BOOT', fentries[0].name)
+
+        self.assertEqual(4, fentries[1].offset)
+        self.assertEqual(3, fentries[1].size)
+        self.assertEqual('INTEL_MRC', fentries[1].name)
+
+        self.assertEqual(36, fentries[2].offset)
+        self.assertEqual(fmap_util.FMAP_HEADER_LEN +
+                         fmap_util.FMAP_AREA_LEN * 3, fentries[2].size)
+        self.assertEqual('FMAP', fentries[2].name)
+
+    def testElf(self):
+        """Basic test of ELF entries"""
+        self._SetupSplElf()
+        with open(self.TestFile('bss_data')) as fd:
+            TestFunctional._MakeInputFile('-boot', fd.read())
+        data = self._DoReadFile('096_elf.dts')
+
+    def testElfStrip(self):
+        """Basic test of ELF entries with the ELF file stripped"""
+        self._SetupSplElf()
+        with open(self.TestFile('bss_data')) as fd:
+            TestFunctional._MakeInputFile('-boot', fd.read())
+        data = self._DoReadFile('097_elf_strip.dts')
+
+    def testPackOverlapMap(self):
+        """Test that overlapping regions are detected"""
+        with test_util.capture_sys_output() as (stdout, stderr):
+            with self.assertRaises(ValueError) as e:
+                self._DoTestFile('014_pack_overlap.dts', map=True)
+        map_fname = tools.GetOutputFilename('image.map')
+        self.assertEqual("Wrote map file '%s' to show errors\n" % map_fname,
+                         stdout.getvalue())
+
+        # We should not get an image, but there should be a map file
+        self.assertFalse(os.path.exists(tools.GetOutputFilename('image.bin')))
+        self.assertTrue(os.path.exists(map_fname))
+        map_data = tools.ReadFile(map_fname)
+        self.assertEqual('''ImagePos    Offset      Size  Name
+<none>    00000000  00000007  main-section
+<none>     00000000  00000004  u-boot
+<none>     00000003  00000004  u-boot-align
+''', map_data)
+
+    def testPackRefCode(self):
+        """Test that an image with an Intel Reference code binary works"""
+        data = self._DoReadFile('100_intel_refcode.dts')
+        self.assertEqual(REFCODE_DATA, data[:len(REFCODE_DATA)])
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tools/u-boot-tools/binman/image.py b/tools/u-boot-tools/binman/image.py
new file mode 100644
index 0000000000000000000000000000000000000000..f237ae302df5382f23e162419d8af3eb65d2719f
--- /dev/null
+++ b/tools/u-boot-tools/binman/image.py
@@ -0,0 +1,153 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Class for an image, the output of binman
+#
+
+from __future__ import print_function
+
+from collections import OrderedDict
+from operator import attrgetter
+import re
+import sys
+
+import fdt_util
+import bsection
+import tools
+
+class Image:
+    """A Image, representing an output from binman
+
+    An image is comprised of a collection of entries each containing binary
+    data. The image size must be large enough to hold all of this data.
+
+    This class implements the various operations needed for images.
+
+    Attributes:
+        _node: Node object that contains the image definition in device tree
+        _name: Image name
+        _size: Image size in bytes, or None if not known yet
+        _filename: Output filename for image
+        _section: Main section of this image, containing its entries
+
+    Args:
+        test: True if this is being called from a test of Images. In this case
+            there is no device tree defining the structure of the section, so
+            we create a section manually.
+    """
+    def __init__(self, name, node, test=False):
+        self._node = node
+        self._name = name
+        self._size = None
+        self._filename = '%s.bin' % self._name
+        if test:
+            self._section = bsection.Section('main-section', None, self._node,
+                                             self, True)
+        else:
+            self._ReadNode()
+
+    def _ReadNode(self):
+        """Read properties from the image node"""
+        self._size = fdt_util.GetInt(self._node, 'size')
+        filename = fdt_util.GetString(self._node, 'filename')
+        if filename:
+            self._filename = filename
+        self._section = bsection.Section('main-section', None, self._node, self)
+
+    def GetFdtSet(self):
+        """Get the set of device tree files used by this image"""
+        return self._section.GetFdtSet()
+
+    def ExpandEntries(self):
+        """Expand out any entries which have calculated sub-entries
+
+        Some entries are expanded out at runtime, e.g. 'files', which produces
+        a section containing a list of files. Process these entries so that
+        this information is added to the device tree.
+        """
+        self._section.ExpandEntries()
+
+    def AddMissingProperties(self):
+        """Add properties that are not present in the device tree
+
+        When binman has completed packing the entries the offset and size of
+        each entry are known. But before this the device tree may not specify
+        these. Add any missing properties, with a dummy value, so that the
+        size of the entry is correct. That way we can insert the correct values
+        later.
+        """
+        self._section.AddMissingProperties()
+
+    def ProcessFdt(self, fdt):
+        """Allow entries to adjust the device tree
+
+        Some entries need to adjust the device tree for their purposes. This
+        may involve adding or deleting properties.
+        """
+        return self._section.ProcessFdt(fdt)
+
+    def GetEntryContents(self):
+        """Call ObtainContents() for the section
+        """
+        self._section.GetEntryContents()
+
+    def GetEntryOffsets(self):
+        """Handle entries that want to set the offset/size of other entries
+
+        This calls each entry's GetOffsets() method. If it returns a list
+        of entries to update, it updates them.
+        """
+        self._section.GetEntryOffsets()
+
+    def PackEntries(self):
+        """Pack all entries into the image"""
+        self._section.PackEntries()
+
+    def CheckSize(self):
+        """Check that the image contents does not exceed its size, etc."""
+        self._size = self._section.CheckSize()
+
+    def CheckEntries(self):
+        """Check that entries do not overlap or extend outside the image"""
+        self._section.CheckEntries()
+
+    def SetCalculatedProperties(self):
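+        """Set device-tree properties calculated by binman (e.g. offset/size)"""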
+        self._section.SetCalculatedProperties()
+
+    def SetImagePos(self):
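+        """Set the image position of each entry, starting from the image start"""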
+        self._section.SetImagePos(0)
+
+    def ProcessEntryContents(self):
+        """Call the ProcessContents() method for each entry
+
+        This is intended to adjust the contents as needed by the entry type.
+        """
+        self._section.ProcessEntryContents()
+
+    def WriteSymbols(self):
+        """Write symbol values into binary files for access at run time"""
+        self._section.WriteSymbols()
+
+    def BuildImage(self):
+        """Write the image to a file"""
+        fname = tools.GetOutputFilename(self._filename)
+        with open(fname, 'wb') as fd:
+            self._section.BuildSection(fd, 0)
+
+    def GetEntries(self):
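+        """Get the entries in this image's top-level section"""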
+        return self._section.GetEntries()
+
+    def WriteMap(self):
+        """Write a map of the image to a .map file
+
+        Returns:
+            Filename of map file written
+        """
+        filename = '%s.map' % self._name
+        fname = tools.GetOutputFilename(filename)
+        with open(fname, 'w') as fd:
+            print('%8s  %8s  %8s  %s' % ('ImagePos', 'Offset', 'Size', 'Name'),
+                  file=fd)
+            self._section.WriteMap(fd, 0)
+        return fname
diff --git a/tools/u-boot-tools/binman/image_test.py b/tools/u-boot-tools/binman/image_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..3775e1afb074dc3c68c4a742f5848257d9f46a29
--- /dev/null
+++ b/tools/u-boot-tools/binman/image_test.py
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2017 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Test for the image module
+
+import unittest
+
+from image import Image
+from test_util import capture_sys_output
+
+class TestImage(unittest.TestCase):
+    def testInvalidFormat(self):
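+        """Test that a symbol with an invalid format is detected"""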
+        image = Image('name', 'node', test=True)
+        section = image._section
+        with self.assertRaises(ValueError) as e:
+            section.LookupSymbol('_binman_something_prop_', False, 'msg')
+        self.assertIn(
+            "msg: Symbol '_binman_something_prop_' has invalid format",
+            str(e.exception))
+
+    def testMissingSymbol(self):
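+        """Test that a symbol referring to a missing entry is detected"""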
+        image = Image('name', 'node', test=True)
+        section = image._section
+        section._entries = {}
+        with self.assertRaises(ValueError) as e:
+            section.LookupSymbol('_binman_type_prop_pname', False, 'msg')
+        self.assertIn("msg: Entry 'type' not found in list ()",
+                      str(e.exception))
+
+    def testMissingSymbolOptional(self):
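+        """Test that an optional lookup of a missing entry gives only a warning"""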
+        image = Image('name', 'node', test=True)
+        section = image._section
+        section._entries = {}
+        with capture_sys_output() as (stdout, stderr):
+            val = section.LookupSymbol('_binman_type_prop_pname', True, 'msg')
+        self.assertEqual(val, None)
+        self.assertEqual("Warning: msg: Entry 'type' not found in list ()\n",
+                         stderr.getvalue())
+        self.assertEqual('', stdout.getvalue())
+
+    def testBadProperty(self):
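+        """Test that a symbol referring to an unknown property is detected"""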
+        image = Image('name', 'node', test=True)
+        section = image._section
+        section._entries = {'u-boot': 1}
+        with self.assertRaises(ValueError) as e:
+            section.LookupSymbol('_binman_u_boot_prop_bad', False, 'msg')
+        self.assertIn("msg: No such property 'bad", str(e.exception))
diff --git a/tools/u-boot-tools/binman/state.py b/tools/u-boot-tools/binman/state.py
new file mode 100644
index 0000000000000000000000000000000000000000..d945e4bf6576f2d316d81c1f9d0482c5fdaf5604
--- /dev/null
+++ b/tools/u-boot-tools/binman/state.py
@@ -0,0 +1,253 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Holds and modifies the state information held by binman
+#
+
+import hashlib
+import re
+from sets import Set
+
+import os
+import tools
+
+# Records the device-tree files known to binman, keyed by filename (e.g.
+# 'u-boot-spl.dtb')
+fdt_files = {}
+
+# Arguments passed to binman to provide arguments to entries
+entry_args = {}
+
+# True to use fake device-tree files for testing (see U_BOOT_DTB_DATA in
+# ftest.py)
+use_fake_dtb = False
+
+# Set of all device tree files referenced by images
+fdt_set = Set()
+
+# Same as above, but excluding the main one
+fdt_subset = Set()
+
+# The DTB which contains the full image information
+main_dtb = None
+
+def GetFdt(fname):
+    """Get the Fdt object for a particular device-tree filename
+
+    Binman keeps track of at least one device-tree file called u-boot.dtb but
+    can also have others (e.g. for SPL). This function looks up the given
+    filename and returns the associated Fdt object.
+
+    Args:
+        fname: Filename to look up (e.g. 'u-boot.dtb').
+
+    Returns:
+        Fdt object associated with the filename
+    """
+    return fdt_files[fname]
+
+def GetFdtPath(fname):
+    """Get the full pathname of a particular Fdt object
+
+    Similar to GetFdt() but returns the pathname associated with the Fdt.
+
+    Args:
+        fname: Filename to look up (e.g. 'u-boot.dtb').
+
+    Returns:
+        Full path name to the associated Fdt
+    """
+    return fdt_files[fname]._fname
+
+def GetFdtContents(fname):
+    """Looks up the FDT pathname and contents
+
+    This is used to obtain the Fdt pathname and contents when needed by an
+    entry. It supports a 'fake' dtb, allowing tests to substitute test data for
+    the real dtb.
+
+    Args:
+        fname: Filename to look up (e.g. 'u-boot.dtb').
+
+    Returns:
+        tuple:
+            pathname to Fdt
+            Fdt data (as bytes)
+    """
+    if fname in fdt_files and not use_fake_dtb:
+        pathname = GetFdtPath(fname)
+        data = GetFdt(fname).GetContents()
+    else:
+        pathname = tools.GetInputFilename(fname)
+        data = tools.ReadFile(pathname)
+    return pathname, data
+
+def SetEntryArgs(args):
+    """Set the value of the entry args
+
+    This sets up the entry_args dict which is used to supply entry arguments to
+    entries.
+
+    Args:
+        args: List of entry arguments, each in the format "name=value"
+    """
+    global entry_args
+
+    entry_args = {}
+    if args:
+        for arg in args:
+            m = re.match('([^=]*)=(.*)', arg)
+            if not m:
+                raise ValueError("Invalid entry arguemnt '%s'" % arg)
+            entry_args[m.group(1)] = m.group(2)
+
+def GetEntryArg(name):
+    """Get the value of an entry argument
+
+    Args:
+        name: Name of argument to retrieve
+
+    Returns:
+        String value of argument
+    """
+    return entry_args.get(name)
+
+def Prepare(images, dtb):
+    """Get device tree files ready for use
+
+    This sets up a set of device tree files that can be retrieved by GetFdts().
+    At present there is only one, that for U-Boot proper.
+
+    Args:
+        images: List of images being used
+        dtb: Main dtb
+    """
+    global fdt_set, fdt_subset, fdt_files, main_dtb
+    # Import these here in case libfdt.py is not available, in which case
+    # the above help option still works.
+    import fdt
+    import fdt_util
+
+    # If we are updating the DTBs we need to put these updated versions
+    # where Entry_blob_dtb can find them. We can ignore 'u-boot.dtb'
+    # since it is assumed to be the one passed in with options.dt, and
+    # was handled just above.
+    main_dtb = dtb
+    fdt_files.clear()
+    fdt_files['u-boot.dtb'] = dtb
+    fdt_subset = Set()
+    if not use_fake_dtb:
+        for image in images.values():
+            fdt_subset.update(image.GetFdtSet())
+        fdt_subset.discard('u-boot.dtb')
+        for other_fname in fdt_subset:
+            infile = tools.GetInputFilename(other_fname)
+            other_fname_dtb = fdt_util.EnsureCompiled(infile)
+            out_fname = tools.GetOutputFilename('%s.out' %
+                    os.path.split(other_fname)[1])
+            tools.WriteFile(out_fname, tools.ReadFile(other_fname_dtb))
+            other_dtb = fdt.FdtScan(out_fname)
+            fdt_files[other_fname] = other_dtb
+
+def GetFdts():
+    """Yield all device tree files being used by binman
+
+    Yields:
+        Device trees being used (U-Boot proper, SPL, TPL)
+    """
+    yield main_dtb
+    for other_fname in fdt_subset:
+        yield fdt_files[other_fname]
+
+def GetUpdateNodes(node):
+    """Yield all the nodes that need to be updated in all device trees
+
+    The property referenced by this node is added to any device trees which
+    have the given node. Due to removal of unwanted nodes, SPL and TPL may
+    not have this node.
+
+    Args:
+        node: Node object in the main device tree to look up
+
+    Yields:
+        Node objects in each device tree that is in use (U-Boot proper, which
+            is node, SPL and TPL)
+    """
+    yield node
+    for dtb in fdt_files.values():
+        if dtb != node.GetFdt():
+            other_node = dtb.GetNode(node.path)
+            if other_node:
+                yield other_node
+
+def AddZeroProp(node, prop):
+    """Add a new property to affected device trees with an integer value of 0.
+
+    Args:
+        node: Node in the main device tree to add the property to
+        prop: Name of property to add
+    """
+    for n in GetUpdateNodes(node):
+        n.AddZeroProp(prop)
+
+def AddSubnode(node, name):
+    """Add a new subnode to a node in affected device trees
+
+    Args:
+        node: Node to add to
+        name: name of node to add
+
+    Returns:
+        New subnode that was created in main tree
+    """
+    first = None
+    for n in GetUpdateNodes(node):
+        subnode = n.AddSubnode(name)
+        if not first:
+            first = subnode
+    return first
+
+def AddString(node, prop, value):
+    """Add a new string property to affected device trees
+
+    Args:
+        node: Node in the main device tree to add the property to
+        prop: Name of property to add
+        value: String value (which will be \0-terminated in the DT)
+    """
+    for n in GetUpdateNodes(node):
+        n.AddString(prop, value)
+
+def SetInt(node, prop, value):
+    """Update an integer property in affected device trees
+
+    This is not allowed to change the size of the FDT.
+
+    Args:
+        node: Node in the main device tree to update
+        prop: Name of property to update
+        value: Integer value to set
+    """
+    for n in GetUpdateNodes(node):
+        n.SetInt(prop, value)
+
+def CheckAddHashProp(node):
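+    """Add an empty 'value' property to the hash subnode of a node, if present
+
+    If the node has a 'hash' subnode, check its 'algo' property and add an
+    empty 'value' property of the right size to each affected device tree,
+    ready to hold the hash once it is calculated.
+
+    Returns:
+        Error message as a string, or None if all is well
+    """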
+    hash_node = node.FindNode('hash')
+    if hash_node:
+        algo = hash_node.props.get('algo')
+        if not algo:
+            return "Missing 'algo' property for hash node"
+        if algo.value == 'sha256':
+            size = 32
+        else:
+            return "Unknown hash algorithm '%s'" % algo
+        for n in GetUpdateNodes(hash_node):
+            n.AddEmptyProp('value', size)
+
+def CheckSetHashValue(node, get_data_func):
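+    """Calculate the hash of an entry's data and store it in its hash node
+
+    Args:
+        node: Node which may have a 'hash' subnode
+        get_data_func: Function to call to obtain the data to hash
+    """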
+    hash_node = node.FindNode('hash')
+    if hash_node:
+        algo = hash_node.props.get('algo').value
+        if algo == 'sha256':
+            m = hashlib.sha256()
+            m.update(get_data_func())
+            data = m.digest()
+        for n in GetUpdateNodes(hash_node):
+            n.SetData('value', data)
diff --git a/tools/u-boot-tools/binman/test/001_invalid.dts b/tools/u-boot-tools/binman/test/001_invalid.dts
new file mode 100644
index 0000000000000000000000000000000000000000..7d00455d7c1e1f581f1c6bdf99c31398d86078d0
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/001_invalid.dts
@@ -0,0 +1,5 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
diff --git a/tools/u-boot-tools/binman/test/002_missing_node.dts b/tools/u-boot-tools/binman/test/002_missing_node.dts
new file mode 100644
index 0000000000000000000000000000000000000000..3a51ec2be58a5ccd435fcb0d02e7c26be8bdb889
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/002_missing_node.dts
@@ -0,0 +1,6 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+};
diff --git a/tools/u-boot-tools/binman/test/003_empty.dts b/tools/u-boot-tools/binman/test/003_empty.dts
new file mode 100644
index 0000000000000000000000000000000000000000..493c9a04c97eb6c2185f3534019d6b164f87be1c
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/003_empty.dts
@@ -0,0 +1,9 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/004_invalid_entry.dts b/tools/u-boot-tools/binman/test/004_invalid_entry.dts
new file mode 100644
index 0000000000000000000000000000000000000000..b043455bb575cc577b1d01c92db4b8c09adbba9a
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/004_invalid_entry.dts
@@ -0,0 +1,11 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		not-a-valid-type {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/005_simple.dts b/tools/u-boot-tools/binman/test/005_simple.dts
new file mode 100644
index 0000000000000000000000000000000000000000..3771aa2261ce0f44d9d368be92a908dfaef50aeb
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/005_simple.dts
@@ -0,0 +1,11 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/006_dual_image.dts b/tools/u-boot-tools/binman/test/006_dual_image.dts
new file mode 100644
index 0000000000000000000000000000000000000000..78be16f164914fd74432b1c11e1d5935b1217767
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/006_dual_image.dts
@@ -0,0 +1,22 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		multiple-images;
+		image1 {
+			u-boot {
+			};
+		};
+
+		image2 {
+			pad-before = <3>;
+			pad-after = <5>;
+
+			u-boot {
+			};
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/007_bad_align.dts b/tools/u-boot-tools/binman/test/007_bad_align.dts
new file mode 100644
index 0000000000000000000000000000000000000000..123bb13558186ef9a4072504d2566686c5616cc7
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/007_bad_align.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+			align = <23>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/008_pack.dts b/tools/u-boot-tools/binman/test/008_pack.dts
new file mode 100644
index 0000000000000000000000000000000000000000..a88785d8352f9c2fbf10263681d6873a6b55444f
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/008_pack.dts
@@ -0,0 +1,30 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+		};
+
+		u-boot-align {
+			type = "u-boot";
+			align = <16>;
+		};
+
+		u-boot-size {
+			type = "u-boot";
+			size = <23>;
+		};
+
+		u-boot-next {
+			type = "u-boot";
+		};
+
+		u-boot-fixed {
+			type = "u-boot";
+			offset = <61>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/009_pack_extra.dts b/tools/u-boot-tools/binman/test/009_pack_extra.dts
new file mode 100644
index 0000000000000000000000000000000000000000..0765707dea2da2bd96e1c26724062b7ea25f1ded
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/009_pack_extra.dts
@@ -0,0 +1,35 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+			pad-before = <3>;
+			pad-after = <5>;
+		};
+
+		u-boot-align-size-nop {
+			type = "u-boot";
+			align-size = <4>;
+		};
+
+		u-boot-align-size {
+			type = "u-boot";
+			align = <16>;
+			align-size = <32>;
+		};
+
+		u-boot-align-end {
+			type = "u-boot";
+			align-end = <64>;
+		};
+
+		u-boot-align-both {
+			type = "u-boot";
+			align= <64>;
+			align-end = <128>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/010_pack_align_power2.dts b/tools/u-boot-tools/binman/test/010_pack_align_power2.dts
new file mode 100644
index 0000000000000000000000000000000000000000..8f6253a3d0f61f311702f3d6f695f53aca9cb5cb
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/010_pack_align_power2.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+			align = <5>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/011_pack_align_size_power2.dts b/tools/u-boot-tools/binman/test/011_pack_align_size_power2.dts
new file mode 100644
index 0000000000000000000000000000000000000000..04f7672ea47158c0c2b1596de393249c3a6f5f50
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/011_pack_align_size_power2.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+			align-size = <55>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/012_pack_inv_align.dts b/tools/u-boot-tools/binman/test/012_pack_inv_align.dts
new file mode 100644
index 0000000000000000000000000000000000000000..d8dd600edb8ba7997295cae09e81bbe1c32558b8
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/012_pack_inv_align.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+			offset = <5>;
+			align = <4>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/013_pack_inv_size_align.dts b/tools/u-boot-tools/binman/test/013_pack_inv_size_align.dts
new file mode 100644
index 0000000000000000000000000000000000000000..dfafa134d7b4b1877ec9bddccd9711f206cf97dc
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/013_pack_inv_size_align.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+			size = <5>;
+			align-size = <4>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/014_pack_overlap.dts b/tools/u-boot-tools/binman/test/014_pack_overlap.dts
new file mode 100644
index 0000000000000000000000000000000000000000..3895cba3bdb7b1577f5536f366cadedbf430341d
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/014_pack_overlap.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+		};
+
+		u-boot-align {
+			type = "u-boot";
+			offset = <3>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/015_pack_overflow.dts b/tools/u-boot-tools/binman/test/015_pack_overflow.dts
new file mode 100644
index 0000000000000000000000000000000000000000..6f654330afcdabcebb3a1fd68fd4ffedf499a79e
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/015_pack_overflow.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+			size = <3>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/016_pack_image_overflow.dts b/tools/u-boot-tools/binman/test/016_pack_image_overflow.dts
new file mode 100644
index 0000000000000000000000000000000000000000..6ae66f3ac95f154882063ccda800d13d5cd03d65
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/016_pack_image_overflow.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <3>;
+
+		u-boot {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/017_pack_image_size.dts b/tools/u-boot-tools/binman/test/017_pack_image_size.dts
new file mode 100644
index 0000000000000000000000000000000000000000..2360eb5d19a264d06fb50eeda3491bc7b64cfd7b
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/017_pack_image_size.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <7>;
+
+		u-boot {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/018_pack_image_align.dts b/tools/u-boot-tools/binman/test/018_pack_image_align.dts
new file mode 100644
index 0000000000000000000000000000000000000000..16cd2a422ef3b2e4c0c01deb7501f8996a0df685
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/018_pack_image_align.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		align-size = <16>;
+
+		u-boot {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/019_pack_inv_image_align.dts b/tools/u-boot-tools/binman/test/019_pack_inv_image_align.dts
new file mode 100644
index 0000000000000000000000000000000000000000..e5ee87b88fb7ee2bee7c7db37b313b4b44ad4074
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/019_pack_inv_image_align.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <7>;
+		align-size = <8>;
+
+		u-boot {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/020_pack_inv_image_align_power2.dts b/tools/u-boot-tools/binman/test/020_pack_inv_image_align_power2.dts
new file mode 100644
index 0000000000000000000000000000000000000000..a428c4be5204f5452ec40a167035ad163f14e3b4
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/020_pack_inv_image_align_power2.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		align-size = <131>;
+
+		u-boot {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/021_image_pad.dts b/tools/u-boot-tools/binman/test/021_image_pad.dts
new file mode 100644
index 0000000000000000000000000000000000000000..c6516689d94c95e4e73b09a7c6ea5f7584cc9396
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/021_image_pad.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		pad-byte = <0xff>;
+		u-boot-spl {
+		};
+
+		u-boot {
+			offset = <20>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/022_image_name.dts b/tools/u-boot-tools/binman/test/022_image_name.dts
new file mode 100644
index 0000000000000000000000000000000000000000..94fc069c1764562d712819cea3f678a6a2771945
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/022_image_name.dts
@@ -0,0 +1,21 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		multiple-images;
+		image1 {
+			filename = "test-name";
+			u-boot {
+			};
+		};
+
+		image2 {
+			filename = "test-name.xx";
+			u-boot {
+			};
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/023_blob.dts b/tools/u-boot-tools/binman/test/023_blob.dts
new file mode 100644
index 0000000000000000000000000000000000000000..7dcff69666af3fcfda065bb8525ff5ef07542c27
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/023_blob.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		blob {
+			filename = "blobfile";
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/024_sorted.dts b/tools/u-boot-tools/binman/test/024_sorted.dts
new file mode 100644
index 0000000000000000000000000000000000000000..d35d39f077d9825f3e03fc836512eedc7c8a4019
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/024_sorted.dts
@@ -0,0 +1,17 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		u-boot {
+			offset = <22>;
+		};
+
+		u-boot-spl {
+			offset = <1>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/025_pack_zero_size.dts b/tools/u-boot-tools/binman/test/025_pack_zero_size.dts
new file mode 100644
index 0000000000000000000000000000000000000000..e863c44e3fde5ca41709f37f7a9b0aac9440deaf
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/025_pack_zero_size.dts
@@ -0,0 +1,15 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+		};
+
+		u-boot-spl {
+			offset = <0>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/026_pack_u_boot_dtb.dts b/tools/u-boot-tools/binman/test/026_pack_u_boot_dtb.dts
new file mode 100644
index 0000000000000000000000000000000000000000..2707a7347a43906193b7773f7ef4d015e6b01e56
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/026_pack_u_boot_dtb.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot-nodtb {
+		};
+
+		u-boot-dtb {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/027_pack_4gb_no_size.dts b/tools/u-boot-tools/binman/test/027_pack_4gb_no_size.dts
new file mode 100644
index 0000000000000000000000000000000000000000..371cca10d587126bd0ce424eace0c4b5d9c69e72
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/027_pack_4gb_no_size.dts
@@ -0,0 +1,18 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
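+		/*
+		 * With end-at-4gb, entry offsets are absolute addresses and the
+		 * image is placed so that it ends at the 4GB boundary.
+		 */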
+		u-boot {
+			offset = <0xfffffff0>;
+		};
+
+		u-boot-spl {
+			offset = <0xfffffff7>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/028_pack_4gb_outside.dts b/tools/u-boot-tools/binman/test/028_pack_4gb_outside.dts
new file mode 100644
index 0000000000000000000000000000000000000000..2216abfb70c1de8a16c297b11a8a99cb6ad01a14
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/028_pack_4gb_outside.dts
@@ -0,0 +1,19 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <32>;
+		u-boot {
+			offset = <0>;
+		};
+
+		u-boot-spl {
+			offset = <0xffffffeb>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/029_x86-rom.dts b/tools/u-boot-tools/binman/test/029_x86-rom.dts
new file mode 100644
index 0000000000000000000000000000000000000000..d5c69f9d4a9582608ec69b25e9c66fc8d06c0089
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/029_x86-rom.dts
@@ -0,0 +1,19 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <32>;
+		u-boot {
+			offset = <0xffffffe0>;
+		};
+
+		u-boot-spl {
+			offset = <0xffffffeb>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/030_x86-rom-me-no-desc.dts b/tools/u-boot-tools/binman/test/030_x86-rom-me-no-desc.dts
new file mode 100644
index 0000000000000000000000000000000000000000..796cb87afc7020430321a2e245d4cc95d5aaf155
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/030_x86-rom-me-no-desc.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <16>;
+		intel-me {
+			filename = "me.bin";
+			offset-unset;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/031_x86-rom-me.dts b/tools/u-boot-tools/binman/test/031_x86-rom-me.dts
new file mode 100644
index 0000000000000000000000000000000000000000..b8b0a5a74bb18b5c262b1d14ac35581f6b06a046
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/031_x86-rom-me.dts
@@ -0,0 +1,20 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <0x800000>;
+		intel-descriptor {
+			filename = "descriptor.bin";
+		};
+
+		intel-me {
+			filename = "me.bin";
+			offset-unset;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/032_intel-vga.dts b/tools/u-boot-tools/binman/test/032_intel-vga.dts
new file mode 100644
index 0000000000000000000000000000000000000000..9c532d03d3cdc9e712e9d0e117c6ec944100daa2
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/032_intel-vga.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <16>;
+
+		intel-vga {
+			filename = "vga.bin";
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/033_x86-start16.dts b/tools/u-boot-tools/binman/test/033_x86-start16.dts
new file mode 100644
index 0000000000000000000000000000000000000000..2e279dee9d63a5e1fb1652a3d2a38aed1dbf6b03
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/033_x86-start16.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <16>;
+
+		x86-start16 {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/034_x86_ucode.dts b/tools/u-boot-tools/binman/test/034_x86_ucode.dts
new file mode 100644
index 0000000000000000000000000000000000000000..40725731cd3948e304feb6578b1c1e59b4f1dc7e
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/034_x86_ucode.dts
@@ -0,0 +1,29 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <0x200>;
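+		/*
+		 * binman collects the updates from the 'microcode' node below
+		 * into the u-boot-ucode entry and updates the microcode pointer
+		 * embedded in u-boot-with-ucode-ptr to refer to it.
+		 */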
+		u-boot-with-ucode-ptr {
+		};
+
+		u-boot-dtb-with-ucode {
+		};
+
+		u-boot-ucode {
+		};
+	};
+
+	microcode {
+		update@0 {
+			data = <0x12345678 0x12345679>;
+		};
+		update@1 {
+			data = <0xabcd0000 0x78235609>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/035_x86_single_ucode.dts b/tools/u-boot-tools/binman/test/035_x86_single_ucode.dts
new file mode 100644
index 0000000000000000000000000000000000000000..2b1f086a41c1b7118440f2725fc32dff3d69b183
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/035_x86_single_ucode.dts
@@ -0,0 +1,26 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <0x200>;
+		u-boot-with-ucode-ptr {
+		};
+
+		u-boot-dtb-with-ucode {
+		};
+
+		u-boot-ucode {
+		};
+	};
+
+	microcode {
+		update@0 {
+			data = <0x12345678 0x12345679>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/036_u_boot_img.dts b/tools/u-boot-tools/binman/test/036_u_boot_img.dts
new file mode 100644
index 0000000000000000000000000000000000000000..aa5a3fe4810eefca69a0ce6dd73dd24078380b63
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/036_u_boot_img.dts
@@ -0,0 +1,11 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot-img {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/037_x86_no_ucode.dts b/tools/u-boot-tools/binman/test/037_x86_no_ucode.dts
new file mode 100644
index 0000000000000000000000000000000000000000..6da49c3da6dacc600fc332e11df532eab6c642f1
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/037_x86_no_ucode.dts
@@ -0,0 +1,20 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <0x200>;
+		u-boot-with-ucode-ptr {
+		};
+
+		u-boot-dtb-with-ucode {
+		};
+
+		u-boot-ucode {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/038_x86_ucode_missing_node.dts b/tools/u-boot-tools/binman/test/038_x86_ucode_missing_node.dts
new file mode 100644
index 0000000000000000000000000000000000000000..720677c9c1e9f7a44e40c570bf42a99be6abfc03
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/038_x86_ucode_missing_node.dts
@@ -0,0 +1,26 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <0x200>;
+		u-boot-with-ucode-ptr {
+		};
+
+		u-boot-ucode {
+		};
+	};
+
+	microcode {
+		update@0 {
+			data = <0x12345678 0x12345679>;
+		};
+		update@1 {
+			data = <0xabcd0000 0x78235609>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/039_x86_ucode_missing_node2.dts b/tools/u-boot-tools/binman/test/039_x86_ucode_missing_node2.dts
new file mode 100644
index 0000000000000000000000000000000000000000..10ac086d5493b3593d8989d2c53f27459693f9d1
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/039_x86_ucode_missing_node2.dts
@@ -0,0 +1,23 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <0x200>;
+		u-boot-with-ucode-ptr {
+		};
+	};
+
+	microcode {
+		update@0 {
+			data = <0x12345678 0x12345679>;
+		};
+		update@1 {
+			data = <0xabcd0000 0x78235609>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/040_x86_ucode_not_in_image.dts b/tools/u-boot-tools/binman/test/040_x86_ucode_not_in_image.dts
new file mode 100644
index 0000000000000000000000000000000000000000..609725824a528c0080956ce0fd38bbedffb80335
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/040_x86_ucode_not_in_image.dts
@@ -0,0 +1,28 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		size = <0x200>;
+		u-boot-with-ucode-ptr {
+		};
+
+		u-boot-dtb-with-ucode {
+		};
+
+		u-boot-ucode {
+		};
+	};
+
+	microcode {
+		update@0 {
+			data = <0x12345678 0x12345679>;
+		};
+		update@1 {
+			data = <0xabcd0000 0x78235609>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/041_unknown_pos_size.dts b/tools/u-boot-tools/binman/test/041_unknown_pos_size.dts
new file mode 100644
index 0000000000000000000000000000000000000000..94fe821c470b3216c98e15c64308342474ddf8e3
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/041_unknown_pos_size.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		_testing {
+			return-invalid-entry;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/042_intel-fsp.dts b/tools/u-boot-tools/binman/test/042_intel-fsp.dts
new file mode 100644
index 0000000000000000000000000000000000000000..8a7c889251b993e9d7f4d9d81e468cb89b70d1e4
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/042_intel-fsp.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <16>;
+
+		intel-fsp {
+			filename = "fsp.bin";
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/043_intel-cmc.dts b/tools/u-boot-tools/binman/test/043_intel-cmc.dts
new file mode 100644
index 0000000000000000000000000000000000000000..5a56c7d881ae499c22cf445185c8e831049af794
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/043_intel-cmc.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <16>;
+
+		intel-cmc {
+			filename = "cmc.bin";
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/044_x86_optional_ucode.dts b/tools/u-boot-tools/binman/test/044_x86_optional_ucode.dts
new file mode 100644
index 0000000000000000000000000000000000000000..24a7040d318365e2dd84af95706cdb8ca14f0253
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/044_x86_optional_ucode.dts
@@ -0,0 +1,30 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <0x200>;
+		u-boot-with-ucode-ptr {
+			optional-ucode;
+		};
+
+		u-boot-dtb-with-ucode {
+		};
+
+		u-boot-ucode {
+		};
+	};
+
+	microcode {
+		update@0 {
+			data = <0x12345678 0x12345679>;
+		};
+		update@1 {
+			data = <0xabcd0000 0x78235609>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/045_prop_test.dts b/tools/u-boot-tools/binman/test/045_prop_test.dts
new file mode 100644
index 0000000000000000000000000000000000000000..064de2b3167c52127a7bcfabb36956af957b5b9b
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/045_prop_test.dts
@@ -0,0 +1,23 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <16>;
+		intel-me {
+			filename = "me.bin";
+			offset-unset;
+			intval = <3>;
+			intarray = <5 6>;
+			byteval = [08];
+			bytearray = [01 23 34];
+			longbytearray = [09 0a 0b 0c];
+			stringval = "message2";
+			stringarray = "another", "multi-word", "message";
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/046_intel-vbt.dts b/tools/u-boot-tools/binman/test/046_intel-vbt.dts
new file mode 100644
index 0000000000000000000000000000000000000000..733f5751d5a4d4cc03048b1dc5a5a74900f802d5
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/046_intel-vbt.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <16>;
+
+		intel-vbt {
+			filename = "vbt.bin";
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/047_spl_bss_pad.dts b/tools/u-boot-tools/binman/test/047_spl_bss_pad.dts
new file mode 100644
index 0000000000000000000000000000000000000000..6bd88b83f985a86f009aa42ff2f6b354e68628d6
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/047_spl_bss_pad.dts
@@ -0,0 +1,17 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot-spl {
+		};
+
+		u-boot-spl-bss-pad {
+		};
+
+		u-boot {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/048_x86-start16-spl.dts b/tools/u-boot-tools/binman/test/048_x86-start16-spl.dts
new file mode 100644
index 0000000000000000000000000000000000000000..e2009f15f059ec4e721eccd1c9854fa92dcd7ebf
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/048_x86-start16-spl.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <16>;
+
+		x86-start16-spl {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/049_x86_ucode_spl.dts b/tools/u-boot-tools/binman/test/049_x86_ucode_spl.dts
new file mode 100644
index 0000000000000000000000000000000000000000..350d2c4730bb43f51915a99eddfd3c10736fcdd2
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/049_x86_ucode_spl.dts
@@ -0,0 +1,29 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <0x200>;
+		u-boot-spl-with-ucode-ptr {
+		};
+
+		u-boot-dtb-with-ucode {
+		};
+
+		u-boot-ucode {
+		};
+	};
+
+	microcode {
+		update@0 {
+			data = <0x12345678 0x12345679>;
+		};
+		update@1 {
+			data = <0xabcd0000 0x78235609>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/050_intel_mrc.dts b/tools/u-boot-tools/binman/test/050_intel_mrc.dts
new file mode 100644
index 0000000000000000000000000000000000000000..54cd52a2b711dbda69b9679943cfc71700692110
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/050_intel_mrc.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <16>;
+
+		intel-mrc {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/051_u_boot_spl_dtb.dts b/tools/u-boot-tools/binman/test/051_u_boot_spl_dtb.dts
new file mode 100644
index 0000000000000000000000000000000000000000..3912f86b4cd7f962e477201ec81dbbe0ea9047df
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/051_u_boot_spl_dtb.dts
@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <16>;
+
+		u-boot-spl-dtb {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/052_u_boot_spl_nodtb.dts b/tools/u-boot-tools/binman/test/052_u_boot_spl_nodtb.dts
new file mode 100644
index 0000000000000000000000000000000000000000..7f4e27780fe4250a6c1efb3db91648c2bcbce273
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/052_u_boot_spl_nodtb.dts
@@ -0,0 +1,11 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot-spl-nodtb {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/053_symbols.dts b/tools/u-boot-tools/binman/test/053_symbols.dts
new file mode 100644
index 0000000000000000000000000000000000000000..9f135676cb0743219a6598b44b43b89e6a3b7ad1
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/053_symbols.dts
@@ -0,0 +1,20 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		pad-byte = <0xff>;
+		u-boot-spl {
+		};
+
+		u-boot {
+			offset = <20>;
+		};
+
+		u-boot-spl2 {
+			type = "u-boot-spl";
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/054_unit_address.dts b/tools/u-boot-tools/binman/test/054_unit_address.dts
new file mode 100644
index 0000000000000000000000000000000000000000..3216dbbcc19595659d209243b4285727118e8b2f
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/054_unit_address.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot@0 {
+		};
+		u-boot@1 {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/055_sections.dts b/tools/u-boot-tools/binman/test/055_sections.dts
new file mode 100644
index 0000000000000000000000000000000000000000..6b306aeda4621c7d60038783376a4e173d07c03c
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/055_sections.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		pad-byte = <0x26>;
+		size = <0x28>;
+		section@0 {
+			read-only;
+			size = <0x10>;
+			pad-byte = <0x21>;
+
+			u-boot {
+			};
+		};
+		section@1 {
+			size = <0x10>;
+			pad-byte = <0x61>;
+
+			u-boot {
+			};
+		};
+		section@2 {
+			u-boot {
+			};
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/056_name_prefix.dts b/tools/u-boot-tools/binman/test/056_name_prefix.dts
new file mode 100644
index 0000000000000000000000000000000000000000..f38c80eb1831e1d21b8bc6e8d20e62c7b556ed3b
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/056_name_prefix.dts
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		pad-byte = <0x26>;
+		size = <0x28>;
+		section@0 {
+			read-only;
+			name-prefix = "ro-";
+			size = <0x10>;
+			pad-byte = <0x21>;
+
+			u-boot {
+			};
+		};
+		section@1 {
+			name-prefix = "rw-";
+			size = <0x10>;
+			pad-byte = <0x61>;
+
+			u-boot {
+			};
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/057_unknown_contents.dts b/tools/u-boot-tools/binman/test/057_unknown_contents.dts
new file mode 100644
index 0000000000000000000000000000000000000000..6ea98d7cab6f0345e8b69bcacb3f97267aed4b16
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/057_unknown_contents.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		_testing {
+			return-unknown-contents;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/058_x86_ucode_spl_needs_retry.dts b/tools/u-boot-tools/binman/test/058_x86_ucode_spl_needs_retry.dts
new file mode 100644
index 0000000000000000000000000000000000000000..a04adaaf7ba36cf3e504d52cd7e048f848f9c86d
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/058_x86_ucode_spl_needs_retry.dts
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <0x200>;
+		u-boot-spl-with-ucode-ptr {
+		};
+
+		/*
+		 * Microcode goes before the DTB which contains it, so binman
+		 * will need to obtain the contents of the next section before
+		 * obtaining the contents of this one.
+		 */
+		u-boot-ucode {
+		};
+
+		u-boot-dtb-with-ucode {
+		};
+	};
+
+	microcode {
+		update@0 {
+			data = <0x12345678 0x12345679>;
+		};
+		update@1 {
+			data = <0xabcd0000 0x78235609>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/059_change_size.dts b/tools/u-boot-tools/binman/test/059_change_size.dts
new file mode 100644
index 0000000000000000000000000000000000000000..1a69026a64cd41111853136012b77d3d0ad9b08a
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/059_change_size.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		_testing {
+			bad-update-contents;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/060_fdt_update.dts b/tools/u-boot-tools/binman/test/060_fdt_update.dts
new file mode 100644
index 0000000000000000000000000000000000000000..f53c8a5053e4b8eaf5b0b0849f05bb0912e0fe9d
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/060_fdt_update.dts
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		pad-byte = <0x26>;
+		size = <0x28>;
+		section@0 {
+			read-only;
+			name-prefix = "ro-";
+			size = <0x10>;
+			pad-byte = <0x21>;
+
+			u-boot {
+			};
+		};
+		section@1 {
+			name-prefix = "rw-";
+			size = <0x10>;
+			pad-byte = <0x61>;
+
+			u-boot {
+			};
+		};
+		_testing {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/061_fdt_update_bad.dts b/tools/u-boot-tools/binman/test/061_fdt_update_bad.dts
new file mode 100644
index 0000000000000000000000000000000000000000..e5abf31699c3c2f3ceb7125495ea1f52f1cab37d
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/061_fdt_update_bad.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		pad-byte = <0x26>;
+		size = <0x28>;
+		section@0 {
+			read-only;
+			name-prefix = "ro-";
+			size = <0x10>;
+			pad-byte = <0x21>;
+
+			u-boot {
+			};
+		};
+		section@1 {
+			name-prefix = "rw-";
+			size = <0x10>;
+			pad-byte = <0x61>;
+
+			u-boot {
+			};
+		};
+		_testing {
+			never-complete-process-fdt;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/062_entry_args.dts b/tools/u-boot-tools/binman/test/062_entry_args.dts
new file mode 100644
index 0000000000000000000000000000000000000000..4d4f102d60c52bafc7f8849a983d36723b99ca14
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/062_entry_args.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		_testing {
+			test-str-fdt = "test0";
+			test-int-fdt = <123>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/063_entry_args_missing.dts b/tools/u-boot-tools/binman/test/063_entry_args_missing.dts
new file mode 100644
index 0000000000000000000000000000000000000000..1644e2fef3a0c7132e4b4a9c210e9e6fcc880e8d
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/063_entry_args_missing.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		_testing {
+			test-str-fdt = "test0";
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/064_entry_args_required.dts b/tools/u-boot-tools/binman/test/064_entry_args_required.dts
new file mode 100644
index 0000000000000000000000000000000000000000..705be100691529ffab00d8feac865dde7ec1afde
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/064_entry_args_required.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		_testing {
+			require-args;
+			test-str-fdt = "test0";
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/065_entry_args_unknown_datatype.dts b/tools/u-boot-tools/binman/test/065_entry_args_unknown_datatype.dts
new file mode 100644
index 0000000000000000000000000000000000000000..3e4838f4fff29171eb088a9006aac9e5ce5ec087
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/065_entry_args_unknown_datatype.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		_testing {
+			test-str-fdt = "test0";
+			test-int-fdt = <123>;
+			force-bad-datatype;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/066_text.dts b/tools/u-boot-tools/binman/test/066_text.dts
new file mode 100644
index 0000000000000000000000000000000000000000..59b1fed0ef8320f0d950d1bb5237d8666316efc7
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/066_text.dts
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		text {
+			size = <8>;
+			text-label = "test-id";
+		};
+		text2 {
+			type = "text";
+			text-label = "test-id2";
+		};
+		text3 {
+			type = "text";
+			text-label = "test-id3";
+		};
+		/* This one does not use command-line args */
+		text4 {
+			type = "text";
+			text-label = "test-id4";
+			test-id4 = "some text";
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/067_fmap.dts b/tools/u-boot-tools/binman/test/067_fmap.dts
new file mode 100644
index 0000000000000000000000000000000000000000..9c0e293ac83179fdf3217177afc47ceef4beb305
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/067_fmap.dts
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		section@0 {
+			read-only;
+			name-prefix = "ro-";
+			size = <0x10>;
+			pad-byte = <0x21>;
+
+			u-boot {
+			};
+		};
+		section@1 {
+			name-prefix = "rw-";
+			size = <0x10>;
+			pad-byte = <0x61>;
+
+			u-boot {
+			};
+		};
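+		/*
+		 * The fmap entry holds a flash map (FMAP) describing the
+		 * offset and size of the other entries in the image.
+		 */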
+		fmap {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/068_blob_named_by_arg.dts b/tools/u-boot-tools/binman/test/068_blob_named_by_arg.dts
new file mode 100644
index 0000000000000000000000000000000000000000..e129f843cd5d388a17dddb5f1b226e6695afb371
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/068_blob_named_by_arg.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		cros-ec-rw {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/069_fill.dts b/tools/u-boot-tools/binman/test/069_fill.dts
new file mode 100644
index 0000000000000000000000000000000000000000..e372ea37aaa20290efbfdad2b710affb8d83f4b5
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/069_fill.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <16>;
+		fill {
+			size = <8>;
+			fill-byte = [ff];
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/070_fill_no_size.dts b/tools/u-boot-tools/binman/test/070_fill_no_size.dts
new file mode 100644
index 0000000000000000000000000000000000000000..7b1fcf1b68bdf8a8e21a7b4a6552098aa580963a
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/070_fill_no_size.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <16>;
+		fill {
+			fill-byte = [ff];
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/071_gbb.dts b/tools/u-boot-tools/binman/test/071_gbb.dts
new file mode 100644
index 0000000000000000000000000000000000000000..551756372af1ca12a7f0c992d29ff9d9ce940ab1
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/071_gbb.dts
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
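+		/*
+		 * Chromium OS Google Binary Block (GBB); each property in the
+		 * 'flags' node sets the corresponding bit in the GBB flags.
+		 */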
+		gbb {
+			size = <0x2180>;
+			flags {
+				dev-screen-short-delay;
+				load-option-roms;
+				enable-alternate-os;
+				force-dev-switch-on;
+				force-dev-boot-usb;
+				disable-fw-rollback-check;
+				enter-triggers-tonorm;
+				force-dev-boot-legacy;
+				faft-key-override;
+				disable-ec-software-sync;
+				default-dev-boot-legacy;
+				disable-pd-software-sync;
+				disable-lid-shutdown;
+				force-dev-boot-fastboot-full-cap;
+				enable-serial;
+				disable-dwmp;
+			};
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/072_gbb_too_small.dts b/tools/u-boot-tools/binman/test/072_gbb_too_small.dts
new file mode 100644
index 0000000000000000000000000000000000000000..c088f36a1d0dbc440c3a344bdd7f9abac7d4d78e
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/072_gbb_too_small.dts
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		gbb {
+			size = <0x200>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/073_gbb_no_size.dts b/tools/u-boot-tools/binman/test/073_gbb_no_size.dts
new file mode 100644
index 0000000000000000000000000000000000000000..83be40378525ac50a52c2dda515cf9008e7ee079
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/073_gbb_no_size.dts
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		gbb {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/074_vblock.dts b/tools/u-boot-tools/binman/test/074_vblock.dts
new file mode 100644
index 0000000000000000000000000000000000000000..f0c21bfe9fc4bc7d3129721e0315f3d0de18309b
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/074_vblock.dts
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u_boot: u-boot {
+		};
+
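+		/*
+		 * Chromium OS verified-boot block covering the entries listed
+		 * in 'content'.
+		 */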
+		vblock {
+			content = <&u_boot &dtb>;
+			keyblock = "firmware.keyblock";
+			signprivate = "firmware_data_key.vbprivk";
+			version = <1>;
+			kernelkey = "kernel_subkey.vbpubk";
+			preamble-flags = <1>;
+		};
+
+		/*
+		 * Put this after the vblock so that its contents are not
+		 * available when the vblock first tries to obtain its contents
+		 */
+		dtb: u-boot-dtb {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/075_vblock_no_content.dts b/tools/u-boot-tools/binman/test/075_vblock_no_content.dts
new file mode 100644
index 0000000000000000000000000000000000000000..676d9474b3102c9b4728714908a26892e0d28856
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/075_vblock_no_content.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u_boot: u-boot {
+		};
+
+		vblock {
+			keyblock = "firmware.keyblock";
+			signprivate = "firmware_data_key.vbprivk";
+			version = <1>;
+			kernelkey = "kernel_subkey.vbpubk";
+			preamble-flags = <1>;
+		};
+
+		dtb: u-boot-dtb {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/076_vblock_bad_phandle.dts b/tools/u-boot-tools/binman/test/076_vblock_bad_phandle.dts
new file mode 100644
index 0000000000000000000000000000000000000000..ffbd0c335c3c2a4db72739ee903fda30f98be713
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/076_vblock_bad_phandle.dts
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u_boot: u-boot {
+		};
+
+		vblock {
+			content = <1000>;
+			keyblock = "firmware.keyblock";
+			signprivate = "firmware_data_key.vbprivk";
+			version = <1>;
+			kernelkey = "kernel_subkey.vbpubk";
+			preamble-flags = <1>;
+		};
+
+		dtb: u-boot-dtb {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/077_vblock_bad_entry.dts b/tools/u-boot-tools/binman/test/077_vblock_bad_entry.dts
new file mode 100644
index 0000000000000000000000000000000000000000..764c42a56e14f938a826f5239e9992236d7785f3
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/077_vblock_bad_entry.dts
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u_boot: u-boot {
+		};
+
+		vblock {
+			content = <&u_boot &other>;
+			keyblock = "firmware.keyblock";
+			signprivate = "firmware_data_key.vbprivk";
+			version = <1>;
+			kernelkey = "kernel_subkey.vbpubk";
+			preamble-flags = <1>;
+		};
+
+		dtb: u-boot-dtb {
+		};
+	};
+
+	other: other {
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/078_u_boot_tpl.dts b/tools/u-boot-tools/binman/test/078_u_boot_tpl.dts
new file mode 100644
index 0000000000000000000000000000000000000000..6c60b4c46f44ad9ceb25faf5b3c342f93ea8af18
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/078_u_boot_tpl.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		u-boot-tpl {
+		};
+		u-boot-tpl-dtb {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/079_uses_pos.dts b/tools/u-boot-tools/binman/test/079_uses_pos.dts
new file mode 100644
index 0000000000000000000000000000000000000000..7638b9b5e0cabcec8c1c691b2a5f3bd5922ab729
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/079_uses_pos.dts
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		u-boot {
+			pos = <10>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/080_fill_empty.dts b/tools/u-boot-tools/binman/test/080_fill_empty.dts
new file mode 100644
index 0000000000000000000000000000000000000000..2b78d3ae88deaf29db0e5fb088f352f1102dd8b2
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/080_fill_empty.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <16>;
+		fill {
+			size = <0>;
+			fill-byte = [ff];
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/081_x86-start16-tpl.dts b/tools/u-boot-tools/binman/test/081_x86-start16-tpl.dts
new file mode 100644
index 0000000000000000000000000000000000000000..68e6bbd68f0c66e978f6f3c5657a1d1f2abddb30
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/081_x86-start16-tpl.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <16>;
+
+		x86-start16-tpl {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/082_fdt_update_all.dts b/tools/u-boot-tools/binman/test/082_fdt_update_all.dts
new file mode 100644
index 0000000000000000000000000000000000000000..284975cc2899bfc62ef116bc1a10e8149a57dc26
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/082_fdt_update_all.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		section {
+			u-boot-dtb {
+			};
+		};
+		u-boot-spl-dtb {
+		};
+		u-boot-tpl-dtb {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/083_compress.dts b/tools/u-boot-tools/binman/test/083_compress.dts
new file mode 100644
index 0000000000000000000000000000000000000000..07813bdeaa3164cc8c8b25a44338df60ae7db07c
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/083_compress.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		blob {
+			filename = "compress";
+			compress = "lz4";
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/084_files.dts b/tools/u-boot-tools/binman/test/084_files.dts
new file mode 100644
index 0000000000000000000000000000000000000000..83ddb78f8e7b48611a50170b6c01812d9d507530
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/084_files.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		files {
+			pattern = "files/*.dat";
+			compress = "none";
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/085_files_compress.dts b/tools/u-boot-tools/binman/test/085_files_compress.dts
new file mode 100644
index 0000000000000000000000000000000000000000..847b398bf2bfe0ec7e3ba85f6e90a52474652459
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/085_files_compress.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		files {
+			pattern = "files/*.dat";
+			compress = "lz4";
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/086_files_none.dts b/tools/u-boot-tools/binman/test/086_files_none.dts
new file mode 100644
index 0000000000000000000000000000000000000000..34bd92f224aa80dace663fa91674be17cbdbeb80
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/086_files_none.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		files {
+			pattern = "files/*.none";
+			compress = "none";
+			require-matches;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/087_files_no_pattern.dts b/tools/u-boot-tools/binman/test/087_files_no_pattern.dts
new file mode 100644
index 0000000000000000000000000000000000000000..0cb5b469cb02d04c8b9bfc28fbdc627b81282315
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/087_files_no_pattern.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		files {
+			compress = "none";
+			require-matches;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/088_expand_size.dts b/tools/u-boot-tools/binman/test/088_expand_size.dts
new file mode 100644
index 0000000000000000000000000000000000000000..c8a01308ec57b0a3737c6b4100a84dd3dba08a54
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/088_expand_size.dts
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		size = <40>;
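+		/*
+		 * Entries with 'expand-size' expand to fill the space up to
+		 * the next entry (or the end of the section).
+		 */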
+		fill {
+			expand-size;
+			fill-byte = [61];
+			size = <0>;
+		};
+		u-boot {
+			offset = <8>;
+		};
+		section {
+			expand-size;
+			pad-byte = <0x62>;
+			intel-mrc {
+			};
+		};
+		u-boot2 {
+			type = "u-boot";
+			offset = <16>;
+		};
+		section2 {
+			type = "section";
+			fill {
+				expand-size;
+				fill-byte = [63];
+				size = <0>;
+			};
+			u-boot {
+				offset = <8>;
+			};
+		};
+		fill2 {
+			type = "fill";
+			expand-size;
+			fill-byte = [64];
+			size = <0>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/089_expand_size_bad.dts b/tools/u-boot-tools/binman/test/089_expand_size_bad.dts
new file mode 100644
index 0000000000000000000000000000000000000000..edc0e5cf68164a59e371a130e31c82dcc052db94
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/089_expand_size_bad.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		_testing {
+			expand-size;
+			return-contents-once;
+		};
+		u-boot {
+			offset = <8>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/090_hash.dts b/tools/u-boot-tools/binman/test/090_hash.dts
new file mode 100644
index 0000000000000000000000000000000000000000..200304599dc37f94add5c28cc9b1df08bac536ba
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/090_hash.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		u-boot {
+			hash {
+				algo = "sha256";
+			};
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/091_hash_no_algo.dts b/tools/u-boot-tools/binman/test/091_hash_no_algo.dts
new file mode 100644
index 0000000000000000000000000000000000000000..b64df2051171eaa365bf2ef3d9bfbe1a5502b9fd
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/091_hash_no_algo.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		u-boot {
+			hash {
+			};
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/092_hash_bad_algo.dts b/tools/u-boot-tools/binman/test/092_hash_bad_algo.dts
new file mode 100644
index 0000000000000000000000000000000000000000..d2402000db63761ac3dc12c368bf9b5a88b84e25
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/092_hash_bad_algo.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		u-boot {
+			hash {
+				algo = "invalid";
+			};
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/093_x86_tpl_ucode.dts b/tools/u-boot-tools/binman/test/093_x86_tpl_ucode.dts
new file mode 100644
index 0000000000000000000000000000000000000000..d7ed9fc66b80deca43c331b7958814e0fd2c3309
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/093_x86_tpl_ucode.dts
@@ -0,0 +1,29 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <0x200>;
+		u-boot-tpl-with-ucode-ptr {
+		};
+
+		u-boot-tpl-dtb-with-ucode {
+		};
+
+		u-boot-ucode {
+		};
+	};
+
+	microcode {
+		update@0 {
+			data = <0x12345678 0x12345679>;
+		};
+		update@1 {
+			data = <0xabcd0000 0x78235609>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/094_fmap_x86.dts b/tools/u-boot-tools/binman/test/094_fmap_x86.dts
new file mode 100644
index 0000000000000000000000000000000000000000..613c5dab4253b2176e40bb3781aa9b6e40f75ac0
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/094_fmap_x86.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		end-at-4gb;
+		size = <0x100>;
+		pad-byte = <0x61>;
+		u-boot {
+		};
+		intel-mrc {
+		};
+		fmap {
+			offset = <0xffffff20>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/095_fmap_x86_section.dts b/tools/u-boot-tools/binman/test/095_fmap_x86_section.dts
new file mode 100644
index 0000000000000000000000000000000000000000..4cfce456705cea1e634fcd505199af30764c3568
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/095_fmap_x86_section.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		end-at-4gb;
+		size = <0x100>;
+		u-boot {
+		};
+		section {
+			pad-byte = <0x62>;
+			intel-mrc {
+			};
+			fmap {
+				offset = <0x20>;
+			};
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/096_elf.dts b/tools/u-boot-tools/binman/test/096_elf.dts
new file mode 100644
index 0000000000000000000000000000000000000000..df3440c319490f6ed0c72756bbb6ce54a17385c6
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/096_elf.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot-elf {
+		};
+		u-boot-spl-elf {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/097_elf_strip.dts b/tools/u-boot-tools/binman/test/097_elf_strip.dts
new file mode 100644
index 0000000000000000000000000000000000000000..6f3c66fd705c3154a35fa1eb5880936d42f8af8e
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/097_elf_strip.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot-elf {
+			strip;
+		};
+		u-boot-spl-elf {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/099_hash_section.dts b/tools/u-boot-tools/binman/test/099_hash_section.dts
new file mode 100644
index 0000000000000000000000000000000000000000..dcd8683d642eb7025e99f9083dc0729708d67272
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/099_hash_section.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		section {
+			u-boot {
+			};
+			fill {
+				size = <0x10>;
+				fill-byte = [61];
+			};
+			hash {
+				algo = "sha256";
+			};
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/100_intel_refcode.dts b/tools/u-boot-tools/binman/test/100_intel_refcode.dts
new file mode 100644
index 0000000000000000000000000000000000000000..0a1a0270e5f315c9417671dc63343a70c35ca1f2
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/100_intel_refcode.dts
@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <16>;
+
+		intel-refcode {
+			filename = "refcode.bin";
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/80_4gb_and_skip_at_start_together.dts b/tools/u-boot-tools/binman/test/80_4gb_and_skip_at_start_together.dts
new file mode 100644
index 0000000000000000000000000000000000000000..90c467d910bf491c5c172ed63a08195d0bcae4d0
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/80_4gb_and_skip_at_start_together.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ */
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <32>;
+		sort-by-offset;
+		end-at-4gb;
+		skip-at-start = <0xffffffe0>;
+		u-boot {
+			offset = <0xffffffe0>;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/81_powerpc_mpc85xx_bootpg_resetvec.dts b/tools/u-boot-tools/binman/test/81_powerpc_mpc85xx_bootpg_resetvec.dts
new file mode 100644
index 0000000000000000000000000000000000000000..8f4b16c399ca552b031f2e46c2e60513a5ee1a11
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/81_powerpc_mpc85xx_bootpg_resetvec.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ */
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		powerpc-mpc85xx-bootpg-resetvec {
+		};
+	};
+};
diff --git a/tools/u-boot-tools/binman/test/Makefile b/tools/u-boot-tools/binman/test/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..e58fc807757b3528acf7bfaef14266ed982d585d
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/Makefile
@@ -0,0 +1,55 @@
+#
+# Builds test programs
+#
+# Copyright (C) 2017 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# SPDX-License-Identifier:      GPL-2.0+
+#
+
+CFLAGS := -march=i386 -m32 -nostdlib -I ../../../include
+
+LDS_UCODE := -T u_boot_ucode_ptr.lds
+LDS_BINMAN := -T u_boot_binman_syms.lds
+LDS_BINMAN_BAD := -T u_boot_binman_syms_bad.lds
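+
+# The linker scripts place the test code and data at fixed addresses so that
+# the binman tests can locate them in the resulting binaries.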
+
+TARGETS = u_boot_ucode_ptr u_boot_no_ucode_ptr bss_data \
+	u_boot_binman_syms u_boot_binman_syms.bin u_boot_binman_syms_bad \
+	u_boot_binman_syms_size
+
+all: $(TARGETS)
+
+u_boot_no_ucode_ptr: CFLAGS += $(LDS_UCODE)
+u_boot_no_ucode_ptr: u_boot_no_ucode_ptr.c
+
+u_boot_ucode_ptr: CFLAGS += $(LDS_UCODE)
+u_boot_ucode_ptr: u_boot_ucode_ptr.c
+
+bss_data: CFLAGS += bss_data.lds
+bss_data: bss_data.c
+
+u_boot_binman_syms.bin: u_boot_binman_syms
+	objcopy -O binary $< -R .note.gnu.build-id $@
+
+u_boot_binman_syms: CFLAGS += $(LDS_BINMAN)
+u_boot_binman_syms: u_boot_binman_syms.c
+
+u_boot_binman_syms_bad: CFLAGS += $(LDS_BINMAN_BAD)
+u_boot_binman_syms_bad: u_boot_binman_syms_bad.c
+
+u_boot_binman_syms_size: CFLAGS += $(LDS_BINMAN)
+u_boot_binman_syms_size: u_boot_binman_syms_size.c
+
+clean:
+	rm -f $(TARGETS)
+
+help:
+	@echo "Makefile for binman test programs"
+	@echo
+	@echo "Intended for use on x86 hosts"
+	@echo
+	@echo "Targets:"
+	@echo
+	@echo -e "\thelp	- Print help (this is it!)"
+	@echo -e "\tall	- Builds test programs (default target)"
+	@echo -e "\tclean	- Delete output files"
diff --git a/tools/u-boot-tools/binman/test/bss_data b/tools/u-boot-tools/binman/test/bss_data
new file mode 100755
index 0000000000000000000000000000000000000000..afa28282aa157f6717c1ba244ecbfc6e1b081734
Binary files /dev/null and b/tools/u-boot-tools/binman/test/bss_data differ
diff --git a/tools/u-boot-tools/binman/test/bss_data.c b/tools/u-boot-tools/binman/test/bss_data.c
new file mode 100644
index 0000000000000000000000000000000000000000..79537c31b6587fbd4aece6e7fb75d7a38747da30
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/bss_data.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2016 Google, Inc
+ *
+ * Simple program with a BSS array and a __bss_size symbol, used by the
+ * binman tests that check BSS padding.
+ */
+
+int bss_data[10];
+int __bss_size = sizeof(bss_data);
+
+int main()
+{
+	bss_data[2] = 2;
+
+	return 0;
+}
diff --git a/tools/u-boot-tools/binman/test/bss_data.lds b/tools/u-boot-tools/binman/test/bss_data.lds
new file mode 100644
index 0000000000000000000000000000000000000000..306dab504305277ffa59d09e4717fde560626f12
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/bss_data.lds
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2016 Google, Inc
+ */
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+	. = 0xfffffdf0;
+	_start = .;
+	__bss_size = 10;
+}
diff --git a/tools/u-boot-tools/binman/test/files/1.dat b/tools/u-boot-tools/binman/test/files/1.dat
new file mode 100644
index 0000000000000000000000000000000000000000..a9524706171615c5578d4f8d75732e69dce2423b
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/files/1.dat
@@ -0,0 +1 @@
+sorry I'm late
diff --git a/tools/u-boot-tools/binman/test/files/2.dat b/tools/u-boot-tools/binman/test/files/2.dat
new file mode 100644
index 0000000000000000000000000000000000000000..687ea52730db7d59e2adcb1e170addce17b3c92a
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/files/2.dat
@@ -0,0 +1 @@
+Oh, don't bother apologising, I'm sorry you're alive
diff --git a/tools/u-boot-tools/binman/test/files/ignored_dir.dat/ignore b/tools/u-boot-tools/binman/test/files/ignored_dir.dat/ignore
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tools/u-boot-tools/binman/test/files/not-this-one b/tools/u-boot-tools/binman/test/files/not-this-one
new file mode 100644
index 0000000000000000000000000000000000000000..e71c2250f964c2967060c888ef99eb7c394c2a06
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/files/not-this-one
@@ -0,0 +1 @@
+this does not have a .dat extension
diff --git a/tools/u-boot-tools/binman/test/u_boot_binman_syms b/tools/u-boot-tools/binman/test/u_boot_binman_syms
new file mode 100755
index 0000000000000000000000000000000000000000..126a1a623092cbe25f7a24118d26d4a0e8d0e0fc
Binary files /dev/null and b/tools/u-boot-tools/binman/test/u_boot_binman_syms differ
diff --git a/tools/u-boot-tools/binman/test/u_boot_binman_syms.c b/tools/u-boot-tools/binman/test/u_boot_binman_syms.c
new file mode 100644
index 0000000000000000000000000000000000000000..4898f983e32ceb66564bae80a4b5c6b2a6eadb95
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/u_boot_binman_syms.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2017 Google, Inc
+ *
+ * Simple program to create some binman symbols. This is used by binman tests.
+ */
+
+#define CONFIG_BINMAN
+#include <binman_sym.h>
+
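+/*
+ * Each declaration below creates a symbol in the .binman_sym_table section
+ * (see u_boot_binman_syms.lds); binman fills in the real values, such as the
+ * offset of u-boot-spl, when it packs the image.
+ */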
+binman_sym_declare(unsigned long, u_boot_spl, offset);
+binman_sym_declare(unsigned long long, u_boot_spl2, offset);
+binman_sym_declare(unsigned long, u_boot_any, image_pos);
diff --git a/tools/u-boot-tools/binman/test/u_boot_binman_syms.lds b/tools/u-boot-tools/binman/test/u_boot_binman_syms.lds
new file mode 100644
index 0000000000000000000000000000000000000000..29cf9d0e546aaf99703ad821557422d706ccf41b
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/u_boot_binman_syms.lds
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2016 Google, Inc
+ */
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+	. = 0x00000000;
+	_start = .;
+
+	. = ALIGN(4);
+	.text :
+	{
+		__image_copy_start = .;
+		*(.text*)
+	}
+
+	. = ALIGN(4);
+	.binman_sym_table : {
+		__binman_sym_start = .;
+		KEEP(*(SORT(.binman_sym*)));
+		__binman_sym_end = .;
+	}
+
+}
diff --git a/tools/u-boot-tools/binman/test/u_boot_binman_syms_bad b/tools/u-boot-tools/binman/test/u_boot_binman_syms_bad
new file mode 100755
index 0000000000000000000000000000000000000000..8da3d9d48f388a9be53e92590984411691f6721f
Binary files /dev/null and b/tools/u-boot-tools/binman/test/u_boot_binman_syms_bad differ
diff --git a/tools/u-boot-tools/binman/test/u_boot_binman_syms_bad.c b/tools/u-boot-tools/binman/test/u_boot_binman_syms_bad.c
new file mode 120000
index 0000000000000000000000000000000000000000..939b2e965f8df56937d88f5212ba0db5c72737e1
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/u_boot_binman_syms_bad.c
@@ -0,0 +1 @@
+u_boot_binman_syms.c
\ No newline at end of file
diff --git a/tools/u-boot-tools/binman/test/u_boot_binman_syms_bad.lds b/tools/u-boot-tools/binman/test/u_boot_binman_syms_bad.lds
new file mode 100644
index 0000000000000000000000000000000000000000..849d158ac86e693db19c525486f4aa0c215ecc73
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/u_boot_binman_syms_bad.lds
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2016 Google, Inc
+ */
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+	. = 0x00000000;
+	_start = .;
+
+	. = ALIGN(4);
+	.text :
+	{
+		*(.text*)
+	}
+
+	. = ALIGN(4);
+	.binman_sym_table : {
+		__binman_sym_start = .;
+		KEEP(*(SORT(.binman_sym*)));
+		__binman_sym_end = .;
+	}
+
+}
diff --git a/tools/u-boot-tools/binman/test/u_boot_binman_syms_size b/tools/u-boot-tools/binman/test/u_boot_binman_syms_size
new file mode 100755
index 0000000000000000000000000000000000000000..d691e897c0f1842cb82efbc67f57d9f62853b99c
Binary files /dev/null and b/tools/u-boot-tools/binman/test/u_boot_binman_syms_size differ
diff --git a/tools/u-boot-tools/binman/test/u_boot_binman_syms_size.c b/tools/u-boot-tools/binman/test/u_boot_binman_syms_size.c
new file mode 100644
index 0000000000000000000000000000000000000000..7224bc1863c75b4e51d30ba3f73841d7fad28659
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/u_boot_binman_syms_size.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2017 Google, Inc
+ *
+ * Simple program to create some binman symbols. This is used by binman tests.
+ */
+
+#define CONFIG_BINMAN
+#include <binman_sym.h>
+
+binman_sym_declare(char, u_boot_spl, pos);
diff --git a/tools/u-boot-tools/binman/test/u_boot_no_ucode_ptr b/tools/u-boot-tools/binman/test/u_boot_no_ucode_ptr
new file mode 100755
index 0000000000000000000000000000000000000000..f72462f0be41a934d468481bf627d6c1ec9a8e1c
Binary files /dev/null and b/tools/u-boot-tools/binman/test/u_boot_no_ucode_ptr differ
diff --git a/tools/u-boot-tools/binman/test/u_boot_no_ucode_ptr.c b/tools/u-boot-tools/binman/test/u_boot_no_ucode_ptr.c
new file mode 100644
index 0000000000000000000000000000000000000000..24cdb909d002abc2b6e62b16813b378b14d54335
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/u_boot_no_ucode_ptr.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2016 Google, Inc
+ *
+ * Simple program to create a bad _dt_ucode_base_size symbol to create an
+ * error when it is used. This is used by binman tests.
+ */
+
+static unsigned long not__dt_ucode_base_size[2]
+		__attribute__((section(".ucode"))) = {1, 2};
diff --git a/tools/u-boot-tools/binman/test/u_boot_ucode_ptr b/tools/u-boot-tools/binman/test/u_boot_ucode_ptr
new file mode 100755
index 0000000000000000000000000000000000000000..dbfb184cecfbcf55cf43ed4f4ac0ee90a7364d93
Binary files /dev/null and b/tools/u-boot-tools/binman/test/u_boot_ucode_ptr differ
diff --git a/tools/u-boot-tools/binman/test/u_boot_ucode_ptr.c b/tools/u-boot-tools/binman/test/u_boot_ucode_ptr.c
new file mode 100644
index 0000000000000000000000000000000000000000..243c8e9e1a131d2ab2f1abe6b2ecc122a36188af
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/u_boot_ucode_ptr.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2016 Google, Inc
+ *
+ * Simple program to create a _dt_ucode_base_size symbol which can be read
+ * by binutils. This is used by binman tests.
+ */
+
+static unsigned long _dt_ucode_base_size[2]
+		__attribute__((section(".ucode"))) = {1, 2};
diff --git a/tools/u-boot-tools/binman/test/u_boot_ucode_ptr.lds b/tools/u-boot-tools/binman/test/u_boot_ucode_ptr.lds
new file mode 100644
index 0000000000000000000000000000000000000000..0cf9b762b5d761af1f17c3b8e565eca2f45ceb29
--- /dev/null
+++ b/tools/u-boot-tools/binman/test/u_boot_ucode_ptr.lds
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2016 Google, Inc
+ */
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+
+SECTIONS
+{
+	. = 0xfffffdf0;
+	_start = .;
+	.ucode : {
+		*(.ucode)
+	}
+}
diff --git a/tools/u-boot-tools/bmp_logo.c b/tools/u-boot-tools/bmp_logo.c
new file mode 100644
index 0000000000000000000000000000000000000000..55f833fb9b8bbcb11c58e29430b193ff327e7ab8
--- /dev/null
+++ b/tools/u-boot-tools/bmp_logo.c
@@ -0,0 +1,208 @@
+#include "compiler.h"
+
+enum {
+	MODE_GEN_INFO,
+	MODE_GEN_DATA
+};
+
+typedef struct bitmap_s {		/* bitmap description */
+	uint16_t width;
+	uint16_t height;
+	uint8_t	palette[256*3];
+	uint8_t	*data;
+} bitmap_t;
+
+#define DEFAULT_CMAP_SIZE	16	/* size of default color map	*/
+
+void usage(const char *prog)
+{
+	fprintf(stderr, "Usage: %s [--gen-info|--gen-data] file\n", prog);
+}
+
+/*
+ * Neutralize little endians.
+ */
+uint16_t le_short(uint16_t x)
+{
+    uint16_t val;
+    uint8_t *p = (uint8_t *)(&x);
+
+    val =  (*p++ & 0xff) << 0;
+    val |= (*p & 0xff) << 8;
+
+    return val;
+}
+
+void skip_bytes (FILE *fp, int n)
+{
+	while (n-- > 0)
+		fgetc (fp);
+}
+
+__attribute__ ((__noreturn__))
+int error (char * msg, FILE *fp)
+{
+	fprintf (stderr, "ERROR: %s\n", msg);
+
+	fclose (fp);
+
+	exit (EXIT_FAILURE);
+}
+
+void gen_info(bitmap_t *b, uint16_t n_colors)
+{
+	printf("/*\n"
+		" * Automatically generated by \"tools/bmp_logo\"\n"
+		" *\n"
+		" * DO NOT EDIT\n"
+		" *\n"
+		" */\n\n\n"
+		"#ifndef __BMP_LOGO_H__\n"
+		"#define __BMP_LOGO_H__\n\n"
+		"#define BMP_LOGO_WIDTH\t\t%d\n"
+		"#define BMP_LOGO_HEIGHT\t\t%d\n"
+		"#define BMP_LOGO_COLORS\t\t%d\n"
+		"#define BMP_LOGO_OFFSET\t\t%d\n\n"
+		"extern unsigned short bmp_logo_palette[];\n"
+		"extern unsigned char bmp_logo_bitmap[];\n\n"
+		"#endif /* __BMP_LOGO_H__ */\n",
+		b->width, b->height, n_colors,
+		DEFAULT_CMAP_SIZE);
+}
+
+int main (int argc, char *argv[])
+{
+	int	mode, i, x;
+	FILE	*fp;
+	bitmap_t bmp;
+	bitmap_t *b = &bmp;
+	uint16_t data_offset, n_colors, hdr_size;
+
+	if (argc < 3) {
+		usage(argv[0]);
+		exit (EXIT_FAILURE);
+	}
+
+	if (!strcmp(argv[1], "--gen-info"))
+		mode = MODE_GEN_INFO;
+	else if (!strcmp(argv[1], "--gen-data"))
+		mode = MODE_GEN_DATA;
+	else {
+		usage(argv[0]);
+		exit(EXIT_FAILURE);
+	}
+
+	fp = fopen(argv[2], "rb");
+	if (!fp) {
+		perror(argv[2]);
+		exit (EXIT_FAILURE);
+	}
+
+	if (fgetc (fp) != 'B' || fgetc (fp) != 'M')
+		error ("Input file is not a bitmap", fp);
+
+	/*
+	 * read width and height of the image, and the number of colors used;
+	 * ignore the rest
+	 */
+	skip_bytes (fp, 8);
+	if (fread (&data_offset, sizeof (uint16_t), 1, fp) != 1)
+		error ("Couldn't read bitmap data offset", fp);
+	skip_bytes(fp, 2);
+	if (fread(&hdr_size,   sizeof(uint16_t), 1, fp) != 1)
+		error("Couldn't read bitmap header size", fp);
+	if (hdr_size < 40)
+		error("Invalid bitmap header", fp);
+	skip_bytes(fp, 2);
+	if (fread (&b->width,   sizeof (uint16_t), 1, fp) != 1)
+		error ("Couldn't read bitmap width", fp);
+	skip_bytes (fp, 2);
+	if (fread (&b->height,  sizeof (uint16_t), 1, fp) != 1)
+		error ("Couldn't read bitmap height", fp);
+	skip_bytes (fp, 22);
+	if (fread (&n_colors, sizeof (uint16_t), 1, fp) != 1)
+		error ("Couldn't read bitmap colors", fp);
+	skip_bytes(fp, hdr_size - 34);
+
+	/*
+	 * Repair endianess.
+	 */
+	data_offset = le_short(data_offset);
+	b->width = le_short(b->width);
+	b->height = le_short(b->height);
+	n_colors = le_short(n_colors);
+
+	/* assume we are working with an 8-bit file */
+	if ((n_colors == 0) || (n_colors > 256 - DEFAULT_CMAP_SIZE)) {
+		/* reserve DEFAULT_CMAP_SIZE color map entries for default map */
+		n_colors = 256 - DEFAULT_CMAP_SIZE;
+	}
+
+	if (mode == MODE_GEN_INFO) {
+		gen_info(b, n_colors);
+		goto out;
+	}
+
+	printf("/*\n"
+		" * Automatically generated by \"tools/bmp_logo\"\n"
+		" *\n"
+		" * DO NOT EDIT\n"
+		" *\n"
+		" */\n\n\n"
+		"#ifndef __BMP_LOGO_DATA_H__\n"
+		"#define __BMP_LOGO_DATA_H__\n\n");
+
+	/* allocate memory */
+	if ((b->data = (uint8_t *)malloc(b->width * b->height)) == NULL)
+		error ("Error allocating memory for file", fp);
+
+	/* read and print the palette information */
+	printf("unsigned short bmp_logo_palette[] = {\n");
+
+	for (i=0; i<n_colors; ++i) {
+		b->palette[(int)(i*3+2)] = fgetc(fp);
+		b->palette[(int)(i*3+1)] = fgetc(fp);
+		b->palette[(int)(i*3+0)] = fgetc(fp);
+		x = fgetc(fp);	/* skip the reserved (4th) byte of each palette entry */
+
+		printf ("%s0x0%X%X%X,%s",
+			((i%8) == 0) ? "\t" : "  ",
+			(b->palette[(int)(i*3+0)] >> 4) & 0x0F,
+			(b->palette[(int)(i*3+1)] >> 4) & 0x0F,
+			(b->palette[(int)(i*3+2)] >> 4) & 0x0F,
+			((i%8) == 7) ? "\n" : ""
+		);
+	}
+
+	/* seek to offset indicated by file header */
+	fseek(fp, (long)data_offset, SEEK_SET);
+
+	/* read the bitmap; leave room for default color map */
+	printf ("\n");
+	printf ("};\n");
+	printf ("\n");
+	printf("unsigned char bmp_logo_bitmap[] = {\n");
+	for (i=(b->height-1)*b->width; i>=0; i-=b->width) {
+		for (x = 0; x < b->width; x++) {
+			b->data[i + x] = (uint8_t) fgetc(fp)
+						+ DEFAULT_CMAP_SIZE;
+		}
+	}
+
+	for (i=0; i<(b->height*b->width); ++i) {
+		if ((i%8) == 0)
+			putchar ('\t');
+		printf ("0x%02X,%c",
+			b->data[i],
+			((i%8) == 7) ? '\n' : ' '
+		);
+	}
+	printf ("\n"
+		"};\n\n"
+		"#endif /* __BMP_LOGO_DATA_H__ */\n"
+	);
+
+out:
+	fclose(fp);
+	return 0;
+}
diff --git a/tools/u-boot-tools/buildman/.gitignore b/tools/u-boot-tools/buildman/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..0d20b6487c61e7d1bde93acf4a14b7a89083a16d
--- /dev/null
+++ b/tools/u-boot-tools/buildman/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/tools/u-boot-tools/buildman/README b/tools/u-boot-tools/buildman/README
new file mode 100644
index 0000000000000000000000000000000000000000..5a709c6ff9e3b5ea270b1890dd3d02e0e1b87b20
--- /dev/null
+++ b/tools/u-boot-tools/buildman/README
@@ -0,0 +1,1187 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2013 The Chromium OS Authors.
+
+(Please read 'How to change from MAKEALL' if you are used to that tool)
+
+Quick-start
+===========
+
+If you just want to quickly set up buildman so you can build something (for
+example Raspberry Pi 2):
+
+   cd /path/to/u-boot
+   PATH=$PATH:`pwd`/tools/buildman
+   buildman --fetch-arch arm
+   buildman -k rpi_2
+   ls ../current/rpi_2
+   # u-boot.bin is the output image
+
+
+What is this?
+=============
+
+This tool handles building U-Boot to check that you have not broken it
+with your patch series. It can build each individual commit and report
+which boards fail on which commits, and which errors come up. It aims
+to make full use of multi-processor machines.
+
+A key feature of buildman is its output summary, which allows warnings,
+errors or image size increases in a particular commit or board to be
+quickly identified and the offending commit pinpointed. This can be a big
+help for anyone working with >10 patches at a time.
+
+
+Caveats
+=======
+
+Buildman can be stopped and restarted, in which case it will continue
+where it left off. This should happen cleanly and without side-effects.
+If not, it is a bug, for which a patch would be welcome.
+
+Buildman gets so tied up in its work that it can ignore the outside world.
+You may need to press Ctrl-C several times to quit it. Also it will print
+out various exceptions when stopped. You may have to kill it since the
+Ctrl-C handling is somewhat broken.
+
+
+Theory of Operation
+===================
+
+(please read this section in full twice or you will be perpetually confused)
+
+Buildman is a builder. It is not make, although it runs make. It does not
+produce any useful output on the terminal while building, except for
+progress information (except with -v, see below). All the output (errors,
+warnings and binaries if you ask for them) is stored in output
+directories, which you can look at while the build is progressing, or when
+it is finished.
+
+Buildman is designed to build entire git branches, i.e. multiple commits. It
+can be run repeatedly on the same branch. In this case it will automatically
+rebuild commits which have changed (and remove its old results for that
+commit). It is possible to build a branch for one board, then later build it
+for another board. If you want buildman to re-build a commit it has already
+built (e.g. because of a toolchain update), use the -f flag.
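+
+For example, a forced rebuild of a branch might be requested like this (a
+sketch; <branch> stands for a real branch as elsewhere in this document):
+
+    ./tools/buildman/buildman -b <branch> -f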
+
+Buildman produces a concise summary of which boards succeeded and failed.
+It shows which commit introduced which board failure using a simple
+red/green colour coding. Full error information can be requested, in which
+case it is de-duped and displayed against the commit that introduced the
+error. An example workflow is below.
+
+Buildman stores image size information and can report changes in image size
+from commit to commit. An example of this is below.
+
+Buildman starts multiple threads, and each thread builds for one board at
+a time. A thread starts at the first commit, configures the source for your
+board and builds it. Then it checks out the next commit and does an
+incremental build. Eventually the thread reaches the last commit and stops.
+If errors or warnings are found along the way, the thread will reconfigure
+after every commit, and your build will be very slow. This is because a
+file that produces just a warning would not normally be rebuilt in an
+incremental build.
+
+Buildman works in an entirely separate place from your U-Boot repository.
+It creates a separate working directory for each thread, and puts the
+output files in the working directory, organised by commit name and board
+name, in a two-level hierarchy.
+
+Buildman is invoked in your U-Boot directory, the one with the .git
+directory. It clones this repository into a copy for each thread, and the
+threads do not affect the state of your git repository. Any checkouts done
+by the thread affect only the working directory for that thread.
+
+Buildman automatically selects the correct tool chain for each board. You
+must supply suitable tool chains, but buildman takes care of selecting the
+right one.
+
+Buildman generally builds a branch (with the -b flag), and in this case
+builds the upstream commit as well, for comparison. It cannot build
+individual commits at present, unless (maybe) you point it at an empty
+branch. Put all your commits in a branch, set the branch's upstream to a
+valid value, and all will be well. Otherwise buildman will perform random
+actions. Use -n to check what the random actions might be.
+
+If you just want to build the current source tree, leave off the -b flag
+and add -e. This will display results and errors as they happen. You can
+still look at them later using -se. Note that buildman will assume that the
+source has changed, and will build all specified boards in this case.
+
+Buildman is optimised for building many commits at once, for many boards.
+On multi-core machines, Buildman is fast because it uses most of the
+available CPU power. When it gets to the end, or if you are building just
+a few commits or boards, it will be pretty slow. As a tip, if you don't
+plan to use your machine for anything else, you can use -T to increase the
+number of threads beyond the default.
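+
+For example, to use eight threads regardless of the CPU count (the number
+here is only an illustration):
+
+    ./tools/buildman/buildman -b <branch> -T 8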
+
+
+Selecting which boards to build
+===============================
+
+Buildman lets you build all boards, or a subset. Specify the subset by passing
+command-line arguments that list the desired board name, architecture name,
+SOC name, or anything else in the boards.cfg file. Multiple arguments are
+allowed. Each argument will be interpreted as a regular expression, so
+behaviour is a superset of exact or substring matching. Examples are:
+
+* 'tegra20'      All boards with a Tegra20 SoC
+* 'tegra'        All boards with any Tegra SoC (Tegra20, Tegra30, Tegra114...)
+* '^tegra[23]0$' All boards with either Tegra20 or Tegra30 SoC
+* 'powerpc'      All PowerPC boards
+
+While the default is to OR the terms together, you can also make use of
+the '&' operator to limit the selection:
+
+* 'freescale & arm sandbox'  All Freescale boards with ARM architecture,
+                             plus sandbox
+
+You can also use -x to specifically exclude some boards. For example:
+
+ buildman arm -x nvidia,freescale,.*ball$
+
+means to build all arm boards except nvidia, freescale and anything ending
+with 'ball'.
+
+For building specific boards you can use the --boards option, which takes a
+comma-separated list of board target names and can be used multiple times on
+the command line:
+
+   buildman --boards sandbox,snow
+   buildman --boards sandbox --boards snow
+
+It is convenient to use the -n option to see what will be built based on
+the subset given. Use -v as well to get an actual list of boards.
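+
+For example, something like this previews which boards the 'tegra' subset
+(from the examples above) would select, without building anything:
+
+    ./tools/buildman/buildman -n -v tegra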
+
+Buildman does not store intermediate object files. It optionally copies
+the binary output into a directory when a build is successful (-k). Size
+information is always recorded. It needs a fair bit of disk space to work,
+typically 250MB per thread.
+
+
+Setting up
+==========
+
+1. Get the U-Boot source. You probably already have it, but if not these
+steps should get you started with a repo and some commits for testing.
+
+$ cd /path/to/u-boot
+$ git clone git://git.denx.de/u-boot.git .
+$ git checkout -b my-branch origin/master
+$ # Add some commits to the branch, ready for testing
+
+2. Create ~/.buildman to tell buildman where to find tool chains (see 'The
+.buildman file' later for details). As an example:
+
+# Buildman settings file
+
+[toolchain]
+root: /
+rest: /toolchains/*
+eldk: /opt/eldk-4.2
+arm: /opt/linaro/gcc-linaro-arm-linux-gnueabihf-4.8-2013.08_linux
+aarch64: /opt/linaro/gcc-linaro-aarch64-none-elf-4.8-2013.10_linux
+
+[toolchain-alias]
+x86: i386
+blackfin: bfin
+nds32: nds32le
+openrisc: or1k
+
+
+This selects the available toolchain paths. Add the base directory for
+each of your toolchains here. Buildman will search inside these directories
+and also in any '/usr' and '/usr/bin' subdirectories.
+
+Make sure the tags (here 'root:', 'rest:' and 'eldk:') are unique.
+
+The toolchain-alias section indicates that the i386 toolchain should be used
+to build x86 commits.
+
+Note that you can also specify exact toolchain prefixes if you like:
+
+[toolchain-prefix]
+arm: /opt/arm-eabi-4.6/bin/arm-eabi-
+
+or even:
+
+[toolchain-prefix]
+arm: /opt/arm-eabi-4.6/bin/arm-eabi-gcc
+
+This tells buildman that you want to use this exact toolchain for the arm
+architecture. This will override any toolchains found by searching using the
+[toolchain] settings.
+
+Since the toolchain prefix is an explicit request, buildman will report an
+error if a toolchain is not found with that prefix. The current PATH will be
+searched, so it is possible to use:
+
+[toolchain-prefix]
+arm: arm-none-eabi-
+
+and buildman will find arm-none-eabi-gcc in /usr/bin if you have it installed.
+
+[toolchain-wrapper]
+wrapper: ccache
+
+This tells buildman to use a compiler wrapper in front of CROSS_COMPILE (in
+this example, ccache). It doesn't affect the toolchain scan. The wrapper is
+added when the CROSS_COMPILE environment variable is set. The tag name in
+this section is ignored. If more than one line is provided, only the last
+one is used.
+
+3. Make sure you have the required Python prerequisites
+
+Buildman uses multiprocessing, Queue, shutil, StringIO, ConfigParser and
+urllib2. These should normally be available, but if you get an error like
+this then you will need to obtain those modules:
+
+    ImportError: No module named multiprocessing
+
+
+4. Check the available toolchains
+
+Run this check to make sure that you have a toolchain for every architecture.
+
+$ ./tools/buildman/buildman --list-tool-chains
+Scanning for tool chains
+   - scanning prefix '/opt/gcc-4.6.3-nolibc/x86_64-linux/bin/x86_64-linux-'
+Tool chain test:  OK, arch='x86', priority 1
+   - scanning prefix '/opt/arm-eabi-4.6/bin/arm-eabi-'
+Tool chain test:  OK, arch='arm', priority 1
+   - scanning path '/toolchains/gcc-4.9.0-nolibc/i386-linux'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/i386-linux/.'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/i386-linux/bin'
+         - found '/toolchains/gcc-4.9.0-nolibc/i386-linux/bin/i386-linux-gcc'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/i386-linux/usr/bin'
+Tool chain test:  OK, arch='i386', priority 4
+   - scanning path '/toolchains/gcc-4.9.0-nolibc/aarch64-linux'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/aarch64-linux/.'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/aarch64-linux/bin'
+         - found '/toolchains/gcc-4.9.0-nolibc/aarch64-linux/bin/aarch64-linux-gcc'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/aarch64-linux/usr/bin'
+Tool chain test:  OK, arch='aarch64', priority 4
+   - scanning path '/toolchains/gcc-4.9.0-nolibc/microblaze-linux'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/microblaze-linux/.'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/microblaze-linux/bin'
+         - found '/toolchains/gcc-4.9.0-nolibc/microblaze-linux/bin/microblaze-linux-gcc'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/microblaze-linux/usr/bin'
+Tool chain test:  OK, arch='microblaze', priority 4
+   - scanning path '/toolchains/gcc-4.9.0-nolibc/mips64-linux'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/mips64-linux/.'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/mips64-linux/bin'
+         - found '/toolchains/gcc-4.9.0-nolibc/mips64-linux/bin/mips64-linux-gcc'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/mips64-linux/usr/bin'
+Tool chain test:  OK, arch='mips64', priority 4
+   - scanning path '/toolchains/gcc-4.9.0-nolibc/sparc64-linux'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/sparc64-linux/.'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/sparc64-linux/bin'
+         - found '/toolchains/gcc-4.9.0-nolibc/sparc64-linux/bin/sparc64-linux-gcc'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/sparc64-linux/usr/bin'
+Tool chain test:  OK, arch='sparc64', priority 4
+   - scanning path '/toolchains/gcc-4.9.0-nolibc/arm-unknown-linux-gnueabi'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/arm-unknown-linux-gnueabi/.'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/arm-unknown-linux-gnueabi/bin'
+         - found '/toolchains/gcc-4.9.0-nolibc/arm-unknown-linux-gnueabi/bin/arm-unknown-linux-gnueabi-gcc'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/arm-unknown-linux-gnueabi/usr/bin'
+Tool chain test:  OK, arch='arm', priority 3
+Toolchain '/toolchains/gcc-4.9.0-nolibc/arm-unknown-linux-gnueabi/bin/arm-unknown-linux-gnueabi-gcc' at priority 3 will be ignored because another toolchain for arch 'arm' has priority 1
+   - scanning path '/toolchains/gcc-4.9.0-nolibc/sparc-linux'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/sparc-linux/.'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/sparc-linux/bin'
+         - found '/toolchains/gcc-4.9.0-nolibc/sparc-linux/bin/sparc-linux-gcc'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/sparc-linux/usr/bin'
+Tool chain test:  OK, arch='sparc', priority 4
+   - scanning path '/toolchains/gcc-4.9.0-nolibc/mips-linux'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/mips-linux/.'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/mips-linux/bin'
+         - found '/toolchains/gcc-4.9.0-nolibc/mips-linux/bin/mips-linux-gcc'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/mips-linux/usr/bin'
+Tool chain test:  OK, arch='mips', priority 4
+   - scanning path '/toolchains/gcc-4.9.0-nolibc/x86_64-linux'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/x86_64-linux/.'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/x86_64-linux/bin'
+         - found '/toolchains/gcc-4.9.0-nolibc/x86_64-linux/bin/x86_64-linux-gcc'
+         - found '/toolchains/gcc-4.9.0-nolibc/x86_64-linux/bin/x86_64-linux-x86_64-linux-gcc'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/x86_64-linux/usr/bin'
+Tool chain test:  OK, arch='x86_64', priority 4
+Tool chain test:  OK, arch='x86_64', priority 4
+Toolchain '/toolchains/gcc-4.9.0-nolibc/x86_64-linux/bin/x86_64-linux-x86_64-linux-gcc' at priority 4 will be ignored because another toolchain for arch 'x86_64' has priority 4
+   - scanning path '/toolchains/gcc-4.9.0-nolibc/m68k-linux'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/m68k-linux/.'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/m68k-linux/bin'
+         - found '/toolchains/gcc-4.9.0-nolibc/m68k-linux/bin/m68k-linux-gcc'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/m68k-linux/usr/bin'
+Tool chain test:  OK, arch='m68k', priority 4
+   - scanning path '/toolchains/gcc-4.9.0-nolibc/powerpc-linux'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/powerpc-linux/.'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/powerpc-linux/bin'
+         - found '/toolchains/gcc-4.9.0-nolibc/powerpc-linux/bin/powerpc-linux-gcc'
+      - looking in '/toolchains/gcc-4.9.0-nolibc/powerpc-linux/usr/bin'
+Tool chain test:  OK, arch='powerpc', priority 4
+   - scanning path '/toolchains/gcc-4.6.3-nolibc/bfin-uclinux'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/bfin-uclinux/.'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/bfin-uclinux/bin'
+         - found '/toolchains/gcc-4.6.3-nolibc/bfin-uclinux/bin/bfin-uclinux-gcc'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/bfin-uclinux/usr/bin'
+Tool chain test:  OK, arch='bfin', priority 6
+   - scanning path '/toolchains/gcc-4.6.3-nolibc/sparc-linux'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/sparc-linux/.'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/sparc-linux/bin'
+         - found '/toolchains/gcc-4.6.3-nolibc/sparc-linux/bin/sparc-linux-gcc'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/sparc-linux/usr/bin'
+Tool chain test:  OK, arch='sparc', priority 4
+Toolchain '/toolchains/gcc-4.6.3-nolibc/sparc-linux/bin/sparc-linux-gcc' at priority 4 will be ignored because another toolchain for arch 'sparc' has priority 4
+   - scanning path '/toolchains/gcc-4.6.3-nolibc/mips-linux'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/mips-linux/.'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/mips-linux/bin'
+         - found '/toolchains/gcc-4.6.3-nolibc/mips-linux/bin/mips-linux-gcc'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/mips-linux/usr/bin'
+Tool chain test:  OK, arch='mips', priority 4
+Toolchain '/toolchains/gcc-4.6.3-nolibc/mips-linux/bin/mips-linux-gcc' at priority 4 will be ignored because another toolchain for arch 'mips' has priority 4
+   - scanning path '/toolchains/gcc-4.6.3-nolibc/m68k-linux'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/m68k-linux/.'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/m68k-linux/bin'
+         - found '/toolchains/gcc-4.6.3-nolibc/m68k-linux/bin/m68k-linux-gcc'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/m68k-linux/usr/bin'
+Tool chain test:  OK, arch='m68k', priority 4
+Toolchain '/toolchains/gcc-4.6.3-nolibc/m68k-linux/bin/m68k-linux-gcc' at priority 4 will be ignored because another toolchain for arch 'm68k' has priority 4
+   - scanning path '/toolchains/gcc-4.6.3-nolibc/powerpc-linux'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/powerpc-linux/.'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/powerpc-linux/bin'
+         - found '/toolchains/gcc-4.6.3-nolibc/powerpc-linux/bin/powerpc-linux-gcc'
+      - looking in '/toolchains/gcc-4.6.3-nolibc/powerpc-linux/usr/bin'
+Tool chain test:  OK, arch='powerpc', priority 4
+Tool chain test:  OK, arch='or32', priority 4
+   - scanning path '/'
+      - looking in '/.'
+      - looking in '/bin'
+      - looking in '/usr/bin'
+         - found '/usr/bin/i586-mingw32msvc-gcc'
+         - found '/usr/bin/c89-gcc'
+         - found '/usr/bin/x86_64-linux-gnu-gcc'
+         - found '/usr/bin/gcc'
+         - found '/usr/bin/c99-gcc'
+         - found '/usr/bin/arm-linux-gnueabi-gcc'
+         - found '/usr/bin/aarch64-linux-gnu-gcc'
+         - found '/usr/bin/winegcc'
+         - found '/usr/bin/arm-linux-gnueabihf-gcc'
+Tool chain test:  OK, arch='i586', priority 11
+Tool chain test:  OK, arch='c89', priority 11
+Tool chain test:  OK, arch='x86_64', priority 4
+Toolchain '/usr/bin/x86_64-linux-gnu-gcc' at priority 4 will be ignored because another toolchain for arch 'x86_64' has priority 4
+Tool chain test:  OK, arch='sandbox', priority 11
+Tool chain test:  OK, arch='c99', priority 11
+Tool chain test:  OK, arch='arm', priority 4
+Toolchain '/usr/bin/arm-linux-gnueabi-gcc' at priority 4 will be ignored because another toolchain for arch 'arm' has priority 1
+Tool chain test:  OK, arch='aarch64', priority 4
+Toolchain '/usr/bin/aarch64-linux-gnu-gcc' at priority 4 will be ignored because another toolchain for arch 'aarch64' has priority 4
+Tool chain test:  OK, arch='sandbox', priority 11
+Toolchain '/usr/bin/winegcc' at priority 11 will be ignored because another toolchain for arch 'sandbox' has priority 11
+Tool chain test:  OK, arch='arm', priority 4
+Toolchain '/usr/bin/arm-linux-gnueabihf-gcc' at priority 4 will be ignored because another toolchain for arch 'arm' has priority 1
+List of available toolchains (34):
+aarch64   : /toolchains/gcc-4.9.0-nolibc/aarch64-linux/bin/aarch64-linux-gcc
+alpha     : /toolchains/gcc-4.9.0-nolibc/alpha-linux/bin/alpha-linux-gcc
+am33_2.0  : /toolchains/gcc-4.9.0-nolibc/am33_2.0-linux/bin/am33_2.0-linux-gcc
+arm       : /opt/arm-eabi-4.6/bin/arm-eabi-gcc
+bfin      : /toolchains/gcc-4.6.3-nolibc/bfin-uclinux/bin/bfin-uclinux-gcc
+c89       : /usr/bin/c89-gcc
+c99       : /usr/bin/c99-gcc
+frv       : /toolchains/gcc-4.9.0-nolibc/frv-linux/bin/frv-linux-gcc
+h8300     : /toolchains/gcc-4.9.0-nolibc/h8300-elf/bin/h8300-elf-gcc
+hppa      : /toolchains/gcc-4.9.0-nolibc/hppa-linux/bin/hppa-linux-gcc
+hppa64    : /toolchains/gcc-4.9.0-nolibc/hppa64-linux/bin/hppa64-linux-gcc
+i386      : /toolchains/gcc-4.9.0-nolibc/i386-linux/bin/i386-linux-gcc
+i586      : /usr/bin/i586-mingw32msvc-gcc
+ia64      : /toolchains/gcc-4.9.0-nolibc/ia64-linux/bin/ia64-linux-gcc
+m32r      : /toolchains/gcc-4.9.0-nolibc/m32r-linux/bin/m32r-linux-gcc
+m68k      : /toolchains/gcc-4.9.0-nolibc/m68k-linux/bin/m68k-linux-gcc
+microblaze: /toolchains/gcc-4.9.0-nolibc/microblaze-linux/bin/microblaze-linux-gcc
+mips      : /toolchains/gcc-4.9.0-nolibc/mips-linux/bin/mips-linux-gcc
+mips64    : /toolchains/gcc-4.9.0-nolibc/mips64-linux/bin/mips64-linux-gcc
+or32      : /toolchains/gcc-4.5.1-nolibc/or32-linux/bin/or32-linux-gcc
+powerpc   : /toolchains/gcc-4.9.0-nolibc/powerpc-linux/bin/powerpc-linux-gcc
+powerpc64 : /toolchains/gcc-4.9.0-nolibc/powerpc64-linux/bin/powerpc64-linux-gcc
+ppc64le   : /toolchains/gcc-4.9.0-nolibc/ppc64le-linux/bin/ppc64le-linux-gcc
+s390x     : /toolchains/gcc-4.9.0-nolibc/s390x-linux/bin/s390x-linux-gcc
+sandbox   : /usr/bin/gcc
+sh4       : /toolchains/gcc-4.6.3-nolibc/sh4-linux/bin/sh4-linux-gcc
+sparc     : /toolchains/gcc-4.9.0-nolibc/sparc-linux/bin/sparc-linux-gcc
+sparc64   : /toolchains/gcc-4.9.0-nolibc/sparc64-linux/bin/sparc64-linux-gcc
+tilegx    : /toolchains/gcc-4.6.2-nolibc/tilegx-linux/bin/tilegx-linux-gcc
+x86       : /opt/gcc-4.6.3-nolibc/x86_64-linux/bin/x86_64-linux-gcc
+x86_64    : /toolchains/gcc-4.9.0-nolibc/x86_64-linux/bin/x86_64-linux-gcc
+
+
+You can see that everything is covered, even some strange ones that won't
+be used (c89 and c99). This is a feature.
+
+
+5. Install new toolchains if needed
+
+You can download toolchains and update the [toolchain] section of the
+settings file to find them.
+
+To make this easier, buildman can automatically download and install
+toolchains from kernel.org. First list the available architectures:
+
+$ ./tools/buildman/buildman --fetch-arch list
+Checking: https://www.kernel.org/pub/tools/crosstool/files/bin/x86_64/4.6.3/
+Checking: https://www.kernel.org/pub/tools/crosstool/files/bin/x86_64/4.6.2/
+Checking: https://www.kernel.org/pub/tools/crosstool/files/bin/x86_64/4.5.1/
+Checking: https://www.kernel.org/pub/tools/crosstool/files/bin/x86_64/4.2.4/
+Available architectures: alpha am33_2.0 arm bfin cris crisv32 frv h8300
+hppa hppa64 i386 ia64 m32r m68k mips mips64 or32 powerpc powerpc64 s390x sh4
+sparc sparc64 tilegx x86_64 xtensa
+
+Then pick one and download it:
+
+$ ./tools/buildman/buildman --fetch-arch or32
+Checking: https://www.kernel.org/pub/tools/crosstool/files/bin/x86_64/4.6.3/
+Checking: https://www.kernel.org/pub/tools/crosstool/files/bin/x86_64/4.6.2/
+Checking: https://www.kernel.org/pub/tools/crosstool/files/bin/x86_64/4.5.1/
+Downloading: https://www.kernel.org/pub/tools/crosstool/files/bin/x86_64/4.5.1//x86_64-gcc-4.5.1-nolibc_or32-linux.tar.xz
+Unpacking to: /home/sjg/.buildman-toolchains
+Testing
+      - looking in '/home/sjg/.buildman-toolchains/gcc-4.5.1-nolibc/or32-linux/.'
+      - looking in '/home/sjg/.buildman-toolchains/gcc-4.5.1-nolibc/or32-linux/bin'
+         - found '/home/sjg/.buildman-toolchains/gcc-4.5.1-nolibc/or32-linux/bin/or32-linux-gcc'
+Tool chain test:  OK
+
+Or download them all from kernel.org and move them to the /toolchains directory:
+
+$ ./tools/buildman/buildman --fetch-arch all
+$ sudo mkdir -p /toolchains
+$ sudo mv ~/.buildman-toolchains/*/* /toolchains/
+
+For those not available from kernel.org, download from the following links.
+
+arc: https://github.com/foss-for-synopsys-dwc-arc-processors/toolchain/releases/
+    download/arc-2016.09-release/arc_gnu_2016.09_prebuilt_uclibc_le_archs_linux_install.tar.gz
+blackfin: http://sourceforge.net/projects/adi-toolchain/files/
+    blackfin-toolchain-elf-gcc-4.5-2014R1_45-RC2.x86_64.tar.bz2
+nds32: http://osdk.andestech.com/packages/
+    nds32le-linux-glibc-v1.tgz
+nios2: http://sourcery.mentor.com/public/gnu_toolchain/nios2-linux-gnu/
+    sourceryg++-2015.11-27-nios2-linux-gnu-i686-pc-linux-gnu.tar.bz2
+sh: http://sourcery.mentor.com/public/gnu_toolchain/sh-linux-gnu/
+    renesas-4.4-200-sh-linux-gnu-i686-pc-linux-gnu.tar.bz2
+
+Note that the openrisc kernel.org toolchain is out of date. Download the latest one from
+http://opencores.org/or1k/OpenRISC_GNU_tool_chain#Prebuilt_versions - eg:
+ftp://ocuser:ocuser@openrisc.opencores.org/toolchain/gcc-or1k-elf-4.8.1-x86.tar.bz2.
+
+Buildman should now be set up to use your new toolchain.
+
+At the time of writing, U-Boot has these architectures:
+
+   arc, arm, blackfin, m68k, microblaze, mips, nds32, nios2, openrisc
+   powerpc, sandbox, sh, sparc, x86
+
+Of these, only arc and nds32 are not available at kernel.org.
+
+
+How to run it
+=============
+
+First do a dry run using the -n flag: (replace <branch> with a real, local
+branch with a valid upstream)
+
+$ ./tools/buildman/buildman -b <branch> -n
+
+If it can't detect the upstream branch, try checking out the branch, and
+doing something like 'git branch --set-upstream-to upstream/master'
+or something similar. Buildman will try to guess a suitable upstream branch
+if it can't find one (you will see a message like "Guessing upstream as ...").
+
+As an example:
+
+Dry run, so not doing much. But I would do this:
+
+Building 18 commits for 1059 boards (4 threads, 1 job per thread)
+Build directory: ../lcd9b
+    5bb3505 Merge branch 'master' of git://git.denx.de/u-boot-arm
+    c18f1b4 tegra: Use const for pinmux_config_pingroup/table()
+    2f043ae tegra: Add display support to funcmux
+    e349900 tegra: fdt: Add pwm binding and node
+    424a5f0 tegra: fdt: Add LCD definitions for Tegra
+    0636ccf tegra: Add support for PWM
+    a994fe7 tegra: Add SOC support for display/lcd
+    fcd7350 tegra: Add LCD driver
+    4d46e9d tegra: Add LCD support to Nvidia boards
+    991bd48 arm: Add control over cachability of memory regions
+    54e8019 lcd: Add CONFIG_LCD_ALIGNMENT to select frame buffer alignment
+    d92aff7 lcd: Add support for flushing LCD fb from dcache after update
+    dbd0677 tegra: Align LCD frame buffer to section boundary
+    0cff9b8 tegra: Support control of cache settings for LCD
+    9c56900 tegra: fdt: Add LCD definitions for Seaboard
+    5cc29db lcd: Add CONFIG_CONSOLE_SCROLL_LINES option to speed console
+    cac5a23 tegra: Enable display/lcd support on Seaboard
+    49ff541 wip
+
+Total boards to build for each commit: 1059
+
+This shows that it will build all 1059 boards, using 4 threads (because
+we have a 4-core CPU). Each thread will run with -j1, meaning that each
+make job will use a single CPU. The list of commits to be built helps you
+confirm that things look about right. Notice that buildman has chosen a
+'base' directory for you, immediately above your source tree.
+
+Buildman works entirely inside the base directory, here ../lcd9b,
+creating a working directory for each thread, and creating output
+directories for each commit and board.
+
+
+Suggested Workflow
+==================
+
+To run the build for real, take off the -n:
+
+$ ./tools/buildman/buildman -b <branch>
+
+Buildman will set up some working directories, and get started. After a
+minute or so it will settle down to a steady pace, with a display like this:
+
+Building 18 commits for 1059 boards (4 threads, 1 job per thread)
+  528   36  124 /19062  1:13:30  : SIMPC8313_SP
+
+This means that it is building 19062 board/commit combinations. So far it
+has managed to successfully build 528. Another 36 have built with warnings,
+and 124 more didn't build at all. Buildman expects to complete the process
+in around an hour and a quarter. Use this time to buy a faster computer.
+
+
+To find out how the build went, ask for a summary with -s. You can do this
+either before the build completes (presumably in another terminal) or
+afterwards. Let's work through an example of how this is used:
+
+$ ./tools/buildman/buildman -b lcd9b -s
+...
+01: Merge branch 'master' of git://git.denx.de/u-boot-arm
+   powerpc:   + galaxy5200_LOWBOOT
+02: tegra: Use const for pinmux_config_pingroup/table()
+03: tegra: Add display support to funcmux
+04: tegra: fdt: Add pwm binding and node
+05: tegra: fdt: Add LCD definitions for Tegra
+06: tegra: Add support for PWM
+07: tegra: Add SOC support for display/lcd
+08: tegra: Add LCD driver
+09: tegra: Add LCD support to Nvidia boards
+10: arm: Add control over cachability of memory regions
+11: lcd: Add CONFIG_LCD_ALIGNMENT to select frame buffer alignment
+12: lcd: Add support for flushing LCD fb from dcache after update
+       arm:   + lubbock
+13: tegra: Align LCD frame buffer to section boundary
+14: tegra: Support control of cache settings for LCD
+15: tegra: fdt: Add LCD definitions for Seaboard
+16: lcd: Add CONFIG_CONSOLE_SCROLL_LINES option to speed console
+17: tegra: Enable display/lcd support on Seaboard
+18: wip
+
+This shows which commits have succeeded and which have failed. In this case
+the build is still in progress so many boards are not built yet (use -u to
+see which ones). But still we can see a few failures. The galaxy5200_LOWBOOT
+never builds correctly. This could be a problem with our toolchain, or it
+could be a bug in the upstream. The good news is that we probably don't need
+to blame our commits. The bad news is that our commits are not tested on that
+board.
+
+Commit 12 broke lubbock. That's what the '+ lubbock' means. The failure
+is never fixed by a later commit, or you would see lubbock again, in green,
+without the +.
+
+To see the actual error:
+
+$ ./tools/buildman/buildman -b <branch> -se lubbock
+...
+12: lcd: Add support for flushing LCD fb from dcache after update
+       arm:   + lubbock
++common/libcommon.o: In function `lcd_sync':
++/u-boot/lcd9b/.bm-work/00/common/lcd.c:120: undefined reference to `flush_dcache_range'
++arm-none-linux-gnueabi-ld: BFD (Sourcery G++ Lite 2010q1-202) 2.19.51.20090709 assertion fail /scratch/julian/2010q1-release-linux-lite/obj/binutils-src-2010q1-202-arm-none-linux-gnueabi-i686-pc-linux-gnu/bfd/elf32-arm.c:12572
++make: *** [/u-boot/lcd9b/.bm-work/00/build/u-boot] Error 139
+13: tegra: Align LCD frame buffer to section boundary
+14: tegra: Support control of cache settings for LCD
+15: tegra: fdt: Add LCD definitions for Seaboard
+16: lcd: Add CONFIG_CONSOLE_SCROLL_LINES option to speed console
+-/u-boot/lcd9b/.bm-work/00/common/lcd.c:120: undefined reference to `flush_dcache_range'
++/u-boot/lcd9b/.bm-work/00/common/lcd.c:125: undefined reference to `flush_dcache_range'
+17: tegra: Enable display/lcd support on Seaboard
+18: wip
+
+So the problem is in lcd.c, due to missing cache operations. This information
+should be enough to work out what that commit is doing to break these
+boards. (In this case pxa did not have cache operations defined).
+
+If you see error lines marked with '-', that means that the errors were fixed
+by that commit. Sometimes commits can be in the wrong order, so that a
+breakage is introduced for a few commits and fixed by later commits. This
+shows up clearly with buildman. You can then reorder the commits and try
+again.
+
+At commit 16, the error moves: you can see that the old error at line 120
+is fixed, but there is a new one at line 125. This is probably only because
+we added some code and moved the broken line further down the file.
+
+If many boards have the same error, then -e will display the error only
+once. This makes the output as concise as possible. To see which boards have
+each error, use -l. So it is safe to omit the board name - you will not get
+lots of repeated output for every board.
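+
+For example, a summary that shows each error once together with the list of
+affected boards might be requested like this (a sketch, reusing the branch
+from above):
+
+    ./tools/buildman/buildman -b <branch> -sel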
+
+Buildman tries to distinguish warnings from errors, and shows warning lines
+separately with a 'w' prefix.
+
+The full build output in this case is available in:
+
+../lcd9b/12_of_18_gd92aff7_lcd--Add-support-for/lubbock/
+
+   done: Indicates the build was done, and holds the return code from make.
+         This is 0 for a good build, typically 2 for a failure.
+
+   err:  Output from stderr, if any. Errors and warnings appear here.
+
+   log:  Output from stdout. Normally there isn't any since buildman runs
+         in silent mode. Use -V to force a verbose build (this passes V=1
+         to 'make')
+
+   toolchain: Shows information about the toolchain used for the build.
+
+   sizes: Shows image size information.
+
+It is possible to get the build binary output there also. Use the -k option
+for this. In that case you will also see some output files, like:
+
+   System.map  toolchain  u-boot  u-boot.bin  u-boot.map  autoconf.mk
+   (also SPL versions u-boot-spl and u-boot-spl.bin if available)
+
+
+Checking Image Sizes
+====================
+
+A key requirement for U-Boot is that you keep code/data size to a minimum.
+Where a new feature increases this noticeably it should normally be put
+behind a CONFIG flag so that boards can leave it disabled and keep the image
+size more or less the same with each new release.
+
+To check the impact of your commits on image size, use -S. For example:
+
+$ ./tools/buildman/buildman -b us-x86 -sS
+Summary of 10 commits for 1066 boards (4 threads, 1 job per thread)
+01: MAKEALL: add support for per architecture toolchains
+02: x86: Add function to get top of usable ram
+       x86: (for 1/3 boards)  text -272.0  rodata +41.0
+03: x86: Add basic cache operations
+04: x86: Permit bootstage and timer data to be used prior to relocation
+       x86: (for 1/3 boards)  data +16.0
+05: x86: Add an __end symbol to signal the end of the U-Boot binary
+       x86: (for 1/3 boards)  text +76.0
+06: x86: Rearrange the output input to remove BSS
+       x86: (for 1/3 boards)  bss -2140.0
+07: x86: Support relocation of FDT on start-up
+       x86: +   coreboot-x86
+08: x86: Add error checking to x86 relocation code
+09: x86: Adjust link device tree include file
+10: x86: Enable CONFIG_OF_CONTROL on coreboot
+
+
+You can see that image size only changed on x86, which is good because this
+series is not supposed to change any other board. From commit 7 onwards the
+build fails so we don't get code size numbers. The numbers are fractional
+because they are an average of all boards for that architecture. The
+intention is to allow you to quickly find image size problems introduced by
+your commits.
+
+Note that the 'text' region and 'rodata' are split out. You should add the
+two together to get the total read-only size (reported as the first column
+in the output from binutils' 'size' utility).
+
+A useful option is --step which lets you skip some commits. For example
+--step 2 will show the image sizes for only every 2nd commit (so it will
+compare the image sizes of the 1st, 3rd, 5th... commits). You can also use
+--step 0 which will compare only the first and last commits. This is useful
+for an overview of how your entire series affects code size. It will build
+only the upstream commit and your final branch commit.
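+
+As a sketch, the two variants described above would be invoked like this
+(same branch as in the earlier examples):
+
+    ./tools/buildman/buildman -b <branch> -sS --step 2
+    ./tools/buildman/buildman -b <branch> -sS --step 0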
+
+You can also use -d to see a detailed size breakdown for each board. This
+list is sorted in order from largest growth to largest reduction.
+
+It is even possible to go a little further with the -B option (--bloat). This
+shows where U-Boot has bloated, breaking the size change down to the function
+level. Example output is below:
+
+$ ./tools/buildman/buildman -b us-mem4 -sSdB
+...
+19: Roll crc32 into hash infrastructure
+       arm: (for 10/10 boards)  all -143.4  bss +1.2  data -4.8  rodata -48.2 text -91.6
+            paz00          :  all +23  bss -4  rodata -29  text +56
+               u-boot: add: 1/0, grow: 3/-2 bytes: 168/-104 (64)
+                 function                                   old     new   delta
+                 hash_command                                80     160     +80
+                 crc32_wd_buf                                 -      56     +56
+                 ext4fs_read_file                           540     568     +28
+                 insert_var_value_sub                       688     692      +4
+                 run_list_real                             1996    1992      -4
+                 do_mem_crc                                 168      68    -100
+            trimslice      :  all -9  bss +16  rodata -29  text +4
+               u-boot: add: 1/0, grow: 1/-3 bytes: 136/-124 (12)
+                 function                                   old     new   delta
+                 hash_command                                80     160     +80
+                 crc32_wd_buf                                 -      56     +56
+                 ext4fs_iterate_dir                         672     668      -4
+                 ext4fs_read_file                           568     548     -20
+                 do_mem_crc                                 168      68    -100
+            whistler       :  all -9  bss +16  rodata -29  text +4
+               u-boot: add: 1/0, grow: 1/-3 bytes: 136/-124 (12)
+                 function                                   old     new   delta
+                 hash_command                                80     160     +80
+                 crc32_wd_buf                                 -      56     +56
+                 ext4fs_iterate_dir                         672     668      -4
+                 ext4fs_read_file                           568     548     -20
+                 do_mem_crc                                 168      68    -100
+            seaboard       :  all -9  bss -28  rodata -29  text +48
+               u-boot: add: 1/0, grow: 3/-2 bytes: 160/-104 (56)
+                 function                                   old     new   delta
+                 hash_command                                80     160     +80
+                 crc32_wd_buf                                 -      56     +56
+                 ext4fs_read_file                           548     568     +20
+                 run_list_real                             1996    2000      +4
+                 do_nandboot                                760     756      -4
+                 do_mem_crc                                 168      68    -100
+            colibri_t20    :  all -9  rodata -29  text +20
+               u-boot: add: 1/0, grow: 2/-3 bytes: 140/-112 (28)
+                 function                                   old     new   delta
+                 hash_command                                80     160     +80
+                 crc32_wd_buf                                 -      56     +56
+                 read_abs_bbt                               204     208      +4
+                 do_nandboot                                760     756      -4
+                 ext4fs_read_file                           576     568      -8
+                 do_mem_crc                                 168      68    -100
+            ventana        :  all -37  bss -12  rodata -29  text +4
+               u-boot: add: 1/0, grow: 1/-3 bytes: 136/-124 (12)
+                 function                                   old     new   delta
+                 hash_command                                80     160     +80
+                 crc32_wd_buf                                 -      56     +56
+                 ext4fs_iterate_dir                         672     668      -4
+                 ext4fs_read_file                           568     548     -20
+                 do_mem_crc                                 168      68    -100
+            harmony        :  all -37  bss -16  rodata -29  text +8
+               u-boot: add: 1/0, grow: 2/-3 bytes: 140/-124 (16)
+                 function                                   old     new   delta
+                 hash_command                                80     160     +80
+                 crc32_wd_buf                                 -      56     +56
+                 nand_write_oob_syndrome                    428     432      +4
+                 ext4fs_iterate_dir                         672     668      -4
+                 ext4fs_read_file                           568     548     -20
+                 do_mem_crc                                 168      68    -100
+            medcom-wide    :  all -417  bss +28  data -16  rodata -93  text -336
+               u-boot: add: 1/-1, grow: 1/-2 bytes: 88/-376 (-288)
+                 function                                   old     new   delta
+                 crc32_wd_buf                                 -      56     +56
+                 do_fat_read_at                            2872    2904     +32
+                 hash_algo                                   16       -     -16
+                 do_mem_crc                                 168      68    -100
+                 hash_command                               420     160    -260
+            tec            :  all -449  bss -4  data -16  rodata -93  text -336
+               u-boot: add: 1/-1, grow: 1/-2 bytes: 88/-376 (-288)
+                 function                                   old     new   delta
+                 crc32_wd_buf                                 -      56     +56
+                 do_fat_read_at                            2872    2904     +32
+                 hash_algo                                   16       -     -16
+                 do_mem_crc                                 168      68    -100
+                 hash_command                               420     160    -260
+            plutux         :  all -481  bss +16  data -16  rodata -93  text -388
+               u-boot: add: 1/-1, grow: 1/-3 bytes: 68/-408 (-340)
+                 function                                   old     new   delta
+                 crc32_wd_buf                                 -      56     +56
+                 do_load_serial_bin                        1688    1700     +12
+                 hash_algo                                   16       -     -16
+                 do_fat_read_at                            2904    2872     -32
+                 do_mem_crc                                 168      68    -100
+                 hash_command                               420     160    -260
+   powerpc: (for 5/5 boards)  all +37.4  data -3.2  rodata -41.8  text +82.4
+            MPC8610HPCD    :  all +55  rodata -29  text +84
+               u-boot: add: 1/0, grow: 0/-1 bytes: 176/-96 (80)
+                 function                                   old     new   delta
+                 hash_command                                 -     176    +176
+                 do_mem_crc                                 184      88     -96
+            MPC8641HPCN    :  all +55  rodata -29  text +84
+               u-boot: add: 1/0, grow: 0/-1 bytes: 176/-96 (80)
+                 function                                   old     new   delta
+                 hash_command                                 -     176    +176
+                 do_mem_crc                                 184      88     -96
+            MPC8641HPCN_36BIT:  all +55  rodata -29  text +84
+               u-boot: add: 1/0, grow: 0/-1 bytes: 176/-96 (80)
+                 function                                   old     new   delta
+                 hash_command                                 -     176    +176
+                 do_mem_crc                                 184      88     -96
+            sbc8641d       :  all +55  rodata -29  text +84
+               u-boot: add: 1/0, grow: 0/-1 bytes: 176/-96 (80)
+                 function                                   old     new   delta
+                 hash_command                                 -     176    +176
+                 do_mem_crc                                 184      88     -96
+            xpedite517x    :  all -33  data -16  rodata -93  text +76
+               u-boot: add: 1/-1, grow: 0/-1 bytes: 176/-112 (64)
+                 function                                   old     new   delta
+                 hash_command                                 -     176    +176
+                 hash_algo                                   16       -     -16
+                 do_mem_crc                                 184      88     -96
+...
+
+
+This shows that commit 19 has reduced code size for arm slightly and increased
+it for powerpc. This increase was partly offset by reductions in rodata and
+data/bss.
+
+Shown below the summary lines are the sizes for each board. Below each board
+are the sizes for each function. This information starts with:
+
+   add - number of functions added / removed
+   grow - number of functions which grew / shrunk
+   bytes - number of bytes of code added to / removed from all functions,
+            plus the total byte change in brackets
+
+The change seems to be that hash_command() has increased by more than the
+do_mem_crc() function has decreased. The function sizes typically add up to
+roughly the text area size, but note that every read-only section except
+rodata is included in 'text', so the function total does not exactly
+correspond.
+
+It is common when refactoring code for the rodata to decrease as the text size
+increases, and vice versa.
+
+
+The .buildman file
+==================
+
+The .buildman file provides information about the available toolchains and
+also allows build flags to be passed to 'make'. It consists of several
+sections, with the section name in square brackets. Within each section are
+a set of (tag, value) pairs.
+
+'[toolchain]' section
+
+    This lists the available toolchains. The tag here doesn't matter, but
+    make sure it is unique. The value is the path to the toolchain. Buildman
+    will look in that path for a file ending in 'gcc'. It will then execute
+    it to check that it is a C compiler, passing only the --version flag to
+    it. If the return code is 0, buildman assumes that it is a valid C
+    compiler. It uses the first part of the name as the architecture and
+    strips off the last part when setting the CROSS_COMPILE environment
+    variable (parts are delimited with a hyphen).
+
+    For example powerpc-linux-gcc will be noted as a toolchain for 'powerpc'
+    and CROSS_COMPILE will be set to powerpc-linux- when using it.
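+
+    As a sketch, a single entry might look like this (the tag 'kernelorg' and
+    the path are only examples, taken from the toolchain listing earlier in
+    this document):
+
+    [toolchain]
+    kernelorg: /toolchains/gcc-4.9.0-nolibc/powerpc-linux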
+
+'[toolchain-alias]' section
+
+    This converts toolchain architecture names to U-Boot names. For example,
+    if an x86 toolchains is called i386-linux-gcc it will not normally be
+    used for architecture 'x86'. Adding 'x86: i386 x86_64' to this section
+    will tell buildman that the i386 and x86_64 toolchains can be used for
+    the x86 architecture.
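+
+    In settings-file form, that example reads:
+
+    [toolchain-alias]
+    x86: i386 x86_64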
+
+'[make-flags]' section
+
+    U-Boot's build system supports a few flags (such as BUILD_TAG) which
+    affect the build product. These flags can be specified in the buildman
+    settings file. They can also be useful when building U-Boot against other
+    open source software.
+
+    [make-flags]
+    at91-boards=ENABLE_AT91_TEST=1
+    snapper9260=${at91-boards} BUILD_TAG=442
+    snapper9g45=${at91-boards} BUILD_TAG=443
+
+    This will use 'make ENABLE_AT91_TEST=1 BUILD_TAG=442' for snapper9260
+    and 'make ENABLE_AT91_TEST=1 BUILD_TAG=443' for snapper9g45. A special
+    variable ${target} is available to access the target name (snapper9260
+    and snapper9g45 in this case). Variables are resolved recursively. Note
+    that variables can only contain the characters A-Z, a-z, 0-9, hyphen (-)
+    and underscore (_).
+
+    It is expected that any variables added are dealt with in U-Boot's
+    config.mk file and documented in the README.
+
+    Note that you can pass ad-hoc options to the build using environment
+    variables, for example:
+
+       SOME_OPTION=1234 ./tools/buildman/buildman my_board
+
+
+Quick Sanity Check
+==================
+
+If you have made changes and want to do a quick sanity check of the
+currently checked-out source, run buildman without the -b flag. This will
+build the selected boards and display build status as it runs (i.e. -v is
+enabled automatically). Use -e to see errors/warnings as well.
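+
+For example (the board names here are only illustrative; any subset works):
+
+    ./tools/buildman/buildman -e sandbox snow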
+
+
+Building Ranges
+===============
+
+You can build a range of commits by specifying a range instead of a branch
+when using the -b flag. For example:
+
+    upstream/master..us-buildman
+
+will build commits in us-buildman that are not in upstream/master.
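+
+Put together with -b, the range above would be built with:
+
+    ./tools/buildman/buildman -b upstream/master..us-buildman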
+
+
+Building Faster
+===============
+
+By default, buildman executes 'make mrproper' prior to building the first
+commit for each board. This causes everything to be built from scratch. If you
+trust the build system's incremental build capabilities, you can pass the -I
+flag to skip the 'make mrproper' invocation, which will reduce the amount of
+work 'make' does, and hence speed up the build. This flag will speed up any
+buildman invocation, since it reduces the amount of work done on any build.
+
+One possible application of buildman is as part of a continual edit, build,
+edit, build, ... cycle; repeatedly applying buildman to the same change or
+series of changes while making small incremental modifications to the source
+each time. This provides quick feedback regarding the correctness of recent
+modifications. In this scenario, buildman's default choice of build directory
+causes more build work to be performed than strictly necessary.
+
+By default, each buildman thread uses a single directory for all builds. When a
+thread builds multiple boards, the configuration built in this directory will
+cycle through various different configurations, one per board built by the
+thread. Variations in the configuration will force a rebuild of affected source
+files when a thread switches between boards. Ideally, such buildman-induced
+rebuilds would not happen, thus allowing the build to operate as efficiently as
+the build system and source changes allow. buildman's -P flag may be used to
+enable this; -P causes each board to be built in a separate (board-specific)
+directory, thus avoiding any buildman-induced configuration changes in any
+build directory.
+
+U-Boot's build system embeds information such as a build timestamp into the
+final binary. This information varies each time U-Boot is built. This causes
+various files to be rebuilt even if no source changes are made, which in turn
+requires that the final U-Boot binary be re-linked. This unnecessary work can
+be avoided by turning off the timestamp feature. This can be achieved by
+setting the SOURCE_DATE_EPOCH environment variable to 0.
+
+Combining all of these options together yields the command-line shown below.
+This will provide the quickest possible feedback regarding the current content
+of the source tree, thus allowing rapid tested evolution of the code.
+
+    SOURCE_DATE_EPOCH=0 ./tools/buildman/buildman -I -P tegra
+
+
+Checking configuration
+======================
+
+A common requirement when converting CONFIG options to Kconfig is to check
+that the effective configuration has not changed due to the conversion.
+Buildman supports this with the -K option, used after a build. This shows
+differences in effective configuration between one commit and the next.
+
+For example:
+
+    $ buildman -b kc4 -sK
+    ...
+    43: Convert CONFIG_SPL_USBETH_SUPPORT to Kconfig
+    arm:
+    + u-boot.cfg: CONFIG_SPL_ENV_SUPPORT=1 CONFIG_SPL_NET_SUPPORT=1
+    + u-boot-spl.cfg: CONFIG_SPL_MMC_SUPPORT=1 CONFIG_SPL_NAND_SUPPORT=1
+    + all: CONFIG_SPL_ENV_SUPPORT=1 CONFIG_SPL_MMC_SUPPORT=1 CONFIG_SPL_NAND_SUPPORT=1 CONFIG_SPL_NET_SUPPORT=1
+    am335x_evm_usbspl :
+    + u-boot.cfg: CONFIG_SPL_ENV_SUPPORT=1 CONFIG_SPL_NET_SUPPORT=1
+    + u-boot-spl.cfg: CONFIG_SPL_MMC_SUPPORT=1 CONFIG_SPL_NAND_SUPPORT=1
+    + all: CONFIG_SPL_ENV_SUPPORT=1 CONFIG_SPL_MMC_SUPPORT=1 CONFIG_SPL_NAND_SUPPORT=1 CONFIG_SPL_NET_SUPPORT=1
+    44: Convert CONFIG_SPL_USB_HOST_SUPPORT to Kconfig
+    ...
+
+This shows that commit 44 enabled three new options for the board
+am335x_evm_usbspl which were not enabled in commit 43. There is also a
+summary for 'arm' showing all the changes detected for that architecture.
+In this case there is only one board with changes, so 'arm' output is the
+same as 'am335x_evm_usbspl'.
+
+The -K option uses the u-boot.cfg, spl/u-boot-spl.cfg and tpl/u-boot-tpl.cfg
+files which are produced by a build. If all you want is to check the
+configuration you can in fact avoid doing a full build, using -D. This tells
+buildman to configure U-Boot and create the .cfg files, but not actually
+build the source. This is 5-10 times faster than doing a full build.
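+
+For example, reusing the branch from the example above, the builds could be
+done with:
+
+    $ buildman -b kc4 -D
+
+followed by 'buildman -b kc4 -sK' as before to see the differences.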
+
+By default buildman considers the following two configuration methods
+equivalent:
+
+   #define CONFIG_SOME_OPTION
+
+   CONFIG_SOME_OPTION=y
+
+The former would appear in a header file and the latter in a defconfig
+file. To achieve this, buildman considers 'y' to be '1' in configuration
+variables. This avoids lots of useless output when converting a CONFIG
+option to Kconfig. To disable this behaviour, use --squash-config-y.
+
+
+Checking the environment
+========================
+
+When converting CONFIG options which manipulate the default environment,
+a common requirement is to check that the default environment has not
+changed due to the conversion. Buildman supports this with the -U option,
+used after a build. This shows differences in the default environment
+between one commit and the next.
+
+For example:
+
+$ buildman -b squash brppt1 -sU
+boards.cfg is up to date. Nothing to do.
+Summary of 2 commits for 3 boards (3 threads, 3 jobs per thread)
+01: Migrate bootlimit to Kconfig
+02: Squashed commit of the following:
+   c brppt1_mmc: altbootcmd=mmc dev 1; run mmcboot0; -> mmc dev 1; run mmcboot0
+   c brppt1_spi: altbootcmd=mmc dev 1; run mmcboot0; -> mmc dev 1; run mmcboot0
+   + brppt1_nand: altbootcmd=run usbscript
+   - brppt1_nand:  altbootcmd=run usbscript
+(no errors to report)
+
+This shows that commit 2 modified the value of 'altbootcmd' for 'brppt1_mmc'
+and 'brppt1_spi', removing a trailing semicolon. 'brppt1_nand' gained a
+value for 'altbootcmd', but lost one for ' altbootcmd'.
+
+The -U option uses the u-boot.env files which are produced by a build.
+
+Other options
+=============
+
+Buildman has various other command line options. Try --help to see them.
+
+When doing builds, Buildman's return code will reflect the overall result:
+
+    0 (success)     No errors or warnings found
+    128             Errors found
+    129             Warnings found
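+
+For example, a wrapper or CI script might check the result like this (a
+sketch; adjust the branch and board selection to suit):
+
+    ./tools/buildman/buildman -b my_branch sandbox
+    ret=$?
+    if [ $ret -ge 128 ]; then
+        echo "buildman reported problems (exit code $ret)"
+    fi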
+
+
+How to change from MAKEALL
+==========================
+
+Buildman includes most of the features of MAKEALL and is generally faster
+and easier to use. In particular it builds entire branches: if a particular
+commit introduces an error in a particular board, buildman can easily show
+you this, even if a later commit fixes that error.
+
+The reasons to deprecate MAKEALL are:
+- We don't want to maintain two build systems
+- Buildman is typically faster
+- Buildman has a lot more features
+
+But still, many people will be sad to lose MAKEALL. If you are used to
+MAKEALL, here are a few pointers.
+
+First you need to set up your tool chains - see the 'Setting up' section
+for details. Once you have your required toolchain(s) detected then you are
+ready to go.
+
+To build the current source tree, run buildman without a -b flag:
+
+   ./tools/buildman/buildman <list of things to build>
+
+This will build the current source tree for the given boards and display
+the results and errors.
+
+However buildman usually works on entire branches, and for that you must
+specify a branch using the -b flag:
+
+   ./tools/buildman/buildman -b <branch_name> <list of things to build>
+
+followed by (afterwards, or perhaps concurrently in another terminal):
+
+   ./tools/buildman/buildman -b <branch_name> -s <list of things to build>
+
+to see the results of the build. Rather than showing you all the output,
+buildman just shows a summary, with red indicating that a commit introduced
+an error and green indicating that a commit fixed an error. Use the -e
+flag to see the full errors and -l to see which boards caused which errors.
+
+If you really want to see build results as they happen, use -v when doing a
+build (and -e to see the errors/warnings too).
+
+You don't need to stick around on that branch while buildman is running. It
+checks out its own copy of the source code, so you can change branches,
+add commits, etc. without affecting the build in progress.
+
+The <list of things to build> can include board names, architectures or the
+like. There are no flags to disambiguate since ambiguities are rare. Using
+the examples from MAKEALL:
+
+Examples:
+  - build all Power Architecture boards:
+      MAKEALL -a powerpc
+      MAKEALL --arch powerpc
+      MAKEALL powerpc
+          ** buildman -b <branch> powerpc
+  - build all PowerPC boards manufactured by vendor "esd":
+      MAKEALL -a powerpc -v esd
+          ** buildman -b <branch> esd
+  - build all PowerPC boards manufactured either by "keymile" or "siemens":
+      MAKEALL -a powerpc -v keymile -v siemens
+          ** buildman -b <branch> keymile siemens
+  - build all Freescale boards with MPC83xx CPUs, plus all 4xx boards:
+      MAKEALL -c mpc83xx -v freescale 4xx
+          ** buildman -b <branch> mpc83xx freescale 4xx
+
+Buildman automatically tries to use all the CPUs in your machine. If you
+are building a lot of boards it will use one thread for every CPU core
+it detects in your machine. This is like MAKEALL's BUILD_NBUILDS option.
+You can use the -T flag to change the number of threads. If you are only
+building a few boards, buildman will automatically run make with the -j
+flag to increase the number of concurrent make tasks. It isn't normally
+that helpful to fiddle with this option, but if you use the BUILD_NCPUS
+option in MAKEALL then -j is the equivalent in buildman.
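+
+For example, to limit buildman to four threads:
+
+   ./tools/buildman/buildman -b <branch_name> -T 4 <list of things to build>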
+
+Buildman puts its output in ../<branch_name> by default but you can change
+this with the -o option. Buildman normally does out-of-tree builds: use -i
+to disable that if you really want to. But be careful: once you have used -i
+you will have polluted buildman's copies of the source tree, and you will need
+to remove the build directory (normally ../<branch_name>) before running
+buildman in normal mode (without -i) again.
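+
+For example, to put the build output somewhere other than ../<branch_name>
+(any writable directory will do):
+
+   ./tools/buildman/buildman -b <branch_name> -o /tmp/buildman <list of things to build>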
+
+Buildman does not normally keep the build output; use the -k option if you
+want to keep it.
+
+Please read 'Theory of Operation' a few times as it will make a lot of
+things clearer.
+
+Some options you might like are:
+
+   -B shows which functions are growing/shrinking in which commit - great
+        for finding code bloat.
+   -S shows image sizes for each commit (just an overall summary)
+   -u shows boards that you haven't built yet
+   --step 0 will build just the upstream commit and the last commit of your
+        branch. This is often a quick sanity check that your branch doesn't
+        break anything. But note this does not check bisectability!
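+
+For example, assuming these flags can be combined with the summary flag -s
+(check --help if in doubt), image-size changes across a branch could be
+inspected with:
+
+   ./tools/buildman/buildman -b <branch_name> -sS <list of things to build>
+
+with -B added to show the function-level detail as well.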
+
+
+TODO
+====
+
+This has mostly been written in my spare time as a response to my difficulties
+in testing large series of patches. Apart from tidying up there is quite a
+bit of scope for improvement, such as better error diffs and easier
+access to log files. Also it would be nice if buildman could 'hunt' for
+problems, perhaps by building a few boards for each arch, or checking
+commits for changed files and building only boards which use those files.
+
+A specific problem to fix is that Ctrl-C does not exit buildman cleanly when
+multiple builder threads are active.
+
+Credits
+=======
+
+Thanks to Grant Grundler <grundler@chromium.org> for his ideas for improving
+the build speed by building all commits for a board instead of the other
+way around.
+
+
+Simon Glass
+sjg@chromium.org
+Halloween 2012
+Updated 12-12-12
+Updated 23-02-13
diff --git a/tools/u-boot-tools/buildman/board.py b/tools/u-boot-tools/buildman/board.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a1d021574c1396098dafd41689e62311aa9f289
--- /dev/null
+++ b/tools/u-boot-tools/buildman/board.py
@@ -0,0 +1,309 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2012 The Chromium OS Authors.
+
+import re
+
+class Expr:
+    """A single regular expression for matching boards to build"""
+
+    def __init__(self, expr):
+        """Set up a new Expr object.
+
+        Args:
+            expr: String containing regular expression to store
+        """
+        self._expr = expr
+        self._re = re.compile(expr)
+
+    def Matches(self, props):
+        """Check if any of the properties match the regular expression.
+
+        Args:
+           props: List of properties to check
+        Returns:
+           True if any of the properties match the regular expression
+        """
+        for prop in props:
+            if self._re.match(prop):
+                return True
+        return False
+
+    def __str__(self):
+        return self._expr
+
+class Term:
+    """A list of expressions each of which must match with properties.
+
+    This provides a list of 'AND' expressions, meaning that each must
+    match the board properties for that board to be built.
+    """
+    def __init__(self):
+        self._expr_list = []
+        self._board_count = 0
+
+    def AddExpr(self, expr):
+        """Add an Expr object to the list to check.
+
+        Args:
+            expr: New Expr object to add to the list of those that must
+                  match for a board to be built.
+        """
+        self._expr_list.append(Expr(expr))
+
+    def __str__(self):
+        """Return some sort of useful string describing the term"""
+        return '&'.join([str(expr) for expr in self._expr_list])
+
+    def Matches(self, props):
+        """Check if any of the properties match this term
+
+        Each of the expressions in the term is checked. All must match.
+
+        Args:
+           props: List of properties to check
+        Returns:
+           True if all of the expressions in the Term match, else False
+        """
+        for expr in self._expr_list:
+            if not expr.Matches(props):
+                return False
+        return True
+
+class Board:
+    """A particular board that we can build"""
+    def __init__(self, status, arch, cpu, soc, vendor, board_name, target, options):
+        """Create a new board type.
+
+        Args:
+            status: define whether the board is 'Active' or 'Orphaned'
+            arch: Architecture name (e.g. arm)
+            cpu: Cpu name (e.g. arm1136)
+            soc: Name of SOC, or '' if none (e.g. mx31)
+            vendor: Name of vendor (e.g. armltd)
+            board_name: Name of board (e.g. integrator)
+            target: Target name (use make <target>_defconfig to configure)
+            options: board-specific options (e.g. integratorcp:CM1136)
+        """
+        self.target = target
+        self.arch = arch
+        self.cpu = cpu
+        self.board_name = board_name
+        self.vendor = vendor
+        self.soc = soc
+        self.options = options
+        self.props = [self.target, self.arch, self.cpu, self.board_name,
+                      self.vendor, self.soc, self.options]
+        self.build_it = False
+
+
+class Boards:
+    """Manage a list of boards."""
+    def __init__(self):
+        # Use a simple list here, since OrderedDict requires Python 2.7
+        self._boards = []
+
+    def AddBoard(self, board):
+        """Add a new board to the list.
+
+        The board's target member must not already exist in the board list.
+
+        Args:
+            board: board to add
+        """
+        self._boards.append(board)
+
+    def ReadBoards(self, fname):
+        """Read a list of boards from a board file.
+
+        Create a board object for each and add it to our _boards list.
+
+        Args:
+            fname: Filename of boards.cfg file
+        """
+        with open(fname, 'r') as fd:
+            for line in fd:
+                if line[0] == '#':
+                    continue
+                fields = line.split()
+                if not fields:
+                    continue
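+                # Convert '-' placeholder fields to empty strings and
+                # pad/truncate to the 8 fields that Board() expects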
+                for upto in range(len(fields)):
+                    if fields[upto] == '-':
+                        fields[upto] = ''
+                while len(fields) < 8:
+                    fields.append('')
+                if len(fields) > 8:
+                    fields = fields[:8]
+
+                board = Board(*fields)
+                self.AddBoard(board)
+
+
+    def GetList(self):
+        """Return a list of available boards.
+
+        Returns:
+            List of Board objects
+        """
+        return self._boards
+
+    def GetDict(self):
+        """Build a dictionary containing all the boards.
+
+        Returns:
+            Dictionary:
+                key is board.target
+                value is board
+        """
+        board_dict = {}
+        for board in self._boards:
+            board_dict[board.target] = board
+        return board_dict
+
+    def GetSelectedDict(self):
+        """Return a dictionary containing the selected boards
+
+        Returns:
+            Dictionary of selected Board objects, keyed by target
+        """
+        board_dict = {}
+        for board in self._boards:
+            if board.build_it:
+                board_dict[board.target] = board
+        return board_dict
+
+    def GetSelected(self):
+        """Return a list of selected boards
+
+        Returns:
+            List of Board objects that are marked selected
+        """
+        return [board for board in self._boards if board.build_it]
+
+    def GetSelectedNames(self):
+        """Return a list of selected boards
+
+        Returns:
+            List of board names that are marked selected
+        """
+        return [board.target for board in self._boards if board.build_it]
+
+    def _BuildTerms(self, args):
+        """Convert command line arguments to a list of terms.
+
+        This deals with parsing of the arguments. It handles the '&'
+        operator, which joins several expressions into a single Term.
+
+        For example:
+            ['arm & freescale sandbox', 'tegra']
+
+        will produce 3 Terms containing expressions as follows:
+            arm, freescale
+            sandbox
+            tegra
+
+        The first Term has two expressions, both of which must match for
+        a board to be selected.
+
+        Args:
+            args: List of command line arguments
+        Returns:
+            A list of Term objects
+        """
+        syms = []
+        for arg in args:
+            for word in arg.split():
+                sym_build = []
+                for term in word.split('&'):
+                    if term:
+                        sym_build.append(term)
+                    sym_build.append('&')
+                syms += sym_build[:-1]
+        terms = []
+        term = None
+        oper = None
+        for sym in syms:
+            if sym == '&':
+                oper = sym
+            elif oper:
+                term.AddExpr(sym)
+                oper = None
+            else:
+                if term:
+                    terms.append(term)
+                term = Term()
+                term.AddExpr(sym)
+        if term:
+            terms.append(term)
+        return terms
+
+    def SelectBoards(self, args, exclude=[], boards=None):
+        """Mark boards selected based on args
+
+        Normally either boards (an explicit list of boards) or args (a list of
+        terms to match against) is used. It is possible to specify both, in
+        which case they are additive.
+
+        If boards and args are both empty, all boards are selected.
+
+        Args:
+            args: List of strings specifying boards to include, either named,
+                  or by their target, architecture, cpu, vendor or soc. If
+                  empty, all boards are selected.
+            exclude: List of boards to exclude, regardless of 'args'
+            boards: List of boards to build
+
+        Returns:
+            Tuple
+                Dictionary which holds the list of boards which were selected
+                    due to each argument, arranged by argument.
+                List of warnings found
+        """
+        result = {}
+        warnings = []
+        terms = self._BuildTerms(args)
+
+        result['all'] = []
+        for term in terms:
+            result[str(term)] = []
+
+        exclude_list = []
+        for expr in exclude:
+            exclude_list.append(Expr(expr))
+
+        found = []
+        for board in self._boards:
+            matching_term = None
+            build_it = False
+            if terms:
+                match = False
+                for term in terms:
+                    if term.Matches(board.props):
+                        matching_term = str(term)
+                        build_it = True
+                        break
+            elif boards:
+                if board.target in boards:
+                    build_it = True
+                    found.append(board.target)
+            else:
+                build_it = True
+
+            # Check that it is not specifically excluded
+            for expr in exclude_list:
+                if expr.Matches(board.props):
+                    build_it = False
+                    break
+
+            if build_it:
+                board.build_it = True
+                if matching_term:
+                    result[matching_term].append(board.target)
+                result['all'].append(board.target)
+
+        if boards:
+            remaining = set(boards) - set(found)
+            if remaining:
+                warnings.append('Boards not found: %s\n' % ', '.join(remaining))
+
+        return result, warnings
diff --git a/tools/u-boot-tools/buildman/bsettings.py b/tools/u-boot-tools/buildman/bsettings.py
new file mode 100644
index 0000000000000000000000000000000000000000..03d7439aa5436f5ee143a749808ec6bb2e94830d
--- /dev/null
+++ b/tools/u-boot-tools/buildman/bsettings.py
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2012 The Chromium OS Authors.
+
+import ConfigParser
+import os
+import StringIO
+
+
+def Setup(fname=''):
+    """Set up the buildman settings module by reading config files
+
+    Args:
+        fname: Config filename to read ('' for default)
+    """
+    global settings
+    global config_fname
+
+    settings = ConfigParser.SafeConfigParser()
+    if fname is not None:
+        config_fname = fname
+        if config_fname == '':
+            config_fname = '%s/.buildman' % os.getenv('HOME')
+        if not os.path.exists(config_fname):
+            print 'No config file found ~/.buildman\nCreating one...\n'
+            CreateBuildmanConfigFile(config_fname)
+            print 'To install tool chains, please use the --fetch-arch option'
+        if config_fname:
+            settings.read(config_fname)
+
+def AddFile(data):
+    settings.readfp(StringIO.StringIO(data))
+
+def GetItems(section):
+    """Get the items from a section of the config.
+
+    Args:
+        section: name of section to retrieve
+
+    Returns:
+        List of (name, value) tuples for the section
+    """
+    try:
+        return settings.items(section)
+    except ConfigParser.NoSectionError as e:
+        return []
+    except:
+        raise
+
+def SetItem(section, tag, value):
+    """Set an item and write it back to the settings file"""
+    global settings
+    global config_fname
+
+    settings.set(section, tag, value)
+    if config_fname is not None:
+        with open(config_fname, 'w') as fd:
+            settings.write(fd)
+
+def CreateBuildmanConfigFile(config_fname):
+    """Creates a new config file with no tool chain information.
+
+    Args:
+        config_fname: Config filename to create
+
+    Returns:
+        None
+    """
+    try:
+        f = open(config_fname, 'w')
+    except IOError:
+        print "Couldn't create buildman config file '%s'\n" % config_fname
+        raise
+
+    print >>f, '''[toolchain]
+# name = path
+# e.g. x86 = /opt/gcc-4.6.3-nolibc/x86_64-linux
+
+[toolchain-prefix]
+# name = path to prefix
+# e.g. x86 = /opt/gcc-4.6.3-nolibc/x86_64-linux/bin/x86_64-linux-
+
+[toolchain-alias]
+# arch = alias
+# Indicates which toolchain should be used to build for that arch
+x86 = i386
+blackfin = bfin
+nds32 = nds32le
+openrisc = or1k
+
+[make-flags]
+# Special flags to pass to 'make' for certain boards, e.g. to pass a test
+# flag and build tag to snapper boards:
+# snapper-boards=ENABLE_AT91_TEST=1
+# snapper9260=${snapper-boards} BUILD_TAG=442
+# snapper9g45=${snapper-boards} BUILD_TAG=443
+'''
+    f.close()
diff --git a/tools/u-boot-tools/buildman/builder.py b/tools/u-boot-tools/buildman/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a6c83bf3368dfb48fa3a2a846d11002dcb45158
--- /dev/null
+++ b/tools/u-boot-tools/buildman/builder.py
@@ -0,0 +1,1582 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2013 The Chromium OS Authors.
+#
+# Bloat-o-meter code used here Copyright 2004 Matt Mackall <mpm@selenic.com>
+#
+
+import collections
+from datetime import datetime, timedelta
+import glob
+import os
+import re
+import Queue
+import shutil
+import signal
+import string
+import sys
+import threading
+import time
+
+import builderthread
+import command
+import gitutil
+import terminal
+from terminal import Print
+import toolchain
+
+
+"""
+Theory of Operation
+
+Please see README for user documentation, and you should be familiar with
+that before trying to make sense of this.
+
+Buildman works by keeping the machine as busy as possible, building different
+commits for different boards on multiple CPUs at once.
+
+The source repo (self.git_dir) contains all the commits to be built. Each
+thread works on a single board at a time. It checks out the first commit,
+configures it for that board, then builds it. Then it checks out the next
+commit and builds it (typically without re-configuring). When it runs out
+of commits, it gets another job from the builder and starts again with that
+board.
+
+Clearly the builder threads could work either way - they could check out a
+commit and then build it for all boards. Using separate directories for each
+commit/board pair, they could also leave their build products around
+afterwards.
+
+The intent behind building a single board for multiple commits is to make
+use of incremental builds. Since each commit is built incrementally from
+the previous one, builds are faster. Reconfiguring for a different board
+removes all intermediate object files.
+
+Many threads can be working at once, but each has its own working directory.
+When a thread finishes a build, it puts the output files into a result
+directory.
+
+The base directory used by buildman is normally '../<branch>', i.e.
+a directory higher than the source repository and named after the branch
+being built.
+
+Within the base directory, we have one subdirectory for each commit. Within
+that is one subdirectory for each board. Within that is the build output for
+that commit/board combination.
+
+Buildman also creates working directories for each thread, in a .bm-work/
+subdirectory in the base dir.
+
+As an example, say we are building branch 'us-net' for boards 'sandbox' and
+'seaboard', and say that us-net has two commits. We will have directories
+like this:
+
+us-net/             base directory
+    01_of_02_g4ed4ebc_net--Add-tftp-speed-/
+        sandbox/
+            u-boot.bin
+        seaboard/
+            u-boot.bin
+    02_of_02_g4ed4ebc_net--Check-tftp-comp/
+        sandbox/
+            u-boot.bin
+        seaboard/
+            u-boot.bin
+    .bm-work/
+        00/         working directory for thread 0 (contains source checkout)
+            build/  build output
+        01/         working directory for thread 1
+            build/  build output
+        ...
+u-boot/             source directory
+    .git/           repository
+"""
+
+# Possible build outcomes
+OUTCOME_OK, OUTCOME_WARNING, OUTCOME_ERROR, OUTCOME_UNKNOWN = range(4)
+
+# Translate a commit subject into a valid filename (and handle unicode)
+trans_valid_chars = string.maketrans('/: ', '---')
+trans_valid_chars = trans_valid_chars.decode('latin-1')
+
+BASE_CONFIG_FILENAMES = [
+    'u-boot.cfg', 'u-boot-spl.cfg', 'u-boot-tpl.cfg'
+]
+
+EXTRA_CONFIG_FILENAMES = [
+    '.config', '.config-spl', '.config-tpl',
+    'autoconf.mk', 'autoconf-spl.mk', 'autoconf-tpl.mk',
+    'autoconf.h', 'autoconf-spl.h','autoconf-tpl.h',
+]
+
+class Config:
+    """Holds information about configuration settings for a board."""
+    def __init__(self, config_filename, target):
+        self.target = target
+        self.config = {}
+        for fname in config_filename:
+            self.config[fname] = {}
+
+    def Add(self, fname, key, value):
+        self.config[fname][key] = value
+
+    def __hash__(self):
+        val = 0
+        for fname in self.config:
+            for key, value in self.config[fname].iteritems():
+                val = val ^ hash(key) & hash(value)
+        return val
+
+class Environment:
+    """Holds information about environment variables for a board."""
+    def __init__(self, target):
+        self.target = target
+        self.environment = {}
+
+    def Add(self, key, value):
+        self.environment[key] = value
+
+class Builder:
+    """Class for building U-Boot for a particular commit.
+
+    Public members: (many should ->private)
+        already_done: Number of builds already completed
+        base_dir: Base directory to use for builder
+        checkout: True to check out source, False to skip that step.
+            This is used for testing.
+        col: terminal.Color() object
+        count: Number of commits to build
+        do_make: Method to call to invoke Make
+        fail: Number of builds that failed due to error
+        force_build: Force building even if a build already exists
+        force_config_on_failure: If a commit fails for a board, disable
+            incremental building for the next commit we build for that
+            board, so that we will see all warnings/errors again.
+        force_build_failures: If a previously-built build (i.e. built on
+            a previous run of buildman) is marked as failed, rebuild it.
+        git_dir: Git directory containing source repository
+        last_line_len: Length of the last line we printed (used for erasing
+            it with new progress information)
+        num_jobs: Number of jobs to run at once (passed to make as -j)
+        num_threads: Number of builder threads to run
+        out_queue: Queue of results to process
+        re_make_err: Compiled regular expression for ignore_lines
+        queue: Queue of jobs to run
+        threads: List of active threads
+        toolchains: Toolchains object to use for building
+        upto: Current commit number we are building (0..count-1)
+        warned: Number of builds that produced at least one warning
+        force_reconfig: Reconfigure U-Boot on each commit. This disables
+            incremental building, where buildman reconfigures on the first
+            commit for a board, and then just does an incremental build for
+            the following commits. In fact buildman will reconfigure and
+            retry for any failing commits, so generally the only effect of
+            this option is to slow things down.
+        in_tree: Build U-Boot in-tree instead of specifying an output
+            directory separate from the source code. This option is really
+            only useful for testing in-tree builds.
+
+    Private members:
+        _base_board_dict: Last-summarised Dict of boards
+        _base_err_lines: Last-summarised list of errors
+        _base_warn_lines: Last-summarised list of warnings
+        _build_period_us: Time taken for a single build (float object).
+        _complete_delay: Expected delay until completion (timedelta)
+        _next_delay_update: Next time we plan to display a progress update
+                (datetime)
+        _show_unknown: Show unknown boards (those not built) in summary
+        _timestamps: List of timestamps for the completion of the last
+            _timestamp_count builds. Each is a datetime object.
+        _timestamp_count: Number of timestamps to keep in our list.
+        _working_dir: Base working directory containing all threads
+    """
+    class Outcome:
+        """Records a build outcome for a single make invocation
+
+        Public Members:
+            rc: Outcome value (OUTCOME_...)
+            err_lines: List of error lines or [] if none
+            sizes: Dictionary of image size information, keyed by filename
+                - Each value is itself a dictionary containing
+                    values for 'text', 'data' and 'bss', being the integer
+                    size in bytes of each section.
+            func_sizes: Dictionary keyed by filename - e.g. 'u-boot'. Each
+                    value is itself a dictionary:
+                        key: function name
+                        value: Size of function in bytes
+            config: Dictionary keyed by filename - e.g. '.config'. Each
+                    value is itself a dictionary:
+                        key: config name
+                        value: config value
+            environment: Dictionary keyed by environment variable. Each
+                     value is the value of that environment variable.
+        """
+        def __init__(self, rc, err_lines, sizes, func_sizes, config,
+                     environment):
+            self.rc = rc
+            self.err_lines = err_lines
+            self.sizes = sizes
+            self.func_sizes = func_sizes
+            self.config = config
+            self.environment = environment
+
+    def __init__(self, toolchains, base_dir, git_dir, num_threads, num_jobs,
+                 gnu_make='make', checkout=True, show_unknown=True, step=1,
+                 no_subdirs=False, full_path=False, verbose_build=False,
+                 incremental=False, per_board_out_dir=False,
+                 config_only=False, squash_config_y=False,
+                 warnings_as_errors=False):
+        """Create a new Builder object
+
+        Args:
+            toolchains: Toolchains object to use for building
+            base_dir: Base directory to use for builder
+            git_dir: Git directory containing source repository
+            num_threads: Number of builder threads to run
+            num_jobs: Number of jobs to run at once (passed to make as -j)
+            gnu_make: the command name of GNU Make.
+            checkout: True to check out source, False to skip that step.
+                This is used for testing.
+            show_unknown: Show unknown boards (those not built) in summary
+            step: 1 to process every commit, n to process every nth commit
+            no_subdirs: Don't create subdirectories when building current
+                source for a single board
+            full_path: Return the full path in CROSS_COMPILE and don't set
+                PATH
+            verbose_build: Run build with V=1 and don't use 'make -s'
+            incremental: Always perform incremental builds; don't run make
+                mrproper when configuring
+            per_board_out_dir: Build in a separate persistent directory per
+                board rather than a thread-specific directory
+            config_only: Only configure each build, don't build it
+            squash_config_y: Convert CONFIG options with the value 'y' to '1'
+            warnings_as_errors: Treat all compiler warnings as errors
+        """
+        self.toolchains = toolchains
+        self.base_dir = base_dir
+        self._working_dir = os.path.join(base_dir, '.bm-work')
+        self.threads = []
+        self.do_make = self.Make
+        self.gnu_make = gnu_make
+        self.checkout = checkout
+        self.num_threads = num_threads
+        self.num_jobs = num_jobs
+        self.already_done = 0
+        self.force_build = False
+        self.git_dir = git_dir
+        self._show_unknown = show_unknown
+        self._timestamp_count = 10
+        self._build_period_us = None
+        self._complete_delay = None
+        self._next_delay_update = datetime.now()
+        self.force_config_on_failure = True
+        self.force_build_failures = False
+        self.force_reconfig = False
+        self._step = step
+        self.in_tree = False
+        self._error_lines = 0
+        self.no_subdirs = no_subdirs
+        self.full_path = full_path
+        self.verbose_build = verbose_build
+        self.config_only = config_only
+        self.squash_config_y = squash_config_y
+        self.config_filenames = BASE_CONFIG_FILENAMES
+        if not self.squash_config_y:
+            self.config_filenames += EXTRA_CONFIG_FILENAMES
+
+        self.warnings_as_errors = warnings_as_errors
+        self.col = terminal.Color()
+
+        self._re_function = re.compile('(.*): In function.*')
+        self._re_files = re.compile('In file included from.*')
+        self._re_warning = re.compile('(.*):(\d*):(\d*): warning: .*')
+        self._re_dtb_warning = re.compile('(.*): Warning .*')
+        self._re_note = re.compile('(.*):(\d*):(\d*): note: this is the location of the previous.*')
+
+        self.queue = Queue.Queue()
+        self.out_queue = Queue.Queue()
+        for i in range(self.num_threads):
+            t = builderthread.BuilderThread(self, i, incremental,
+                    per_board_out_dir)
+            t.setDaemon(True)
+            t.start()
+            self.threads.append(t)
+
+        self.last_line_len = 0
+        t = builderthread.ResultThread(self)
+        t.setDaemon(True)
+        t.start()
+        self.threads.append(t)
+
+        ignore_lines = ['(make.*Waiting for unfinished)', '(Segmentation fault)']
+        self.re_make_err = re.compile('|'.join(ignore_lines))
+
+        # Handle exiting gracefully on SIGINT / Ctrl-C
+        signal.signal(signal.SIGINT, self.signal_handler)
+
+    def __del__(self):
+        """Get rid of all threads created by the builder"""
+        for t in self.threads:
+            del t
+
+    def signal_handler(self, signal, frame):
+        sys.exit(1)
+
+    def SetDisplayOptions(self, show_errors=False, show_sizes=False,
+                          show_detail=False, show_bloat=False,
+                          list_error_boards=False, show_config=False,
+                          show_environment=False):
+        """Setup display options for the builder.
+
+        show_errors: True to show summarised error/warning info
+        show_sizes: Show size deltas
+        show_detail: Show detail for each board
+        show_bloat: Show detail for each function
+        list_error_boards: Show the boards which caused each error/warning
+        show_config: Show config deltas
+        show_environment: Show environment deltas
+        """
+        self._show_errors = show_errors
+        self._show_sizes = show_sizes
+        self._show_detail = show_detail
+        self._show_bloat = show_bloat
+        self._list_error_boards = list_error_boards
+        self._show_config = show_config
+        self._show_environment = show_environment
+
+    def _AddTimestamp(self):
+        """Add a new timestamp to the list and record the build period.
+
+        The build period is the length of time taken to perform a single
+        build (one board, one commit).
+        """
+        now = datetime.now()
+        self._timestamps.append(now)
+        count = len(self._timestamps)
+        delta = self._timestamps[-1] - self._timestamps[0]
+        seconds = delta.total_seconds()
+
+        # If we have enough data, estimate build period (time taken for a
+        # single build) and therefore completion time.
+        if count > 1 and self._next_delay_update < now:
+            self._next_delay_update = now + timedelta(seconds=2)
+            if seconds > 0:
+                self._build_period = float(seconds) / count
+                todo = self.count - self.upto
+                self._complete_delay = timedelta(microseconds=
+                        self._build_period * todo * 1000000)
+                # Round it
+                self._complete_delay -= timedelta(
+                        microseconds=self._complete_delay.microseconds)
+
+        if seconds > 60:
+            self._timestamps.popleft()
+            count -= 1
+
+    def ClearLine(self, length):
+        """Clear any characters on the current line
+
+        Make way for a new line of length 'length', by outputting enough
+        spaces to clear out the old line. Then remember the new length for
+        next time.
+
+        Args:
+            length: Length of new line, in characters
+        """
+        if length < self.last_line_len:
+            Print(' ' * (self.last_line_len - length), newline=False)
+            Print('\r', newline=False)
+        self.last_line_len = length
+        sys.stdout.flush()
+
+    def SelectCommit(self, commit, checkout=True):
+        """Checkout the selected commit for this build
+        """
+        self.commit = commit
+        if checkout and self.checkout:
+            gitutil.Checkout(commit.hash)
+
+    def Make(self, commit, brd, stage, cwd, *args, **kwargs):
+        """Run make
+
+        Args:
+            commit: Commit object that is being built
+            brd: Board object that is being built
+            stage: Stage that we are at (mrproper, config, build)
+            cwd: Directory where make should be run
+            args: Arguments to pass to make
+            kwargs: Arguments to pass to command.RunPipe()
+        """
+        cmd = [self.gnu_make] + list(args)
+        result = command.RunPipe([cmd], capture=True, capture_stderr=True,
+                cwd=cwd, raise_on_error=False, infile='/dev/null', **kwargs)
+        if self.verbose_build:
+            result.stdout = '%s\n' % (' '.join(cmd)) + result.stdout
+            result.combined = '%s\n' % (' '.join(cmd)) + result.combined
+        return result
+
+    def ProcessResult(self, result):
+        """Process the result of a build, showing progress information
+
+        Args:
+            result: A CommandResult object, which indicates the result for
+                    a single build
+        """
+        col = terminal.Color()
+        if result:
+            target = result.brd.target
+
+            self.upto += 1
+            if result.return_code != 0:
+                self.fail += 1
+            elif result.stderr:
+                self.warned += 1
+            if result.already_done:
+                self.already_done += 1
+            if self._verbose:
+                Print('\r', newline=False)
+                self.ClearLine(0)
+                boards_selected = {target : result.brd}
+                self.ResetResultSummary(boards_selected)
+                self.ProduceResultSummary(result.commit_upto, self.commits,
+                                          boards_selected)
+        else:
+            target = '(starting)'
+
+        # Display separate counts for ok, warned and fail
+        ok = self.upto - self.warned - self.fail
+        line = '\r' + self.col.Color(self.col.GREEN, '%5d' % ok)
+        line += self.col.Color(self.col.YELLOW, '%5d' % self.warned)
+        line += self.col.Color(self.col.RED, '%5d' % self.fail)
+
+        name = ' /%-5d  ' % self.count
+
+        # Add our current completion time estimate
+        self._AddTimestamp()
+        if self._complete_delay:
+            name += '%s  : ' % self._complete_delay
+        # When building all boards for a commit, we can print a commit
+        # progress message.
+        if result and result.commit_upto is None:
+            name += 'commit %2d/%-3d' % (self.commit_upto + 1,
+                    self.commit_count)
+
+        name += target
+        Print(line + name, newline=False)
+        length = 16 + len(name)
+        self.ClearLine(length)
+
+    def _GetOutputDir(self, commit_upto):
+        """Get the name of the output directory for a commit number
+
+        The output directory is typically .../<branch>/<commit>.
+
+        Args:
+            commit_upto: Commit number to use (0..self.count-1)
+        """
+        commit_dir = None
+        if self.commits:
+            commit = self.commits[commit_upto]
+            subject = commit.subject.translate(trans_valid_chars)
+            commit_dir = ('%02d_of_%02d_g%s_%s' % (commit_upto + 1,
+                    self.commit_count, commit.hash, subject[:20]))
+        elif not self.no_subdirs:
+            commit_dir = 'current'
+        if not commit_dir:
+            return self.base_dir
+        return os.path.join(self.base_dir, commit_dir)
+
+    def GetBuildDir(self, commit_upto, target):
+        """Get the name of the build directory for a commit number
+
+        The build directory is typically .../<branch>/<commit>/<target>.
+
+        Args:
+            commit_upto: Commit number to use (0..self.count-1)
+            target: Target name
+        """
+        output_dir = self._GetOutputDir(commit_upto)
+        return os.path.join(output_dir, target)
+
+    def GetDoneFile(self, commit_upto, target):
+        """Get the name of the done file for a commit number
+
+        Args:
+            commit_upto: Commit number to use (0..self.count-1)
+            target: Target name
+        """
+        return os.path.join(self.GetBuildDir(commit_upto, target), 'done')
+
+    def GetSizesFile(self, commit_upto, target):
+        """Get the name of the sizes file for a commit number
+
+        Args:
+            commit_upto: Commit number to use (0..self.count-1)
+            target: Target name
+        """
+        return os.path.join(self.GetBuildDir(commit_upto, target), 'sizes')
+
+    def GetFuncSizesFile(self, commit_upto, target, elf_fname):
+        """Get the name of the funcsizes file for a commit number and ELF file
+
+        Args:
+            commit_upto: Commit number to use (0..self.count-1)
+            target: Target name
+            elf_fname: Filename of elf image
+        """
+        return os.path.join(self.GetBuildDir(commit_upto, target),
+                            '%s.sizes' % elf_fname.replace('/', '-'))
+
+    def GetObjdumpFile(self, commit_upto, target, elf_fname):
+        """Get the name of the objdump file for a commit number and ELF file
+
+        Args:
+            commit_upto: Commit number to use (0..self.count-1)
+            target: Target name
+            elf_fname: Filename of elf image
+        """
+        return os.path.join(self.GetBuildDir(commit_upto, target),
+                            '%s.objdump' % elf_fname.replace('/', '-'))
+
+    def GetErrFile(self, commit_upto, target):
+        """Get the name of the err file for a commit number
+
+        Args:
+            commit_upto: Commit number to use (0..self.count-1)
+            target: Target name
+        """
+        output_dir = self.GetBuildDir(commit_upto, target)
+        return os.path.join(output_dir, 'err')
+
+    def FilterErrors(self, lines):
+        """Filter out errors in which we have no interest
+
+        We should probably use map().
+
+        Args:
+            lines: List of error lines, each a string
+        Returns:
+            New list with only interesting lines included
+        """
+        out_lines = []
+        for line in lines:
+            if not self.re_make_err.search(line):
+                out_lines.append(line)
+        return out_lines
+
+    def ReadFuncSizes(self, fname, fd):
+        """Read function sizes from the output of 'nm'
+
+        Args:
+            fd: File containing data to read
+            fname: Filename we are reading from (just for errors)
+
+        Returns:
+            Dictionary containing size of each function in bytes, indexed by
+            function name.
+        """
+        sym = {}
+        for line in fd.readlines():
+            try:
+                size, type, name = line[:-1].split()
+            except:
+                Print("Invalid line in file '%s': '%s'" % (fname, line[:-1]))
+                continue
+            if type in 'tTdDbB':
+                # function names begin with '.' on 64-bit powerpc
+                if '.' in name[1:]:
+                    name = 'static.' + name.split('.')[0]
+                sym[name] = sym.get(name, 0) + int(size, 16)
+        return sym
+
+    def _ProcessConfig(self, fname):
+        """Read in a .config, autoconf.mk or autoconf.h file
+
+        This function handles all config file types. It ignores comments and
+        any #defines which don't start with CONFIG_.
+
+        Args:
+            fname: Filename to read
+
+        Returns:
+            Dictionary:
+                key: Config name (e.g. CONFIG_DM)
+                value: Config value (e.g. 1)
+        """
+        config = {}
+        if os.path.exists(fname):
+            with open(fname) as fd:
+                for line in fd:
+                    line = line.strip()
+                    if line.startswith('#define'):
+                        values = line[8:].split(' ', 1)
+                        if len(values) > 1:
+                            key, value = values
+                        else:
+                            key = values[0]
+                            value = '1' if self.squash_config_y else ''
+                        if not key.startswith('CONFIG_'):
+                            continue
+                    elif not line or line[0] in ['#', '*', '/']:
+                        continue
+                    else:
+                        key, value = line.split('=', 1)
+                    if self.squash_config_y and value == 'y':
+                        value = '1'
+                    config[key] = value
+        return config
+
+    def _ProcessEnvironment(self, fname):
+        """Read in a uboot.env file
+
+        This function reads in environment variables from a file.
+
+        Args:
+            fname: Filename to read
+
+        Returns:
+            Dictionary:
+                key: environment variable (e.g. bootlimit)
+                value: value of environment variable (e.g. 1)
+        """
+        environment = {}
+        if os.path.exists(fname):
+            with open(fname) as fd:
+                for line in fd.read().split('\0'):
+                    try:
+                        key, value = line.split('=', 1)
+                        environment[key] = value
+                    except ValueError:
+                        # ignore lines we can't parse
+                        pass
+        return environment
+
+    def GetBuildOutcome(self, commit_upto, target, read_func_sizes,
+                        read_config, read_environment):
+        """Work out the outcome of a build.
+
+        Args:
+            commit_upto: Commit number to check (0..n-1)
+            target: Target board to check
+            read_func_sizes: True to read function size information
+            read_config: True to read .config and autoconf.h files
+            read_environment: True to read uboot.env files
+
+        Returns:
+            Outcome object
+        """
+        done_file = self.GetDoneFile(commit_upto, target)
+        sizes_file = self.GetSizesFile(commit_upto, target)
+        sizes = {}
+        func_sizes = {}
+        config = {}
+        environment = {}
+        if os.path.exists(done_file):
+            with open(done_file, 'r') as fd:
+                return_code = int(fd.readline())
+                err_lines = []
+                err_file = self.GetErrFile(commit_upto, target)
+                if os.path.exists(err_file):
+                    with open(err_file, 'r') as fd:
+                        err_lines = self.FilterErrors(fd.readlines())
+
+                # Decide whether the build was ok, failed or created warnings
+                if return_code:
+                    rc = OUTCOME_ERROR
+                elif len(err_lines):
+                    rc = OUTCOME_WARNING
+                else:
+                    rc = OUTCOME_OK
+
+                # Convert size information to our simple format
+                if os.path.exists(sizes_file):
+                    with open(sizes_file, 'r') as fd:
+                        for line in fd.readlines():
+                            values = line.split()
+                            rodata = 0
+                            if len(values) > 6:
+                                rodata = int(values[6], 16)
+                            size_dict = {
+                                'all' : int(values[0]) + int(values[1]) +
+                                        int(values[2]),
+                                'text' : int(values[0]) - rodata,
+                                'data' : int(values[1]),
+                                'bss' : int(values[2]),
+                                'rodata' : rodata,
+                            }
+                            sizes[values[5]] = size_dict
+
+            if read_func_sizes:
+                pattern = self.GetFuncSizesFile(commit_upto, target, '*')
+                for fname in glob.glob(pattern):
+                    with open(fname, 'r') as fd:
+                        dict_name = os.path.basename(fname).replace('.sizes',
+                                                                    '')
+                        func_sizes[dict_name] = self.ReadFuncSizes(fname, fd)
+
+            if read_config:
+                output_dir = self.GetBuildDir(commit_upto, target)
+                for name in self.config_filenames:
+                    fname = os.path.join(output_dir, name)
+                    config[name] = self._ProcessConfig(fname)
+
+            if read_environment:
+                output_dir = self.GetBuildDir(commit_upto, target)
+                fname = os.path.join(output_dir, 'uboot.env')
+                environment = self._ProcessEnvironment(fname)
+
+            return Builder.Outcome(rc, err_lines, sizes, func_sizes, config,
+                                   environment)
+
+        return Builder.Outcome(OUTCOME_UNKNOWN, [], {}, {}, {}, {})
+
+    def GetResultSummary(self, boards_selected, commit_upto, read_func_sizes,
+                         read_config, read_environment):
+        """Calculate a summary of the results of building a commit.
+
+        Args:
+            boards_selected: Dict containing boards to summarise
+            commit_upto: Commit number to summarize (0..self.count-1)
+            read_func_sizes: True to read function size information
+            read_config: True to read .config and autoconf.h files
+            read_environment: True to read uboot.env files
+
+        Returns:
+            Tuple:
+                Dict containing boards which passed building this commit.
+                    keyed by board.target
+                List containing a summary of error lines
+                Dict keyed by error line, containing a list of the Board
+                    objects with that error
+                List containing a summary of warning lines
+                Dict keyed by warning line, containing a list of the Board
+                    objects with that warning
+                Dictionary keyed by board.target. Each value is a dictionary:
+                    key: filename - e.g. '.config'
+                    value is itself a dictionary:
+                        key: config name
+                        value: config value
+                Dictionary keyed by board.target. Each value is a dictionary:
+                    key: environment variable
+                    value: value of environment variable
+        """
+        def AddLine(lines_summary, lines_boards, line, board):
+            line = line.rstrip()
+            if line in lines_boards:
+                lines_boards[line].append(board)
+            else:
+                lines_boards[line] = [board]
+                lines_summary.append(line)
+
+        board_dict = {}
+        err_lines_summary = []
+        err_lines_boards = {}
+        warn_lines_summary = []
+        warn_lines_boards = {}
+        config = {}
+        environment = {}
+
+        for board in boards_selected.itervalues():
+            outcome = self.GetBuildOutcome(commit_upto, board.target,
+                                           read_func_sizes, read_config,
+                                           read_environment)
+            board_dict[board.target] = outcome
+            last_func = None
+            last_was_warning = False
+            for line in outcome.err_lines:
+                if line:
+                    if (self._re_function.match(line) or
+                            self._re_files.match(line)):
+                        last_func = line
+                    else:
+                        is_warning = (self._re_warning.match(line) or
+                                      self._re_dtb_warning.match(line))
+                        is_note = self._re_note.match(line)
+                        if is_warning or (last_was_warning and is_note):
+                            if last_func:
+                                AddLine(warn_lines_summary, warn_lines_boards,
+                                        last_func, board)
+                            AddLine(warn_lines_summary, warn_lines_boards,
+                                    line, board)
+                        else:
+                            if last_func:
+                                AddLine(err_lines_summary, err_lines_boards,
+                                        last_func, board)
+                            AddLine(err_lines_summary, err_lines_boards,
+                                    line, board)
+                        last_was_warning = is_warning
+                        last_func = None
+            tconfig = Config(self.config_filenames, board.target)
+            for fname in self.config_filenames:
+                if outcome.config:
+                    for key, value in outcome.config[fname].iteritems():
+                        tconfig.Add(fname, key, value)
+            config[board.target] = tconfig
+
+            tenvironment = Environment(board.target)
+            if outcome.environment:
+                for key, value in outcome.environment.iteritems():
+                    tenvironment.Add(key, value)
+            environment[board.target] = tenvironment
+
+        return (board_dict, err_lines_summary, err_lines_boards,
+                warn_lines_summary, warn_lines_boards, config, environment)
+
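
The pairing of summary lines with per-line board lists described above is easiest to see with toy data. A minimal sketch (plain strings stand in for the Board objects, and the error text is made up; this is not part of the patch):

# Sketch only: toy data in the shape returned by GetResultSummary().
err_lines = ['undefined reference to `foo_init`']
err_line_boards = {err_lines[0]: ['board_a', 'board_b']}  # Board objects in real use
for line in err_lines:
    # Print each summary line together with the boards that produced it
    print '(%s) %s' % (','.join(err_line_boards[line]), line)
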
+    def AddOutcome(self, board_dict, arch_list, changes, char, color):
+        """Add an output to our list of outcomes for each architecture
+
+        This simple function adds failing boards (changes) to the
+        relevant architecture string, so we can print the results out
+        sorted by architecture.
+
+        Args:
+             board_dict: Dict containing all boards
+             arch_list: Dict keyed by arch name. Value is a string containing
+                    a list of board names which failed for that arch.
+             changes: List of boards to add to arch_list
+             char: Character to display once per architecture before the
+                    board list (e.g. '+' for newly-broken boards)
+             color: terminal.Colour object
+        """
+        done_arch = {}
+        for target in changes:
+            if target in board_dict:
+                arch = board_dict[target].arch
+            else:
+                arch = 'unknown'
+            str = self.col.Color(color, ' ' + target)
+            if not arch in done_arch:
+                str = ' %s  %s' % (self.col.Color(color, char), str)
+                done_arch[arch] = True
+            if not arch in arch_list:
+                arch_list[arch] = str
+            else:
+                arch_list[arch] += str
+
+
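AddOutcome() builds one display string per architecture, emitting the marker character only once per arch. A stand-alone sketch of that accumulation (board names, arches and the marker are made up; colouring is omitted):

# Sketch only: how the per-arch strings build up.
arch_of = {'board_a': 'arm', 'board_b': 'arm', 'board_c': 'sandbox'}
arch_list = {}
done_arch = {}
for target in ['board_a', 'board_b', 'board_c']:
    arch = arch_of.get(target, 'unknown')
    entry = ' ' + target
    if arch not in done_arch:
        entry = ' +  %s' % entry      # the marker appears once per arch
        done_arch[arch] = True
    arch_list[arch] = arch_list.get(arch, '') + entry
print arch_list   # e.g. {'arm': ' +   board_a board_b', 'sandbox': ' +   board_c'}
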
+    def ColourNum(self, num):
+        """Return a number in colour: red if positive, green if negative,
+        plain '0' if zero"""
+        color = self.col.RED if num > 0 else self.col.GREEN
+        if num == 0:
+            return '0'
+        return self.col.Color(color, str(num))
+
+    def ResetResultSummary(self, board_selected):
+        """Reset the results summary ready for use.
+
+        Set up the base board list to be all those selected, and set the
+        error lines to empty.
+
+        Following this, calls to PrintResultSummary() will use this
+        information to work out what has changed.
+
+        Args:
+            board_selected: Dict containing boards to summarise, keyed by
+                board.target
+        """
+        self._base_board_dict = {}
+        for board in board_selected:
+            self._base_board_dict[board] = Builder.Outcome(0, [], [], {}, {},
+                                                           {})
+        self._base_err_lines = []
+        self._base_warn_lines = []
+        self._base_err_line_boards = {}
+        self._base_warn_line_boards = {}
+        self._base_config = None
+        self._base_environment = None
+
+    def PrintFuncSizeDetail(self, fname, old, new):
+        """Print changes in function sizes between two builds
+
+        Args:
+            fname: Image file the sizes relate to (e.g. 'u-boot')
+            old: Dict of function sizes for the base build, keyed by
+                function name
+            new: Dict of function sizes for the current build, keyed by
+                function name
+        """
+        grow, shrink, add, remove, up, down = 0, 0, 0, 0, 0, 0
+        delta, common = [], {}
+
+        for a in old:
+            if a in new:
+                common[a] = 1
+
+        for name in old:
+            if name not in common:
+                remove += 1
+                down += old[name]
+                delta.append([-old[name], name])
+
+        for name in new:
+            if name not in common:
+                add += 1
+                up += new[name]
+                delta.append([new[name], name])
+
+        for name in common:
+            diff = new.get(name, 0) - old.get(name, 0)
+            if diff > 0:
+                grow, up = grow + 1, up + diff
+            elif diff < 0:
+                shrink, down = shrink + 1, down - diff
+            delta.append([diff, name])
+
+        delta.sort()
+        delta.reverse()
+
+        args = [add, -remove, grow, -shrink, up, -down, up - down]
+        if max(args) == 0 and min(args) == 0:
+            return
+        args = [self.ColourNum(x) for x in args]
+        indent = ' ' * 15
+        Print('%s%s: add: %s/%s, grow: %s/%s bytes: %s/%s (%s)' %
+              tuple([indent, self.col.Color(self.col.YELLOW, fname)] + args))
+        Print('%s  %-38s %7s %7s %+7s' % (indent, 'function', 'old', 'new',
+                                         'delta'))
+        for diff, name in delta:
+            if diff:
+                color = self.col.RED if diff > 0 else self.col.GREEN
+                msg = '%s  %-38s %7s %7s %+7d' % (indent, name,
+                        old.get(name, '-'), new.get(name,'-'), diff)
+                Print(msg, colour=color)
+
+
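To make the grow/shrink bookkeeping in PrintFuncSizeDetail() concrete, here is a tiny worked example with made-up function sizes, computed stand-alone rather than through the class:

# Sketch only: function-size dicts (name -> bytes) for a base ('old') and
# current ('new') build, as PrintFuncSizeDetail() receives them.
old = {'main': 100, 'helper': 40, 'dropped': 20}
new = {'main': 110, 'helper': 30, 'added': 50}

up = sum(new[f] for f in new if f not in old)        # 50 bytes added
up += sum(new[f] - old[f] for f in old
          if f in new and new[f] > old[f])           # +10 grown  -> up = 60
down = sum(old[f] for f in old if f not in new)      # 20 bytes removed
down += sum(old[f] - new[f] for f in old
            if f in new and new[f] < old[f])         # +10 shrunk -> down = 30
print 'add: 1/-1, grow: 1/-1 bytes: %d/-%d (%+d)' % (up, down, up - down)
# -> add: 1/-1, grow: 1/-1 bytes: 60/-30 (+30)
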
+    def PrintSizeDetail(self, target_list, show_bloat):
+        """Show detailed size information for each board
+
+        Args:
+            target_list: List of targets, each a dict containing:
+                    'target': Target name
+                    'total_diff': Total difference in bytes across all areas
+                    <part_name>: Difference for that part
+            show_bloat: Show detail for each function
+        """
+        targets_by_diff = sorted(target_list, reverse=True,
+                                 key=lambda x: x['_total_diff'])
+        for result in targets_by_diff:
+            printed_target = False
+            for name in sorted(result):
+                diff = result[name]
+                if name.startswith('_'):
+                    continue
+                if diff != 0:
+                    color = self.col.RED if diff > 0 else self.col.GREEN
+                msg = ' %s %+d' % (name, diff)
+                if not printed_target:
+                    Print('%10s  %-15s:' % ('', result['_target']),
+                          newline=False)
+                    printed_target = True
+                Print(msg, colour=color, newline=False)
+            if printed_target:
+                Print()
+                if show_bloat:
+                    target = result['_target']
+                    outcome = result['_outcome']
+                    base_outcome = self._base_board_dict[target]
+                    for fname in outcome.func_sizes:
+                        self.PrintFuncSizeDetail(fname,
+                                                 base_outcome.func_sizes[fname],
+                                                 outcome.func_sizes[fname])
+
+
+    def PrintSizeSummary(self, board_selected, board_dict, show_detail,
+                         show_bloat):
+        """Print a summary of image sizes broken down by section.
+
+        The summary takes the form of one line per architecture. The
+        line contains deltas for each of the sections (+ means the section
+        got bigger, - means smaller). The numbers are the average number
+        of bytes that a board in this architecture increased by.
+
+        For example:
+           powerpc: (622 boards)   text -0.0
+           arm: (285 boards)   text -0.0
+           nds32: (3 boards)   text -8.0
+
+        Args:
+            board_selected: Dict containing boards to summarise, keyed by
+                board.target
+            board_dict: Dict containing boards for which we built this
+                commit, keyed by board.target. The value is an Outcome object.
+            show_detail: Show detail for each board
+            show_bloat: Show detail for each function
+        """
+        arch_list = {}
+        arch_count = {}
+
+        # Calculate changes in size for different image parts
+        # The previous sizes are in Board.sizes, for each board
+        for target in board_dict:
+            if target not in board_selected:
+                continue
+            base_sizes = self._base_board_dict[target].sizes
+            outcome = board_dict[target]
+            sizes = outcome.sizes
+
+            # Loop through the list of images, creating a dict of size
+            # changes for each image/part. We end up with something like
+            # {'_target' : 'snapper9g45', 'data' : 5, 'u-boot-spl:text' : -4}
+            # which means that U-Boot data increased by 5 bytes and SPL
+            # text decreased by 4.
+            err = {'_target' : target}
+            for image in sizes:
+                if image in base_sizes:
+                    base_image = base_sizes[image]
+                    # Loop through the text, data, bss parts
+                    for part in sorted(sizes[image]):
+                        diff = sizes[image][part] - base_image[part]
+                        col = None
+                        if diff:
+                            if image == 'u-boot':
+                                name = part
+                            else:
+                                name = image + ':' + part
+                            err[name] = diff
+            arch = board_selected[target].arch
+            if not arch in arch_count:
+                arch_count[arch] = 1
+            else:
+                arch_count[arch] += 1
+            if not sizes:
+                pass    # Only add to our list when we have some stats
+            elif not arch in arch_list:
+                arch_list[arch] = [err]
+            else:
+                arch_list[arch].append(err)
+
+        # We now have a list of image size changes sorted by arch
+        # Print out a summary of these
+        for arch, target_list in arch_list.iteritems():
+            # Get total difference for each type
+            totals = {}
+            for result in target_list:
+                total = 0
+                for name, diff in result.iteritems():
+                    if name.startswith('_'):
+                        continue
+                    total += diff
+                    if name in totals:
+                        totals[name] += diff
+                    else:
+                        totals[name] = diff
+                result['_total_diff'] = total
+                result['_outcome'] = board_dict[result['_target']]
+
+            count = len(target_list)
+            printed_arch = False
+            for name in sorted(totals):
+                diff = totals[name]
+                if diff:
+                    # Display the average difference in this name for this
+                    # architecture
+                    avg_diff = float(diff) / count
+                    color = self.col.RED if avg_diff > 0 else self.col.GREEN
+                    msg = ' %s %+1.1f' % (name, avg_diff)
+                    if not printed_arch:
+                        Print('%10s: (for %d/%d boards)' % (arch, count,
+                              arch_count[arch]), newline=False)
+                        printed_arch = True
+                    Print(msg, colour=color, newline=False)
+
+            if printed_arch:
+                Print()
+                if show_detail:
+                    self.PrintSizeDetail(target_list, show_bloat)
+
+
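The per-architecture figures printed by PrintSizeSummary() are plain averages over the boards that produced size data. A small stand-alone worked example with two hypothetical boards of the same arch:

# Sketch only: size-change dicts in the format built up above,
# e.g. {'_target': <board name>, 'text': <delta in bytes>}.
target_list = [{'_target': 'board_a', 'text': 8},
               {'_target': 'board_b', 'text': -4}]
total = sum(t['text'] for t in target_list)        # 8 + (-4) = 4
avg_diff = float(total) / len(target_list)         # +2.0 bytes per board
print '%10s: (for %d/%d boards)  text %+1.1f' % ('arm', 2, 2, avg_diff)
# ->        arm: (for 2/2 boards)  text +2.0
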
+    def PrintResultSummary(self, board_selected, board_dict, err_lines,
+                           err_line_boards, warn_lines, warn_line_boards,
+                           config, environment, show_sizes, show_detail,
+                           show_bloat, show_config, show_environment):
+        """Compare results with the base results and display delta.
+
+        Only boards mentioned in board_selected will be considered. This
+        function is intended to be called repeatedly with the results of
+        each commit. It therefore shows a 'diff' between what it saw in
+        the last call and what it sees now.
+
+        Args:
+            board_selected: Dict containing boards to summarise, keyed by
+                board.target
+            board_dict: Dict containing boards for which we built this
+                commit, keyed by board.target. The value is an Outcome object.
+            err_lines: A list of errors for this commit, or [] if there is
+                none, or we don't want to print errors
+            err_line_boards: Dict keyed by error line, containing a list of
+                the Board objects with that error
+            warn_lines: A list of warnings for this commit, or [] if there is
+                none, or we don't want to print warnings
+            warn_line_boards: Dict keyed by warning line, containing a list of
+                the Board objects with that warning
+            config: Dictionary keyed by board.target. Each value is a Config
+                    object holding, for each config filename (e.g. '.config'),
+                    a dictionary:
+                        key: config name
+                        value: config value
+            environment: Dictionary keyed by board.target. Each value is an
+                    Environment object holding a dictionary of environment
+                    variables and their values
+            show_sizes: Show image size deltas
+            show_detail: Show detail for each board
+            show_bloat: Show detail for each function
+            show_config: Show config changes
+            show_environment: Show environment changes
+        """
+        def _BoardList(line, line_boards):
+            """Helper function to get a string of boards which have a line
+
+            Args:
+                line: Error or warning line to search for
+                line_boards: Dict keyed by line, containing a list of the
+                    Board objects with that line
+            Returns:
+                String containing a list of boards with that line, or
+                '' if the user has not requested such a list
+            """
+            if self._list_error_boards:
+                names = []
+                for board in line_boards[line]:
+                    if not board.target in names:
+                        names.append(board.target)
+                names_str = '(%s) ' % ','.join(names)
+            else:
+                names_str = ''
+            return names_str
+
+        def _CalcErrorDelta(base_lines, base_line_boards, lines, line_boards,
+                            char):
+            """Work out which lines have appeared and disappeared
+
+            Args:
+                base_lines: Lines from the previous commit
+                base_line_boards: Dict of Board objects for each base line
+                lines: Lines from this commit
+                line_boards: Dict of Board objects for each line
+                char: Prefix character ('' for errors, 'w' for warnings)
+            Returns:
+                Tuple of (lines fixed since the last commit, new lines),
+                prefixed with char and '-' or '+' respectively
+            """
+            better_lines = []
+            worse_lines = []
+            for line in lines:
+                if line not in base_lines:
+                    worse_lines.append(char + '+' +
+                            _BoardList(line, line_boards) + line)
+            for line in base_lines:
+                if line not in lines:
+                    better_lines.append(char + '-' +
+                            _BoardList(line, base_line_boards) + line)
+            return better_lines, worse_lines
+
+        def _CalcConfig(delta, name, config):
+            """Calculate configuration changes
+
+            Args:
+                delta: Type of the delta, e.g. '+'
+                name: name of the file which changed (e.g. .config)
+                config: configuration change dictionary
+                    key: config name
+                    value: config value
+            Returns:
+                String containing the configuration changes which can be
+                    printed
+            """
+            out = ''
+            for key in sorted(config.keys()):
+                out += '%s=%s ' % (key, config[key])
+            return '%s %s: %s' % (delta, name, out)
+
+        def _AddConfig(lines, name, config_plus, config_minus, config_change):
+            """Add changes in configuration to a list
+
+            Args:
+                lines: list to add to
+                name: config file name
+                config_plus: configurations added, dictionary
+                    key: config name
+                    value: config value
+                config_minus: configurations removed, dictionary
+                    key: config name
+                    value: config value
+                config_change: configurations changed, dictionary
+                    key: config name
+                    value: config value
+            """
+            if config_plus:
+                lines.append(_CalcConfig('+', name, config_plus))
+            if config_minus:
+                lines.append(_CalcConfig('-', name, config_minus))
+            if config_change:
+                lines.append(_CalcConfig('c', name, config_change))
+
+        def _OutputConfigInfo(lines):
+            for line in lines:
+                if not line:
+                    continue
+                if line[0] == '+':
+                    col = self.col.GREEN
+                elif line[0] == '-':
+                    col = self.col.RED
+                elif line[0] == 'c':
+                    col = self.col.YELLOW
+                Print('   ' + line, newline=True, colour=col)
+
+
+        ok_boards = []      # List of boards fixed since last commit
+        warn_boards = []    # List of boards with warnings since last commit
+        err_boards = []     # List of new broken boards since last commit
+        new_boards = []     # List of boards that didn't exist last time
+        unknown_boards = [] # List of boards that were not built
+
+        for target in board_dict:
+            if target not in board_selected:
+                continue
+
+            # If the board was built last time, add its outcome to a list
+            if target in self._base_board_dict:
+                base_outcome = self._base_board_dict[target].rc
+                outcome = board_dict[target]
+                if outcome.rc == OUTCOME_UNKNOWN:
+                    unknown_boards.append(target)
+                elif outcome.rc < base_outcome:
+                    if outcome.rc == OUTCOME_WARNING:
+                        warn_boards.append(target)
+                    else:
+                        ok_boards.append(target)
+                elif outcome.rc > base_outcome:
+                    if outcome.rc == OUTCOME_WARNING:
+                        warn_boards.append(target)
+                    else:
+                        err_boards.append(target)
+            else:
+                new_boards.append(target)
+
+        # Get a list of errors that have appeared, and disappeared
+        better_err, worse_err = _CalcErrorDelta(self._base_err_lines,
+                self._base_err_line_boards, err_lines, err_line_boards, '')
+        better_warn, worse_warn = _CalcErrorDelta(self._base_warn_lines,
+                self._base_warn_line_boards, warn_lines, warn_line_boards, 'w')
+
+        # Display results by arch
+        if any((ok_boards, warn_boards, err_boards, unknown_boards, new_boards,
+                worse_err, better_err, worse_warn, better_warn)):
+            arch_list = {}
+            self.AddOutcome(board_selected, arch_list, ok_boards, '',
+                    self.col.GREEN)
+            self.AddOutcome(board_selected, arch_list, warn_boards, 'w+',
+                    self.col.YELLOW)
+            self.AddOutcome(board_selected, arch_list, err_boards, '+',
+                    self.col.RED)
+            self.AddOutcome(board_selected, arch_list, new_boards, '*', self.col.BLUE)
+            if self._show_unknown:
+                self.AddOutcome(board_selected, arch_list, unknown_boards, '?',
+                        self.col.MAGENTA)
+            for arch, target_list in arch_list.iteritems():
+                Print('%10s: %s' % (arch, target_list))
+                self._error_lines += 1
+            if better_err:
+                Print('\n'.join(better_err), colour=self.col.GREEN)
+                self._error_lines += 1
+            if worse_err:
+                Print('\n'.join(worse_err), colour=self.col.RED)
+                self._error_lines += 1
+            if better_warn:
+                Print('\n'.join(better_warn), colour=self.col.CYAN)
+                self._error_lines += 1
+            if worse_warn:
+                Print('\n'.join(worse_warn), colour=self.col.MAGENTA)
+                self._error_lines += 1
+
+        if show_sizes:
+            self.PrintSizeSummary(board_selected, board_dict, show_detail,
+                                  show_bloat)
+
+        if show_environment and self._base_environment:
+            lines = []
+
+            for target in board_dict:
+                if target not in board_selected:
+                    continue
+
+                tbase = self._base_environment[target]
+                tenvironment = environment[target]
+                environment_plus = {}
+                environment_minus = {}
+                environment_change = {}
+                base = tbase.environment
+                for key, value in tenvironment.environment.iteritems():
+                    if key not in base:
+                        environment_plus[key] = value
+                for key, value in base.iteritems():
+                    if key not in tenvironment.environment:
+                        environment_minus[key] = value
+                for key, value in base.iteritems():
+                    new_value = tenvironment.environment.get(key)
+                    if new_value and value != new_value:
+                        desc = '%s -> %s' % (value, new_value)
+                        environment_change[key] = desc
+
+                _AddConfig(lines, target, environment_plus, environment_minus,
+                           environment_change)
+
+            _OutputConfigInfo(lines)
+
+        if show_config and self._base_config:
+            summary = {}
+            arch_config_plus = {}
+            arch_config_minus = {}
+            arch_config_change = {}
+            arch_list = []
+
+            for target in board_dict:
+                if target not in board_selected:
+                    continue
+                arch = board_selected[target].arch
+                if arch not in arch_list:
+                    arch_list.append(arch)
+
+            for arch in arch_list:
+                arch_config_plus[arch] = {}
+                arch_config_minus[arch] = {}
+                arch_config_change[arch] = {}
+                for name in self.config_filenames:
+                    arch_config_plus[arch][name] = {}
+                    arch_config_minus[arch][name] = {}
+                    arch_config_change[arch][name] = {}
+
+            for target in board_dict:
+                if target not in board_selected:
+                    continue
+
+                arch = board_selected[target].arch
+
+                all_config_plus = {}
+                all_config_minus = {}
+                all_config_change = {}
+                tbase = self._base_config[target]
+                tconfig = config[target]
+                lines = []
+                for name in self.config_filenames:
+                    if not tconfig.config[name]:
+                        continue
+                    config_plus = {}
+                    config_minus = {}
+                    config_change = {}
+                    base = tbase.config[name]
+                    for key, value in tconfig.config[name].iteritems():
+                        if key not in base:
+                            config_plus[key] = value
+                            all_config_plus[key] = value
+                    for key, value in base.iteritems():
+                        if key not in tconfig.config[name]:
+                            config_minus[key] = value
+                            all_config_minus[key] = value
+                    for key, value in base.iteritems():
+                        new_value = tconfig.config[name].get(key)
+                        if new_value and value != new_value:
+                            desc = '%s -> %s' % (value, new_value)
+                            config_change[key] = desc
+                            all_config_change[key] = desc
+
+                    arch_config_plus[arch][name].update(config_plus)
+                    arch_config_minus[arch][name].update(config_minus)
+                    arch_config_change[arch][name].update(config_change)
+
+                    _AddConfig(lines, name, config_plus, config_minus,
+                               config_change)
+                _AddConfig(lines, 'all', all_config_plus, all_config_minus,
+                           all_config_change)
+                summary[target] = '\n'.join(lines)
+
+            lines_by_target = {}
+            for target, lines in summary.iteritems():
+                if lines in lines_by_target:
+                    lines_by_target[lines].append(target)
+                else:
+                    lines_by_target[lines] = [target]
+
+            for arch in arch_list:
+                lines = []
+                all_plus = {}
+                all_minus = {}
+                all_change = {}
+                for name in self.config_filenames:
+                    all_plus.update(arch_config_plus[arch][name])
+                    all_minus.update(arch_config_minus[arch][name])
+                    all_change.update(arch_config_change[arch][name])
+                    _AddConfig(lines, name, arch_config_plus[arch][name],
+                               arch_config_minus[arch][name],
+                               arch_config_change[arch][name])
+                _AddConfig(lines, 'all', all_plus, all_minus, all_change)
+                #arch_summary[target] = '\n'.join(lines)
+                if lines:
+                    Print('%s:' % arch)
+                    _OutputConfigInfo(lines)
+
+            for lines, targets in lines_by_target.iteritems():
+                if not lines:
+                    continue
+                Print('%s :' % ' '.join(sorted(targets)))
+                _OutputConfigInfo(lines.split('\n'))
+
+
+        # Save our updated information for the next call to this function
+        self._base_board_dict = board_dict
+        self._base_err_lines = err_lines
+        self._base_warn_lines = warn_lines
+        self._base_err_line_boards = err_line_boards
+        self._base_warn_line_boards = warn_line_boards
+        self._base_config = config
+        self._base_environment = environment
+
+        # Get a list of boards that did not get built, if needed
+        not_built = []
+        for board in board_selected:
+            if not board in board_dict:
+                not_built.append(board)
+        if not_built:
+            Print("Boards not built (%d): %s" % (len(not_built),
+                  ', '.join(not_built)))
+
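The fixed/broken classification in PrintResultSummary() compares each board's outcome code against the previous commit. A self-contained sketch of that decision follows; the OUTCOME_* values here are placeholders chosen only to reproduce the ordering the code relies on:

# Sketch only: placeholder outcome codes, ordered OK < WARNING < ERROR,
# with UNKNOWN meaning the board was not built.
OUTCOME_OK, OUTCOME_WARNING, OUTCOME_ERR, OUTCOME_UNKNOWN = 0, 1, 2, 3

def classify(base_rc, rc):
    # Mirrors the per-board logic in PrintResultSummary()
    if rc == OUTCOME_UNKNOWN:
        return 'not built (?)'
    if rc < base_rc:
        return 'warning (w+)' if rc == OUTCOME_WARNING else 'fixed'
    if rc > base_rc:
        return 'warning (w+)' if rc == OUTCOME_WARNING else 'newly broken (+)'
    return 'unchanged (not reported)'

print classify(OUTCOME_ERR, OUTCOME_OK)        # fixed
print classify(OUTCOME_OK, OUTCOME_WARNING)    # warning (w+)
print classify(OUTCOME_OK, OUTCOME_ERR)        # newly broken (+)
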
+    def ProduceResultSummary(self, commit_upto, commits, board_selected):
+        """Produce a summary of the results for a single commit
+
+        Args:
+            commit_upto: Commit number to summarize (0..self.count-1)
+            commits: Selected commits to build, or None if building the
+                current source tree
+            board_selected: Dict containing boards to summarise, keyed by
+                board.target
+        """
+        (board_dict, err_lines, err_line_boards, warn_lines,
+         warn_line_boards, config, environment) = self.GetResultSummary(
+                board_selected, commit_upto,
+                read_func_sizes=self._show_bloat,
+                read_config=self._show_config,
+                read_environment=self._show_environment)
+        if commits:
+            msg = '%02d: %s' % (commit_upto + 1,
+                    commits[commit_upto].subject)
+            Print(msg, colour=self.col.BLUE)
+        self.PrintResultSummary(board_selected, board_dict,
+                err_lines if self._show_errors else [], err_line_boards,
+                warn_lines if self._show_errors else [], warn_line_boards,
+                config, environment, self._show_sizes, self._show_detail,
+                self._show_bloat, self._show_config, self._show_environment)
+
+    def ShowSummary(self, commits, board_selected):
+        """Show a build summary for U-Boot for a given board list.
+
+        Reset the result summary, then repeatedly call GetResultSummary on
+        each commit's results, then display the differences we see.
+
+        Args:
+            commits: Commit objects to summarise
+            board_selected: Dict containing boards to summarise
+        """
+        self.commit_count = len(commits) if commits else 1
+        self.commits = commits
+        self.ResetResultSummary(board_selected)
+        self._error_lines = 0
+
+        for commit_upto in range(0, self.commit_count, self._step):
+            self.ProduceResultSummary(commit_upto, commits, board_selected)
+        if not self._error_lines:
+            Print('(no errors to report)', colour=self.col.GREEN)
+
+
+    def SetupBuild(self, board_selected, commits):
+        """Set up ready to start a build.
+
+        Args:
+            board_selected: Selected boards to build
+            commits: Selected commits to build
+        """
+        # First work out how many commits we will build
+        count = (self.commit_count + self._step - 1) / self._step
+        self.count = len(board_selected) * count
+        self.upto = self.warned = self.fail = 0
+        self._timestamps = collections.deque()
+
+    def GetThreadDir(self, thread_num):
+        """Get the directory path to the working dir for a thread.
+
+        Args:
+            thread_num: Thread number (0, 1, ...)
+
+        Returns:
+            Path to the work directory for that thread
+        """
+        return os.path.join(self._working_dir, '%02d' % thread_num)
+
+    def _PrepareThread(self, thread_num, setup_git):
+        """Prepare the working directory for a thread.
+
+        This clones or fetches the repo into the thread's work directory.
+
+        Args:
+            thread_num: Thread number (0, 1, ...)
+            setup_git: True to set up a git repo clone
+        """
+        thread_dir = self.GetThreadDir(thread_num)
+        builderthread.Mkdir(thread_dir)
+        git_dir = os.path.join(thread_dir, '.git')
+
+        # Clone the repo if it doesn't already exist
+        # TODO(sjg@chromium): Perhaps some git hackery to symlink instead, so
+        # we have a private index but use the origin repo's contents?
+        if setup_git and self.git_dir:
+            src_dir = os.path.abspath(self.git_dir)
+            if os.path.exists(git_dir):
+                gitutil.Fetch(git_dir, thread_dir)
+            else:
+                Print('\rCloning repo for thread %d' % thread_num,
+                      newline=False)
+                gitutil.Clone(src_dir, thread_dir)
+                Print('\r%s\r' % (' ' * 30), newline=False)
+
+    def _PrepareWorkingSpace(self, max_threads, setup_git):
+        """Prepare the working directory for use.
+
+        Set up the git repo for each thread.
+
+        Args:
+            max_threads: Maximum number of threads we expect to need.
+            setup_git: True to set up a git repo clone
+        """
+        builderthread.Mkdir(self._working_dir)
+        for thread in range(max_threads):
+            self._PrepareThread(thread, setup_git)
+
+    def _PrepareOutputSpace(self):
+        """Get the output directories ready to receive files.
+
+        We delete any output directories which look like ones we need to
+        create. Having left over directories is confusing when the user wants
+        to check the output manually.
+        """
+        if not self.commits:
+            return
+        dir_list = []
+        for commit_upto in range(self.commit_count):
+            dir_list.append(self._GetOutputDir(commit_upto))
+
+        to_remove = []
+        for dirname in glob.glob(os.path.join(self.base_dir, '*')):
+            if dirname not in dir_list:
+                to_remove.append(dirname)
+        if to_remove:
+            Print('Removing %d old build directories' % len(to_remove),
+                  newline=False)
+            for dirname in to_remove:
+                shutil.rmtree(dirname)
+
+    def BuildBoards(self, commits, board_selected, keep_outputs, verbose):
+        """Build all commits for a list of boards
+
+        Args:
+            commits: List of commits to be built, each a Commit object
+            board_selected: Dict of selected boards, key is target name,
+                    value is Board object
+            keep_outputs: True to save build output files
+            verbose: Display build results as they are completed
+        Returns:
+            Tuple containing:
+                - number of boards that failed to build
+                - number of boards that issued warnings
+        """
+        self.commit_count = len(commits) if commits else 1
+        self.commits = commits
+        self._verbose = verbose
+
+        self.ResetResultSummary(board_selected)
+        builderthread.Mkdir(self.base_dir, parents = True)
+        self._PrepareWorkingSpace(min(self.num_threads, len(board_selected)),
+                commits is not None)
+        self._PrepareOutputSpace()
+        Print('\rStarting build...', newline=False)
+        self.SetupBuild(board_selected, commits)
+        self.ProcessResult(None)
+
+        # Create jobs to build all commits for each board
+        for brd in board_selected.itervalues():
+            job = builderthread.BuilderJob()
+            job.board = brd
+            job.commits = commits
+            job.keep_outputs = keep_outputs
+            job.step = self._step
+            self.queue.put(job)
+
+        term = threading.Thread(target=self.queue.join)
+        term.setDaemon(True)
+        term.start()
+        while term.isAlive():
+            term.join(100)
+
+        # Wait until we have processed all output
+        self.out_queue.join()
+        Print()
+        self.ClearLine(0)
+        return (self.fail, self.warned)
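
BuildBoards() above, together with the BuilderThread and ResultThread classes added in builderthread.py below, is a conventional producer/consumer arrangement around work and output queues. A stripped-down, stand-alone sketch of that pattern (not buildman code; it uses the Python 2 'Queue' module, as this code base does):

# Sketch only: the queue/worker pattern buildman uses, reduced to basics.
import Queue
import threading

queue = Queue.Queue()

def worker():
    while True:
        job = queue.get()         # block until a job is available
        print 'building %s' % job
        queue.task_done()         # tell the queue this job is finished

for i in range(4):                # a few daemon worker threads
    t = threading.Thread(target=worker)
    t.setDaemon(True)
    t.start()

for job in ['board_a', 'board_b', 'board_c']:
    queue.put(job)
queue.join()                      # wait until every job has been processed
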
diff --git a/tools/u-boot-tools/buildman/builderthread.py b/tools/u-boot-tools/buildman/builderthread.py
new file mode 100644
index 0000000000000000000000000000000000000000..c84ba6acf11a64a611ae2e7a8d209391da38ccd3
--- /dev/null
+++ b/tools/u-boot-tools/buildman/builderthread.py
@@ -0,0 +1,493 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2014 Google, Inc
+#
+
+import errno
+import glob
+import os
+import shutil
+import sys
+import threading
+
+import command
+import gitutil
+
+RETURN_CODE_RETRY = -1
+
+def Mkdir(dirname, parents = False):
+    """Make a directory if it doesn't already exist.
+
+    Args:
+        dirname: Directory to create
+        parents: True to also create any missing parent directories
+    """
+    try:
+        if parents:
+            os.makedirs(dirname)
+        else:
+            os.mkdir(dirname)
+    except OSError as err:
+        if err.errno == errno.EEXIST:
+            if os.path.realpath('.') == os.path.realpath(dirname):
+                print "Cannot create the current working directory '%s'!" % dirname
+                sys.exit(1)
+            pass
+        else:
+            raise
+
+class BuilderJob:
+    """Holds information about a job to be performed by a thread
+
+    Members:
+        board: Board object to build
+        commits: List of Commit objects to build
+    """
+    def __init__(self):
+        self.board = None
+        self.commits = []
+
+
+class ResultThread(threading.Thread):
+    """This thread processes results from builder threads.
+
+    It simply passes the results on to the builder. There is only one
+    result thread, and this helps to serialise the build output.
+    """
+    def __init__(self, builder):
+        """Set up a new result thread
+
+        Args:
+            builder: Builder which will be sent each result
+        """
+        threading.Thread.__init__(self)
+        self.builder = builder
+
+    def run(self):
+        """Called to start up the result thread.
+
+        We collect the next result job and pass it on to the builder.
+        """
+        while True:
+            result = self.builder.out_queue.get()
+            self.builder.ProcessResult(result)
+            self.builder.out_queue.task_done()
+
+
+class BuilderThread(threading.Thread):
+    """This thread builds U-Boot for a particular board.
+
+    An input queue provides each new job. We run 'make' to build U-Boot
+    and then pass the results on to the output queue.
+
+    Members:
+        builder: The builder which contains information we might need
+        thread_num: Our thread number (0..n-1), used to decide on a
+                temporary directory
+    """
+    def __init__(self, builder, thread_num, incremental, per_board_out_dir):
+        """Set up a new builder thread
+
+        Args:
+            builder: Builder object which controls this thread
+            thread_num: Our thread number (0..n-1)
+            incremental: True to skip 'make mrproper' when reconfiguring
+            per_board_out_dir: True to use a separate output directory for
+                each board, rather than one shared directory per thread
+        """
+        threading.Thread.__init__(self)
+        self.builder = builder
+        self.thread_num = thread_num
+        self.incremental = incremental
+        self.per_board_out_dir = per_board_out_dir
+
+    def Make(self, commit, brd, stage, cwd, *args, **kwargs):
+        """Run 'make' on a particular commit and board.
+
+        The source code will already be checked out, so the 'commit'
+        argument is only for information.
+
+        Args:
+            commit: Commit object that is being built
+            brd: Board object that is being built
+            stage: Stage of the build. Valid stages are:
+                        mrproper - can be called to clean source
+                        config - called to configure for a board
+                        build - the main make invocation - it does the build
+            args: A list of arguments to pass to 'make'
+            kwargs: A dict of keyword arguments to pass to command.RunPipe()
+
+        Returns:
+            CommandResult object
+        """
+        return self.builder.do_make(commit, brd, stage, cwd, *args,
+                **kwargs)
+
+    def RunCommit(self, commit_upto, brd, work_dir, do_config, config_only,
+                  force_build, force_build_failures):
+        """Build a particular commit.
+
+        If the build is already done, and we are not forcing a build, we skip
+        the build and just return the previously-saved results.
+
+        Args:
+            commit_upto: Commit number to build (0...n-1)
+            brd: Board object to build
+            work_dir: Directory to which the source will be checked out
+            do_config: True to run a make <board>_defconfig on the source
+            config_only: Only configure the source, do not build it
+            force_build: Force a build even if one was previously done
+            force_build_failures: Force a build if the previous result showed
+                failure
+
+        Returns:
+            tuple containing:
+                - CommandResult object containing the results of the build
+                - boolean indicating whether 'make config' is still needed
+        """
+        # Create a default result - it will be overwritten by the call to
+        # self.Make() below, in the event that we do a build.
+        result = command.CommandResult()
+        result.return_code = 0
+        if self.builder.in_tree:
+            out_dir = work_dir
+        else:
+            if self.per_board_out_dir:
+                out_rel_dir = os.path.join('..', brd.target)
+            else:
+                out_rel_dir = 'build'
+            out_dir = os.path.join(work_dir, out_rel_dir)
+
+        # Check if the job was already completed last time
+        done_file = self.builder.GetDoneFile(commit_upto, brd.target)
+        result.already_done = os.path.exists(done_file)
+        will_build = (force_build or force_build_failures or
+            not result.already_done)
+        if result.already_done:
+            # Get the return code from that build and use it
+            with open(done_file, 'r') as fd:
+                result.return_code = int(fd.readline())
+
+            # Check the signal that the build needs to be retried
+            if result.return_code == RETURN_CODE_RETRY:
+                will_build = True
+            elif will_build:
+                err_file = self.builder.GetErrFile(commit_upto, brd.target)
+                if os.path.exists(err_file) and os.stat(err_file).st_size:
+                    result.stderr = 'bad'
+                elif not force_build:
+                    # The build passed, so no need to build it again
+                    will_build = False
+
+        if will_build:
+            # We are going to have to build it. First, get a toolchain
+            if not self.toolchain:
+                try:
+                    self.toolchain = self.builder.toolchains.Select(brd.arch)
+                except ValueError as err:
+                    result.return_code = 10
+                    result.stdout = ''
+                    result.stderr = str(err)
+                    # TODO(sjg@chromium.org): This gets swallowed, but needs
+                    # to be reported.
+
+            if self.toolchain:
+                # Checkout the right commit
+                if self.builder.commits:
+                    commit = self.builder.commits[commit_upto]
+                    if self.builder.checkout:
+                        git_dir = os.path.join(work_dir, '.git')
+                        gitutil.Checkout(commit.hash, git_dir, work_dir,
+                                         force=True)
+                else:
+                    commit = 'current'
+
+                # Set up the environment and command line
+                env = self.toolchain.MakeEnvironment(self.builder.full_path)
+                Mkdir(out_dir)
+                args = []
+                cwd = work_dir
+                src_dir = os.path.realpath(work_dir)
+                if not self.builder.in_tree:
+                    if commit_upto is None:
+                        # In this case we are building in the original source
+                        # directory (i.e. the current directory where buildman
+                        # is invoked). The output directory is set to this
+                        # thread's selected work directory.
+                        #
+                        # Symlinks can confuse U-Boot's Makefile since
+                        # we may use '..' in our path, so remove them.
+                        out_dir = os.path.realpath(out_dir)
+                        args.append('O=%s' % out_dir)
+                        cwd = None
+                        src_dir = os.getcwd()
+                    else:
+                        args.append('O=%s' % out_rel_dir)
+                if self.builder.verbose_build:
+                    args.append('V=1')
+                else:
+                    args.append('-s')
+                if self.builder.num_jobs is not None:
+                    args.extend(['-j', str(self.builder.num_jobs)])
+                if self.builder.warnings_as_errors:
+                    args.append('KCFLAGS=-Werror')
+                config_args = ['%s_defconfig' % brd.target]
+                config_out = ''
+                args.extend(self.builder.toolchains.GetMakeArguments(brd))
+
+                # If we need to reconfigure, do that now
+                if do_config:
+                    config_out = ''
+                    if not self.incremental:
+                        result = self.Make(commit, brd, 'mrproper', cwd,
+                                'mrproper', *args, env=env)
+                        config_out += result.combined
+                    result = self.Make(commit, brd, 'config', cwd,
+                            *(args + config_args), env=env)
+                    config_out += result.combined
+                    do_config = False   # No need to configure next time
+                if result.return_code == 0:
+                    if config_only:
+                        args.append('cfg')
+                    result = self.Make(commit, brd, 'build', cwd, *args,
+                            env=env)
+                result.stderr = result.stderr.replace(src_dir + '/', '')
+                if self.builder.verbose_build:
+                    result.stdout = config_out + result.stdout
+            else:
+                result.return_code = 1
+                result.stderr = 'No tool chain for %s\n' % brd.arch
+            result.already_done = False
+
+        result.toolchain = self.toolchain
+        result.brd = brd
+        result.commit_upto = commit_upto
+        result.out_dir = out_dir
+        return result, do_config
+
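The 'done' file that RunCommit() consults is just a text file holding the build's return code, with RETURN_CODE_RETRY (-1) marking a build that was probably interrupted and should be re-run. A stand-alone sketch of that convention (the file name is illustrative; the real path comes from builder.GetDoneFile()):

# Sketch only: writing and interpreting a 'done' file.
RETURN_CODE_RETRY = -1          # mirrors the constant defined in this module

def interpret_done_file(path):
    with open(path, 'r') as fd:
        rc = int(fd.readline())
    if rc == RETURN_CODE_RETRY:
        return 'retry'
    return 'already passed' if rc == 0 else 'failed with code %d' % rc

with open('done', 'w') as fd:   # stand-in for builder.GetDoneFile(...)
    fd.write('%s' % RETURN_CODE_RETRY)
print interpret_done_file('done')   # -> retry
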
+    def _WriteResult(self, result, keep_outputs):
+        """Write a built result to the output directory.
+
+        Args:
+            result: CommandResult object containing result to write
+            keep_outputs: True to store the output binaries, False
+                to delete them
+        """
+        # Fatal error
+        if result.return_code < 0:
+            return
+
+        # If we think this might have been aborted with Ctrl-C, record the
+        # failure but not that we are 'done' with this board. A retry may fix
+        # it.
+        maybe_aborted =  result.stderr and 'No child processes' in result.stderr
+
+        if result.already_done:
+            return
+
+        # Write the output and stderr
+        output_dir = self.builder._GetOutputDir(result.commit_upto)
+        Mkdir(output_dir)
+        build_dir = self.builder.GetBuildDir(result.commit_upto,
+                result.brd.target)
+        Mkdir(build_dir)
+
+        outfile = os.path.join(build_dir, 'log')
+        with open(outfile, 'w') as fd:
+            if result.stdout:
+                # We don't want unicode characters in log files
+                fd.write(result.stdout.decode('UTF-8').encode('ASCII', 'replace'))
+
+        errfile = self.builder.GetErrFile(result.commit_upto,
+                result.brd.target)
+        if result.stderr:
+            with open(errfile, 'w') as fd:
+                # We don't want unicode characters in log files
+                fd.write(result.stderr.decode('UTF-8').encode('ASCII', 'replace'))
+        elif os.path.exists(errfile):
+            os.remove(errfile)
+
+        if result.toolchain:
+            # Write the build result and toolchain information.
+            done_file = self.builder.GetDoneFile(result.commit_upto,
+                    result.brd.target)
+            with open(done_file, 'w') as fd:
+                if maybe_aborted:
+                    # Special code to indicate we need to retry
+                    fd.write('%s' % RETURN_CODE_RETRY)
+                else:
+                    fd.write('%s' % result.return_code)
+            with open(os.path.join(build_dir, 'toolchain'), 'w') as fd:
+                print >>fd, 'gcc', result.toolchain.gcc
+                print >>fd, 'path', result.toolchain.path
+                print >>fd, 'cross', result.toolchain.cross
+                print >>fd, 'arch', result.toolchain.arch
+                fd.write('%s' % result.return_code)
+
+            # Write out the image and function size information and an objdump
+            env = result.toolchain.MakeEnvironment(self.builder.full_path)
+            lines = []
+            for fname in ['u-boot', 'spl/u-boot-spl']:
+                cmd = ['%snm' % self.toolchain.cross, '--size-sort', fname]
+                nm_result = command.RunPipe([cmd], capture=True,
+                        capture_stderr=True, cwd=result.out_dir,
+                        raise_on_error=False, env=env)
+                if nm_result.stdout:
+                    nm = self.builder.GetFuncSizesFile(result.commit_upto,
+                                    result.brd.target, fname)
+                    with open(nm, 'w') as fd:
+                        print >>fd, nm_result.stdout,
+
+                cmd = ['%sobjdump' % self.toolchain.cross, '-h', fname]
+                dump_result = command.RunPipe([cmd], capture=True,
+                        capture_stderr=True, cwd=result.out_dir,
+                        raise_on_error=False, env=env)
+                rodata_size = ''
+                if dump_result.stdout:
+                    objdump = self.builder.GetObjdumpFile(result.commit_upto,
+                                    result.brd.target, fname)
+                    with open(objdump, 'w') as fd:
+                        print >>fd, dump_result.stdout,
+                    for line in dump_result.stdout.splitlines():
+                        fields = line.split()
+                        if len(fields) > 5 and fields[1] == '.rodata':
+                            rodata_size = fields[2]
+
+                cmd = ['%ssize' % self.toolchain.cross, fname]
+                size_result = command.RunPipe([cmd], capture=True,
+                        capture_stderr=True, cwd=result.out_dir,
+                        raise_on_error=False, env=env)
+                if size_result.stdout:
+                    lines.append(size_result.stdout.splitlines()[1] + ' ' +
+                                 rodata_size)
+
+            # Extract the environment from U-Boot and dump it out
+            cmd = ['%sobjcopy' % self.toolchain.cross, '-O', 'binary',
+                   '-j', '.rodata.default_environment',
+                   'env/built-in.o', 'uboot.env']
+            command.RunPipe([cmd], capture=True,
+                            capture_stderr=True, cwd=result.out_dir,
+                            raise_on_error=False, env=env)
+            ubootenv = os.path.join(result.out_dir, 'uboot.env')
+            self.CopyFiles(result.out_dir, build_dir, '', ['uboot.env'])
+
+            # Write out the image sizes file. This is similar to the output
+            # of binutils' 'size' utility, but it omits the header line and
+            # adds an additional hex value at the end of each line for the
+            # rodata size
+            if len(lines):
+                sizes = self.builder.GetSizesFile(result.commit_upto,
+                                result.brd.target)
+                with open(sizes, 'w') as fd:
+                    print >>fd, '\n'.join(lines)
+
+        # Write out the configuration files, with a special case for SPL
+        for dirname in ['', 'spl', 'tpl']:
+            self.CopyFiles(result.out_dir, build_dir, dirname, ['u-boot.cfg',
+                'spl/u-boot-spl.cfg', 'tpl/u-boot-tpl.cfg', '.config',
+                'include/autoconf.mk', 'include/generated/autoconf.h'])
+
+        # Now write the actual build output
+        if keep_outputs:
+            self.CopyFiles(result.out_dir, build_dir, '', ['u-boot*', '*.bin',
+                '*.map', '*.img', 'MLO', 'SPL', 'include/autoconf.mk',
+                'spl/u-boot-spl*'])
+
+    def CopyFiles(self, out_dir, build_dir, dirname, patterns):
+        """Copy files from the build directory to the output.
+
+        Args:
+            out_dir: Path to output directory containing the files
+            build_dir: Place to copy the files
+            dirname: Source directory, '' for normal U-Boot, 'spl' for SPL
+            patterns: A list of filenames (strings) to copy, each relative
+               to the build directory
+        """
+        for pattern in patterns:
+            file_list = glob.glob(os.path.join(out_dir, dirname, pattern))
+            for fname in file_list:
+                target = os.path.basename(fname)
+                if dirname:
+                    base, ext = os.path.splitext(target)
+                    if ext:
+                        target = '%s-%s%s' % (base, dirname, ext)
+                shutil.copy(fname, os.path.join(build_dir, target))
+
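When CopyFiles() copies from a subdirectory such as 'spl' or 'tpl', it inserts the directory name before the file extension so the variants do not clobber the top-level files. A small stand-alone illustration of that rename rule (the file names are examples):

# Sketch only: the rename rule CopyFiles() applies when dirname is non-empty.
import os

def renamed(target, dirname):
    if dirname:
        base, ext = os.path.splitext(target)
        if ext:
            target = '%s-%s%s' % (base, dirname, ext)
    return target

print renamed('u-boot.cfg', 'spl')   # -> u-boot-spl.cfg
print renamed('u-boot.cfg', '')      # -> u-boot.cfg (top level, unchanged)
print renamed('.config', 'spl')      # -> .config (no extension, unchanged)
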
+    def RunJob(self, job):
+        """Run a single job
+
+        A job consists of building a list of commits for a particular board.
+
+        Args:
+            job: Job to build
+        """
+        brd = job.board
+        work_dir = self.builder.GetThreadDir(self.thread_num)
+        self.toolchain = None
+        if job.commits:
+            # Run 'make board_defconfig' on the first commit
+            do_config = True
+            commit_upto  = 0
+            force_build = False
+            for commit_upto in range(0, len(job.commits), job.step):
+                result, request_config = self.RunCommit(commit_upto, brd,
+                        work_dir, do_config, self.builder.config_only,
+                        force_build or self.builder.force_build,
+                        self.builder.force_build_failures)
+                failed = result.return_code or result.stderr
+                did_config = do_config
+                if failed and not do_config:
+                    # If our incremental build failed, try building again
+                    # with a reconfig.
+                    if self.builder.force_config_on_failure:
+                        result, request_config = self.RunCommit(commit_upto,
+                            brd, work_dir, True, False, True, False)
+                        did_config = True
+                if not self.builder.force_reconfig:
+                    do_config = request_config
+
+                # If we built that commit, then config is done. But if we got
+                # a warning, reconfig next time to force it to build the same
+                # files that created warnings this time. Otherwise an
+                # incremental build may not build the same file, and we will
+                # think that the warning has gone away.
+                # We could avoid this by using -Werror everywhere...
+                # For errors, the problem doesn't happen, since presumably
+                # the build stopped and didn't generate output, so it will
+                # retry that file next time. So we could detect warnings and
+                # deal with them specially here. For now, we just reconfigure
+                # if anything goes wrong.
+                # Of course this is substantially slower if there are build
+                # errors/warnings (e.g. 2-3x slower even if only 10% of builds
+                # have problems).
+                if (failed and not result.already_done and not did_config and
+                        self.builder.force_config_on_failure):
+                    # If this build failed, try the next one with a
+                    # reconfigure.
+                    # Sometimes if the board_config.h file changes it can mess
+                    # with dependencies, and we get:
+                    # make: *** No rule to make target `include/autoconf.mk',
+                    #     needed by `depend'.
+                    do_config = True
+                    force_build = True
+                else:
+                    force_build = False
+                    if self.builder.force_config_on_failure:
+                        if failed:
+                            do_config = True
+                    result.commit_upto = commit_upto
+                    if result.return_code < 0:
+                        raise ValueError('Interrupt')
+
+                # We have the build results, so output the result
+                self._WriteResult(result, job.keep_outputs)
+                self.builder.out_queue.put(result)
+        else:
+            # Just build the currently checked-out source
+            result, request_config = self.RunCommit(None, brd, work_dir, True,
+                        self.builder.config_only, True,
+                        self.builder.force_build_failures)
+            result.commit_upto = 0
+            self._WriteResult(result, job.keep_outputs)
+            self.builder.out_queue.put(result)
+
+    def run(self):
+        """Our thread's run function
+
+        This thread picks a job from the queue, runs it, and then goes to the
+        next job.
+        """
+        while True:
+            job = self.builder.queue.get()
+            self.RunJob(job)
+            self.builder.queue.task_done()
diff --git a/tools/u-boot-tools/buildman/buildman b/tools/u-boot-tools/buildman/buildman
new file mode 120000
index 0000000000000000000000000000000000000000..e4fba2d4b03b785f50033c817bba292f0c8ee960
--- /dev/null
+++ b/tools/u-boot-tools/buildman/buildman
@@ -0,0 +1 @@
+buildman.py
\ No newline at end of file
diff --git a/tools/u-boot-tools/buildman/buildman.py b/tools/u-boot-tools/buildman/buildman.py
new file mode 100755
index 0000000000000000000000000000000000000000..f17aa15e7c548bbe9bb34402f072867c76c768fc
--- /dev/null
+++ b/tools/u-boot-tools/buildman/buildman.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python2
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (c) 2012 The Chromium OS Authors.
+#
+
+"""See README for more information"""
+
+import multiprocessing
+import os
+import re
+import sys
+import unittest
+
+# Bring in the patman libraries
+our_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(1, os.path.join(our_path, '../patman'))
+
+# Our modules
+import board
+import bsettings
+import builder
+import checkpatch
+import cmdline
+import control
+import doctest
+import gitutil
+import patchstream
+import terminal
+import toolchain
+
+def RunTests(skip_net_tests):
+    import func_test
+    import test
+    import doctest
+
+    result = unittest.TestResult()
+    for module in ['toolchain', 'gitutil']:
+        suite = doctest.DocTestSuite(module)
+        suite.run(result)
+
+    sys.argv = [sys.argv[0]]
+    if skip_net_tests:
+        test.use_network = False
+    for module in (test.TestBuild, func_test.TestFunctional):
+        suite = unittest.TestLoader().loadTestsFromTestCase(module)
+        suite.run(result)
+
+    print result
+    for test, err in result.errors:
+        print err
+    for test, err in result.failures:
+        print err
+
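RunTests() picks up doctests embedded in the toolchain and gitutil module docstrings via doctest.DocTestSuite(). The stand-alone snippet below illustrates what such a doctest looks like and how it is executed (not buildman code):

# Sketch only: a function with a doctest in its docstring, run directly.
import doctest

def add(a, b):
    """Add two numbers.

    >>> add(2, 3)
    5
    """
    return a + b

# Run the examples found in add()'s docstring; verbose=True shows each test.
doctest.run_docstring_examples(add, {'add': add}, verbose=True)
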
+
+options, args = cmdline.ParseArgs()
+
+# Run our meagre tests
+if options.test:
+    RunTests(options.skip_net_tests)
+
+# Build selected commits for selected boards
+else:
+    bsettings.Setup(options.config_file)
+    ret_code = control.DoBuildman(options, args)
+    sys.exit(ret_code)
diff --git a/tools/u-boot-tools/buildman/cmdline.py b/tools/u-boot-tools/buildman/cmdline.py
new file mode 100644
index 0000000000000000000000000000000000000000..93d09ca08d739f915912c1fdb609446d65b0b44f
--- /dev/null
+++ b/tools/u-boot-tools/buildman/cmdline.py
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2014 Google, Inc
+#
+
+from optparse import OptionParser
+
+def ParseArgs():
+    """Parse command line arguments from sys.argv[]
+
+    Returns:
+        tuple containing:
+            options: command line options
+            args: command line arguments
+    """
+    parser = OptionParser()
+    parser.add_option('-b', '--branch', type='string',
+          help='Branch name to build, or range of commits to build')
+    parser.add_option('-B', '--bloat', dest='show_bloat',
+          action='store_true', default=False,
+          help='Show changes in function code size for each board')
+    parser.add_option('--boards', type='string', action='append',
+          help='List of board names to build separated by comma')
+    parser.add_option('-c', '--count', dest='count', type='int',
+          default=-1, help='Run build on the top n commits')
+    parser.add_option('-C', '--force-reconfig', dest='force_reconfig',
+          action='store_true', default=False,
+          help='Reconfigure for every commit (disable incremental build)')
+    parser.add_option('-d', '--detail', dest='show_detail',
+          action='store_true', default=False,
+          help='Show detailed information for each board in summary')
+    parser.add_option('-D', '--config-only', action='store_true', default=False,
+          help="Don't build, just configure each commit")
+    parser.add_option('-e', '--show_errors', action='store_true',
+          default=False, help='Show errors and warnings')
+    parser.add_option('-E', '--warnings-as-errors', action='store_true',
+          default=False, help='Treat all compiler warnings as errors')
+    parser.add_option('-f', '--force-build', dest='force_build',
+          action='store_true', default=False,
+          help='Force build of boards even if already built')
+    parser.add_option('-F', '--force-build-failures', dest='force_build_failures',
+          action='store_true', default=False,
+          help='Force build of previously-failed build')
+    parser.add_option('--fetch-arch', type='string',
+          help="Fetch a toolchain for architecture FETCH_ARCH ('list' to list)."
+              ' You can also fetch several toolchains separated by commas, or'
+              " 'all' to download all")
+    parser.add_option('-g', '--git', type='string',
+          help='Git repo containing branch to build', default='.')
+    parser.add_option('-G', '--config-file', type='string',
+          help='Path to buildman config file', default='')
+    parser.add_option('-H', '--full-help', action='store_true', dest='full_help',
+          default=False, help='Display the README file')
+    parser.add_option('-i', '--in-tree', dest='in_tree',
+          action='store_true', default=False,
+          help='Build in the source tree instead of a separate directory')
+    parser.add_option('-I', '--incremental', action='store_true',
+          default=False, help='Do not run make mrproper (when reconfiguring)')
+    parser.add_option('-j', '--jobs', dest='jobs', type='int',
+          default=None, help='Number of jobs to run at once (passed to make)')
+    parser.add_option('-k', '--keep-outputs', action='store_true',
+          default=False, help='Keep all build output files (e.g. binaries)')
+    parser.add_option('-K', '--show-config', action='store_true',
+          default=False, help='Show configuration changes in summary (both board config files and Kconfig)')
+    parser.add_option('--preserve-config-y', action='store_true',
+          default=False, help="Don't convert y to 1 in configs")
+    parser.add_option('-l', '--list-error-boards', action='store_true',
+          default=False, help='Show a list of boards next to each error/warning')
+    parser.add_option('--list-tool-chains', action='store_true', default=False,
+          help='List available tool chains (use -v to see probing detail)')
+    parser.add_option('-n', '--dry-run', action='store_true', dest='dry_run',
+          default=False, help="Do a dry run (describe actions, but do nothing)")
+    parser.add_option('-N', '--no-subdirs', action='store_true', dest='no_subdirs',
+          default=False, help="Don't create subdirectories when building current source for a single board")
+    parser.add_option('-o', '--output-dir', type='string',
+          dest='output_dir', default='..',
+          help='Directory where all builds happen and buildman has its workspace (default is ../)')
+    parser.add_option('-Q', '--quick', action='store_true',
+          default=False, help='Do a rough build, with limited warning resolution')
+    parser.add_option('-p', '--full-path', action='store_true',
+          default=False, help="Use full toolchain path in CROSS_COMPILE")
+    parser.add_option('-P', '--per-board-out-dir', action='store_true',
+          default=False, help="Use an O= (output) directory per board rather than per thread")
+    parser.add_option('-s', '--summary', action='store_true',
+          default=False, help='Show a build summary')
+    parser.add_option('-S', '--show-sizes', action='store_true',
+          default=False, help='Show image size variation in summary')
+    parser.add_option('--skip-net-tests', action='store_true', default=False,
+                      help='Skip tests which need the network')
+    parser.add_option('--step', type='int',
+          default=1, help='Only build every n commits (0=just first and last)')
+    parser.add_option('-t', '--test', action='store_true', dest='test',
+                      default=False, help='Run tests')
+    parser.add_option('-T', '--threads', type='int',
+          default=None, help='Number of builder threads to use')
+    parser.add_option('-u', '--show_unknown', action='store_true',
+          default=False, help='Show boards with unknown build result')
+    parser.add_option('-U', '--show-environment', action='store_true',
+          default=False, help='Show environment changes in summary')
+    parser.add_option('-v', '--verbose', action='store_true',
+          default=False, help='Show build results while the build progresses')
+    parser.add_option('-V', '--verbose-build', action='store_true',
+          default=False, help='Run make with V=1, logging all output')
+    parser.add_option('-x', '--exclude', dest='exclude',
+          type='string', action='append',
+          help='Specify a list of boards to exclude, separated by comma')
+
+    parser.usage += """ [list of target/arch/cpu/board/vendor/soc to build]
+
+    Build U-Boot for all commits in a branch. Use -n to do a dry run"""
+
+    return parser.parse_args()
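+
+# Typical use (see buildman.py): options, args = cmdline.ParseArgs()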
diff --git a/tools/u-boot-tools/buildman/control.py b/tools/u-boot-tools/buildman/control.py
new file mode 100644
index 0000000000000000000000000000000000000000..c900211510e42ef5d15e4f93f7c2b6a33f83c261
--- /dev/null
+++ b/tools/u-boot-tools/buildman/control.py
@@ -0,0 +1,347 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2013 The Chromium OS Authors.
+#
+
+import multiprocessing
+import os
+import shutil
+import sys
+
+import board
+import bsettings
+from builder import Builder
+import gitutil
+import patchstream
+import terminal
+from terminal import Print
+import toolchain
+import command
+import subprocess
+
+def GetPlural(count):
+    """Returns a plural 's' if count is not 1"""
+    return 's' if count != 1 else ''
+
+def GetActionSummary(is_summary, commits, selected, options):
+    """Return a string summarising the intended action.
+
+    Returns:
+        Summary string.
+    """
+    if commits:
+        count = len(commits)
+        count = (count + options.step - 1) / options.step
+        commit_str = '%d commit%s' % (count, GetPlural(count))
+    else:
+        commit_str = 'current source'
+    str = '%s %s for %d boards' % (
+        'Summary of' if is_summary else 'Building', commit_str,
+        len(selected))
+    str += ' (%d thread%s, %d job%s per thread)' % (options.threads,
+            GetPlural(options.threads), options.jobs, GetPlural(options.jobs))
+    return str
+
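+# Example output (illustrative): "Building 6 commits for 4 boards
+# (4 threads, 2 jobs per thread)".
+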
+def ShowActions(series, why_selected, boards_selected, builder, options,
+                board_warnings):
+    """Display a list of actions that we would take, if not a dry run.
+
+    Args:
+        series: Series object
+        why_selected: Dictionary where each key is a buildman argument
+                provided by the user, and the value is the list of boards
+                brought in by that argument. For example, 'arm' might bring
+                in 400 boards, so in this case the key would be 'arm' and
+                the value would be a list of board names.
+        boards_selected: Dict of selected boards, key is target name,
+                value is Board object
+        builder: The builder that will be used to build the commits
+        options: Command line options object
+        board_warnings: List of warnings obtained from board selection
+    """
+    col = terminal.Color()
+    print 'Dry run, so not doing much. But I would do this:'
+    print
+    if series:
+        commits = series.commits
+    else:
+        commits = None
+    print GetActionSummary(False, commits, boards_selected,
+            options)
+    print 'Build directory: %s' % builder.base_dir
+    if commits:
+        for upto in range(0, len(series.commits), options.step):
+            commit = series.commits[upto]
+            print '   ', col.Color(col.YELLOW, commit.hash[:8], bright=False),
+            print commit.subject
+    print
+    for arg in why_selected:
+        if arg != 'all':
+            print arg, ': %d boards' % len(why_selected[arg])
+            if options.verbose:
+                print '   %s' % ' '.join(why_selected[arg])
+    print ('Total boards to build for each commit: %d\n' %
+            len(why_selected['all']))
+    if board_warnings:
+        for warning in board_warnings:
+            print col.Color(col.YELLOW, warning)
+
+def CheckOutputDir(output_dir):
+    """Make sure that the output directory is not within the current directory
+
+    If we try to use an output directory which is within the current directory
+    (which is assumed to hold the U-Boot source) we may end up deleting the
+    U-Boot source code. Detect this and print an error in this case.
+
+    Args:
+        output_dir: Output directory path to check
+    """
+    path = os.path.realpath(output_dir)
+    cwd_path = os.path.realpath('.')
+    while True:
+        if os.path.realpath(path) == cwd_path:
+            Print("Cannot use output directory '%s' since it is within the current directtory '%s'" %
+                  (path, cwd_path))
+            sys.exit(1)
+        parent = os.path.dirname(path)
+        if parent == path:
+            break
+        path = parent
+
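+# For example (illustrative paths): if the source lives in /path/to/u-boot,
+# an output directory of /path/to/u-boot/build is rejected, since walking up
+# its parents eventually reaches the current directory.
+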
+def DoBuildman(options, args, toolchains=None, make_func=None, boards=None,
+               clean_dir=False):
+    """The main control code for buildman
+
+    Args:
+        options: Command line options object
+        args: Command line arguments (list of strings)
+        toolchains: Toolchains to use - this should be a Toolchains()
+                object. If None, then it will be created and scanned
+        make_func: Make function to use for the builder. This is called
+                to execute 'make'. If this is None, the normal function
+                will be used, which calls the 'make' tool with suitable
+                arguments. This setting is useful for tests.
+        boards: Boards() object to use, containing a list of available
+                boards. If this is None it will be created and scanned.
+        clean_dir: If True, remove any existing output directory before
+                building
+    """
+    global builder
+
+    if options.full_help:
+        pager = os.getenv('PAGER')
+        if not pager:
+            pager = 'more'
+        fname = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
+                             'README')
+        command.Run(pager, fname)
+        return 0
+
+    gitutil.Setup()
+    col = terminal.Color()
+
+    options.git_dir = os.path.join(options.git, '.git')
+
+    no_toolchains = toolchains is None
+    if no_toolchains:
+        toolchains = toolchain.Toolchains()
+
+    if options.fetch_arch:
+        if options.fetch_arch == 'list':
+            sorted_list = toolchains.ListArchs()
+            print col.Color(col.BLUE, 'Available architectures: %s\n' %
+                            ' '.join(sorted_list))
+            return 0
+        else:
+            fetch_arch = options.fetch_arch
+            if fetch_arch == 'all':
+                fetch_arch = ','.join(toolchains.ListArchs())
+                print col.Color(col.CYAN, '\nDownloading toolchains: %s' %
+                                fetch_arch)
+            for arch in fetch_arch.split(','):
+                print
+                ret = toolchains.FetchAndInstall(arch)
+                if ret:
+                    return ret
+            return 0
+
+    if no_toolchains:
+        toolchains.GetSettings()
+        toolchains.Scan(options.list_tool_chains and options.verbose)
+    if options.list_tool_chains:
+        toolchains.List()
+        print
+        return 0
+
+    # Work out how many commits to build. We want to build everything on the
+    # branch. We also build the upstream commit as a control so we can see
+    # problems introduced by the first commit on the branch.
+    count = options.count
+    has_range = options.branch and '..' in options.branch
+    if count == -1:
+        if not options.branch:
+            count = 1
+        else:
+            if has_range:
+                count, msg = gitutil.CountCommitsInRange(options.git_dir,
+                                                         options.branch)
+            else:
+                count, msg = gitutil.CountCommitsInBranch(options.git_dir,
+                                                          options.branch)
+            if count is None:
+                sys.exit(col.Color(col.RED, msg))
+            elif count == 0:
+                sys.exit(col.Color(col.RED, "Range '%s' has no commits" %
+                                   options.branch))
+            if msg:
+                print col.Color(col.YELLOW, msg)
+            count += 1   # Build upstream commit also
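+            # (e.g. a branch of 5 commits gives count = 6, since the
+            # upstream commit is built as a control)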
+
+    if not count:
+        str = ("No commits found to process in branch '%s': "
+               "set branch's upstream or use -c flag" % options.branch)
+        sys.exit(col.Color(col.RED, str))
+
+    # Work out what subset of the boards we are building
+    if not boards:
+        board_file = os.path.join(options.git, 'boards.cfg')
+        status = subprocess.call([os.path.join(options.git,
+                                                'tools/genboardscfg.py')])
+        if status != 0:
+            sys.exit("Failed to generate boards.cfg")
+
+        boards = board.Boards()
+        boards.ReadBoards(os.path.join(options.git, 'boards.cfg'))
+
+    exclude = []
+    if options.exclude:
+        for arg in options.exclude:
+            exclude += arg.split(',')
+
+    if options.boards:
+        requested_boards = []
+        for b in options.boards:
+            requested_boards += b.split(',')
+    else:
+        requested_boards = None
+    why_selected, board_warnings = boards.SelectBoards(args, exclude,
+                                                       requested_boards)
+    selected = boards.GetSelected()
+    if not len(selected):
+        sys.exit(col.Color(col.RED, 'No matching boards found'))
+
+    # Read the metadata from the commits. First look at the upstream commit,
+    # then the ones in the branch. We would like to do something like
+    # upstream/master~..branch but that isn't possible if upstream/master is
+    # a merge commit (it will list all the commits that form part of the
+    # merge)
+    # Conflicting tags are not a problem for buildman, since it does not use
+    # them. For example, Series-version is not useful for buildman. On the
+    # other hand, conflicting tags would cause an error, so allow later tags
+    # to overwrite earlier ones by setting allow_overwrite=True
+    if options.branch:
+        if count == -1:
+            if has_range:
+                range_expr = options.branch
+            else:
+                range_expr = gitutil.GetRangeInBranch(options.git_dir,
+                                                      options.branch)
+            upstream_commit = gitutil.GetUpstream(options.git_dir,
+                                                  options.branch)
+            series = patchstream.GetMetaDataForList(upstream_commit,
+                options.git_dir, 1, series=None, allow_overwrite=True)
+
+            series = patchstream.GetMetaDataForList(range_expr,
+                    options.git_dir, None, series, allow_overwrite=True)
+        else:
+            # Honour the count
+            series = patchstream.GetMetaDataForList(options.branch,
+                    options.git_dir, count, series=None, allow_overwrite=True)
+    else:
+        series = None
+        if not options.dry_run:
+            options.verbose = True
+            if not options.summary:
+                options.show_errors = True
+
+    # By default we have one thread per CPU. But if there are not enough jobs
+    # we can have fewer threads and use a high '-j' value for make.
+    if not options.threads:
+        options.threads = min(multiprocessing.cpu_count(), len(selected))
+    if not options.jobs:
+        options.jobs = max(1, (multiprocessing.cpu_count() +
+                len(selected) - 1) / len(selected))
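+    # Illustrative arithmetic: with 8 CPUs and 3 selected boards this gives
+    # threads = min(8, 3) = 3 and jobs = max(1, (8 + 3 - 1) / 3) = 3.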
+
+    if not options.step:
+        options.step = len(series.commits) - 1
+
+    gnu_make = command.Output(os.path.join(options.git,
+            'scripts/show-gnu-make'), raise_on_error=False).rstrip()
+    if not gnu_make:
+        sys.exit('GNU Make not found')
+
+    # Create a new builder with the selected options.
+    output_dir = options.output_dir
+    if options.branch:
+        dirname = options.branch.replace('/', '_')
+        # As a special case allow the board directory to be placed in the
+        # output directory itself rather than any subdirectory.
+        if not options.no_subdirs:
+            output_dir = os.path.join(options.output_dir, dirname)
+        if clean_dir and os.path.exists(output_dir):
+            shutil.rmtree(output_dir)
+    CheckOutputDir(output_dir)
+    builder = Builder(toolchains, output_dir, options.git_dir,
+            options.threads, options.jobs, gnu_make=gnu_make, checkout=True,
+            show_unknown=options.show_unknown, step=options.step,
+            no_subdirs=options.no_subdirs, full_path=options.full_path,
+            verbose_build=options.verbose_build,
+            incremental=options.incremental,
+            per_board_out_dir=options.per_board_out_dir,
+            config_only=options.config_only,
+            squash_config_y=not options.preserve_config_y,
+            warnings_as_errors=options.warnings_as_errors)
+    builder.force_config_on_failure = not options.quick
+    if make_func:
+        builder.do_make = make_func
+
+    # For a dry run, just show our actions as a sanity check
+    if options.dry_run:
+        ShowActions(series, why_selected, selected, builder, options,
+                    board_warnings)
+    else:
+        builder.force_build = options.force_build
+        builder.force_build_failures = options.force_build_failures
+        builder.force_reconfig = options.force_reconfig
+        builder.in_tree = options.in_tree
+
+        # Work out which boards to build
+        board_selected = boards.GetSelectedDict()
+
+        if series:
+            commits = series.commits
+            # Number the commits for test purposes
+            for commit in range(len(commits)):
+                commits[commit].sequence = commit
+        else:
+            commits = None
+
+        Print(GetActionSummary(options.summary, commits, board_selected,
+                                options))
+
+        # We can't show function sizes without board details at present
+        if options.show_bloat:
+            options.show_detail = True
+        builder.SetDisplayOptions(options.show_errors, options.show_sizes,
+                                  options.show_detail, options.show_bloat,
+                                  options.list_error_boards,
+                                  options.show_config,
+                                  options.show_environment)
+        if options.summary:
+            builder.ShowSummary(commits, board_selected)
+        else:
+            fail, warned = builder.BuildBoards(commits, board_selected,
+                                options.keep_outputs, options.verbose)
+            if fail:
+                return 128
+            elif warned:
+                return 129
+    return 0
diff --git a/tools/u-boot-tools/buildman/func_test.py b/tools/u-boot-tools/buildman/func_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..119d02cbb2b64a69c0c06cc1556d318acaefb2b2
--- /dev/null
+++ b/tools/u-boot-tools/buildman/func_test.py
@@ -0,0 +1,535 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2014 Google, Inc
+#
+
+import os
+import shutil
+import sys
+import tempfile
+import unittest
+
+import board
+import bsettings
+import cmdline
+import command
+import control
+import gitutil
+import terminal
+import toolchain
+
+settings_data = '''
+# Buildman settings file
+
+[toolchain]
+
+[toolchain-alias]
+
+[make-flags]
+src=/home/sjg/c/src
+chroot=/home/sjg/c/chroot
+vboot=VBOOT_DEBUG=1 MAKEFLAGS_VBOOT=DEBUG=1 CFLAGS_EXTRA_VBOOT=-DUNROLL_LOOPS VBOOT_SOURCE=${src}/platform/vboot_reference
+chromeos_coreboot=VBOOT=${chroot}/build/link/usr ${vboot}
+chromeos_daisy=VBOOT=${chroot}/build/daisy/usr ${vboot}
+chromeos_peach=VBOOT=${chroot}/build/peach_pit/usr ${vboot}
+'''
+
+boards = [
+    ['Active', 'arm', 'armv7', '', 'Tester', 'ARM Board 1', 'board0',  ''],
+    ['Active', 'arm', 'armv7', '', 'Tester', 'ARM Board 2', 'board1', ''],
+    ['Active', 'powerpc', 'powerpc', '', 'Tester', 'PowerPC board 1', 'board2', ''],
+    ['Active', 'sandbox', 'sandbox', '', 'Tester', 'Sandbox board', 'board4', ''],
+]
+
+commit_shortlog = """4aca821 patman: Avoid changing the order of tags
+39403bb patman: Use --no-pager' to stop git from forking a pager
+db6e6f2 patman: Remove the -a option
+f2ccf03 patman: Correct unit tests to run correctly
+1d097f9 patman: Fix indentation in terminal.py
+d073747 patman: Support the 'reverse' option for 'git log
+"""
+
+commit_log = ["""commit 7f6b8315d18f683c5181d0c3694818c1b2a20dcd
+Author: Masahiro Yamada <yamada.m@jp.panasonic.com>
+Date:   Fri Aug 22 19:12:41 2014 +0900
+
+    buildman: refactor help message
+
+    "buildman [options]" is displayed by default.
+
+    Append the rest of help messages to parser.usage
+    instead of replacing it.
+
+    Besides, "-b <branch>" is not mandatory since commit fea5858e.
+    Drop it from the usage.
+
+    Signed-off-by: Masahiro Yamada <yamada.m@jp.panasonic.com>
+""",
+"""commit d0737479be6baf4db5e2cdbee123e96bc5ed0ba8
+Author: Simon Glass <sjg@chromium.org>
+Date:   Thu Aug 14 16:48:25 2014 -0600
+
+    patman: Support the 'reverse' option for 'git log'
+
+    This option is currently not supported, but needs to be, for buildman to
+    operate as expected.
+
+    Series-changes: 7
+    - Add new patch to fix the 'reverse' bug
+
+    Series-version: 8
+
+    Change-Id: I79078f792e8b390b8a1272a8023537821d45feda
+    Reported-by: York Sun <yorksun@freescale.com>
+    Signed-off-by: Simon Glass <sjg@chromium.org>
+
+""",
+"""commit 1d097f9ab487c5019152fd47bda126839f3bf9fc
+Author: Simon Glass <sjg@chromium.org>
+Date:   Sat Aug 9 11:44:32 2014 -0600
+
+    patman: Fix indentation in terminal.py
+
+    This code came from a different project with 2-character indentation. Fix
+    it for U-Boot.
+
+    Series-changes: 6
+    - Add new patch to fix indentation in teminal.py
+
+    Change-Id: I5a74d2ebbb3cc12a665f5c725064009ac96e8a34
+    Signed-off-by: Simon Glass <sjg@chromium.org>
+
+""",
+"""commit f2ccf03869d1e152c836515a3ceb83cdfe04a105
+Author: Simon Glass <sjg@chromium.org>
+Date:   Sat Aug 9 11:08:24 2014 -0600
+
+    patman: Correct unit tests to run correctly
+
+    It seems that doctest behaves differently now, and some of the unit tests
+    do not run. Adjust the tests to work correctly.
+
+     ./tools/patman/patman --test
+    <unittest.result.TestResult run=10 errors=0 failures=0>
+
+    Series-changes: 6
+    - Add new patch to fix patman unit tests
+
+    Change-Id: I3d2ca588f4933e1f9d6b1665a00e4ae58269ff3b
+
+""",
+"""commit db6e6f2f9331c5a37647d6668768d4a40b8b0d1c
+Author: Simon Glass <sjg@chromium.org>
+Date:   Sat Aug 9 12:06:02 2014 -0600
+
+    patman: Remove the -a option
+
+    It seems that this is no longer needed, since checkpatch.pl will catch
+    whitespace problems in patches. Also the option is not widely used, so
+    it seems safe to just remove it.
+
+    Series-changes: 6
+    - Add new patch to remove patman's -a option
+
+    Suggested-by: Masahiro Yamada <yamada.m@jp.panasonic.com>
+    Change-Id: I5821a1c75154e532c46513486ca40b808de7e2cc
+
+""",
+"""commit 39403bb4f838153028a6f21ca30bf100f3791133
+Author: Simon Glass <sjg@chromium.org>
+Date:   Thu Aug 14 21:50:52 2014 -0600
+
+    patman: Use --no-pager' to stop git from forking a pager
+
+""",
+"""commit 4aca821e27e97925c039e69fd37375b09c6f129c
+Author: Simon Glass <sjg@chromium.org>
+Date:   Fri Aug 22 15:57:39 2014 -0600
+
+    patman: Avoid changing the order of tags
+
+    patman collects tags that it sees in the commit and places them nicely
+    sorted at the end of the patch. However, this is not really necessary and
+    in fact is apparently not desirable.
+
+    Series-changes: 9
+    - Add new patch to avoid changing the order of tags
+
+    Series-version: 9
+
+    Suggested-by: Masahiro Yamada <yamada.m@jp.panasonic.com>
+    Change-Id: Ib1518588c1a189ad5c3198aae76f8654aed8d0db
+"""]
+
+TEST_BRANCH = '__testbranch'
+
+class TestFunctional(unittest.TestCase):
+    """Functional test for buildman.
+
+    This aims to test from just below the invocation of buildman (parsing
+    of arguments) to 'make' and 'git' invocation. It is not a true
+    end-to-end test, as it mocks git, make and the tool chain. But this
+    makes it easier to detect when the builder is doing the wrong thing,
+    since in many cases this test code will fail. For example, only a
+    very limited subset of 'git' arguments is supported - anything
+    unexpected will fail.
+    """
+    def setUp(self):
+        self._base_dir = tempfile.mkdtemp()
+        self._git_dir = os.path.join(self._base_dir, 'src')
+        self._buildman_pathname = sys.argv[0]
+        self._buildman_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
+        command.test_result = self._HandleCommand
+        self.setupToolchains()
+        self._toolchains.Add('arm-gcc', test=False)
+        self._toolchains.Add('powerpc-gcc', test=False)
+        bsettings.Setup(None)
+        bsettings.AddFile(settings_data)
+        self._boards = board.Boards()
+        for brd in boards:
+            self._boards.AddBoard(board.Board(*brd))
+
+        # Directories where the source has been cloned
+        self._clone_dirs = []
+        self._commits = len(commit_shortlog.splitlines()) + 1
+        self._total_builds = self._commits * len(boards)
+
+        # Number of calls to make
+        self._make_calls = 0
+
+        # Map of [board, commit] to error messages
+        self._error = {}
+
+        self._test_branch = TEST_BRANCH
+
+        # Avoid sending any output and clear all terminal output
+        terminal.SetPrintTestMode()
+        terminal.GetPrintTestLines()
+
+    def tearDown(self):
+        shutil.rmtree(self._base_dir)
+
+    def setupToolchains(self):
+        self._toolchains = toolchain.Toolchains()
+        self._toolchains.Add('gcc', test=False)
+
+    def _RunBuildman(self, *args):
+        return command.RunPipe([[self._buildman_pathname] + list(args)],
+                capture=True, capture_stderr=True)
+
+    def _RunControl(self, *args, **kwargs):
+        sys.argv = [sys.argv[0]] + list(args)
+        options, args = cmdline.ParseArgs()
+        result = control.DoBuildman(options, args, toolchains=self._toolchains,
+                make_func=self._HandleMake, boards=self._boards,
+                clean_dir=kwargs.get('clean_dir', True))
+        self._builder = control.builder
+        return result
+
+    def testFullHelp(self):
+        command.test_result = None
+        result = self._RunBuildman('-H')
+        help_file = os.path.join(self._buildman_dir, 'README')
+        # Remove possible extraneous strings
+        extra = '::::::::::::::\n' + help_file + '\n::::::::::::::\n'
+        gothelp = result.stdout.replace(extra, '')
+        self.assertEqual(len(gothelp), os.path.getsize(help_file))
+        self.assertEqual(0, len(result.stderr))
+        self.assertEqual(0, result.return_code)
+
+    def testHelp(self):
+        command.test_result = None
+        result = self._RunBuildman('-h')
+        help_file = os.path.join(self._buildman_dir, 'README')
+        self.assertTrue(len(result.stdout) > 1000)
+        self.assertEqual(0, len(result.stderr))
+        self.assertEqual(0, result.return_code)
+
+    def testGitSetup(self):
+        """Test gitutils.Setup(), from outside the module itself"""
+        command.test_result = command.CommandResult(return_code=1)
+        gitutil.Setup()
+        self.assertEqual(gitutil.use_no_decorate, False)
+
+        command.test_result = command.CommandResult(return_code=0)
+        gitutil.Setup()
+        self.assertEqual(gitutil.use_no_decorate, True)
+
+    def _HandleCommandGitLog(self, args):
+        if args[-1] == '--':
+            args = args[:-1]
+        if '-n0' in args:
+            return command.CommandResult(return_code=0)
+        elif args[-1] == 'upstream/master..%s' % self._test_branch:
+            return command.CommandResult(return_code=0, stdout=commit_shortlog)
+        elif args[:3] == ['--no-color', '--no-decorate', '--reverse']:
+            if args[-1] == self._test_branch:
+                count = int(args[3][2:])
+                return command.CommandResult(return_code=0,
+                                            stdout=''.join(commit_log[:count]))
+
+        # Not handled, so abort
+        print 'git log', args
+        sys.exit(1)
+
+    def _HandleCommandGitConfig(self, args):
+        config = args[0]
+        if config == 'sendemail.aliasesfile':
+            return command.CommandResult(return_code=0)
+        elif config.startswith('branch.badbranch'):
+            return command.CommandResult(return_code=1)
+        elif config == 'branch.%s.remote' % self._test_branch:
+            return command.CommandResult(return_code=0, stdout='upstream\n')
+        elif config == 'branch.%s.merge' % self._test_branch:
+            return command.CommandResult(return_code=0,
+                                         stdout='refs/heads/master\n')
+
+        # Not handled, so abort
+        print 'git config', args
+        sys.exit(1)
+
+    def _HandleCommandGit(self, in_args):
+        """Handle execution of a git command
+
+        This uses a hacked-up parser.
+
+        Args:
+            in_args: Arguments after 'git' from the command line
+        """
+        git_args = []           # Top-level arguments to git itself
+        sub_cmd = None          # Git sub-command selected
+        args = []               # Arguments to the git sub-command
+        for arg in in_args:
+            if sub_cmd:
+                args.append(arg)
+            elif arg[0] == '-':
+                git_args.append(arg)
+            else:
+                if git_args and git_args[-1] in ['--git-dir', '--work-tree']:
+                    git_args.append(arg)
+                else:
+                    sub_cmd = arg
+        if sub_cmd == 'config':
+            return self._HandleCommandGitConfig(args)
+        elif sub_cmd == 'log':
+            return self._HandleCommandGitLog(args)
+        elif sub_cmd == 'clone':
+            return command.CommandResult(return_code=0)
+        elif sub_cmd == 'checkout':
+            return command.CommandResult(return_code=0)
+
+        # Not handled, so abort
+        print 'git', git_args, sub_cmd, args
+        sys.exit(1)
+
+    def _HandleCommandNm(self, args):
+        return command.CommandResult(return_code=0)
+
+    def _HandleCommandObjdump(self, args):
+        return command.CommandResult(return_code=0)
+
+    def _HandleCommandObjcopy(self, args):
+        return command.CommandResult(return_code=0)
+
+    def _HandleCommandSize(self, args):
+        return command.CommandResult(return_code=0)
+
+    def _HandleCommand(self, **kwargs):
+        """Handle a command execution.
+
+        The command is in kwargs['pipe_list'], a list of commands (each
+        itself a list of arguments) to be piped together. The command should
+        be emulated as required for
+        testing purposes.
+
+        Returns:
+            A CommandResult object
+        """
+        pipe_list = kwargs['pipe_list']
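+        # Illustrative shape: [['git', 'log', ...], ['wc', '-l']]
+        # (a pipeline of commands, each a list of arguments)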
+        wc = False
+        if len(pipe_list) != 1:
+            if pipe_list[1] == ['wc', '-l']:
+                wc = True
+            else:
+                print 'invalid pipe', kwargs
+                sys.exit(1)
+        cmd = pipe_list[0][0]
+        args = pipe_list[0][1:]
+        result = None
+        if cmd == 'git':
+            result = self._HandleCommandGit(args)
+        elif cmd == './scripts/show-gnu-make':
+            return command.CommandResult(return_code=0, stdout='make')
+        elif cmd.endswith('nm'):
+            return self._HandleCommandNm(args)
+        elif cmd.endswith('objdump'):
+            return self._HandleCommandObjdump(args)
+        elif cmd.endswith('objcopy'):
+            return self._HandleCommandObjcopy(args)
+        elif cmd.endswith('size'):
+            return self._HandleCommandSize(args)
+
+        if not result:
+            # Not handled, so abort
+            print 'unknown command', kwargs
+            sys.exit(1)
+
+        if wc:
+            result.stdout = len(result.stdout.splitlines())
+        return result
+
+    def _HandleMake(self, commit, brd, stage, cwd, *args, **kwargs):
+        """Handle execution of 'make'
+
+        Args:
+            commit: Commit object that is being built
+            brd: Board object that is being built
+            stage: Stage that we are at (mrproper, config, build)
+            cwd: Directory where make should be run
+            args: Arguments to pass to make
+            kwargs: Arguments to pass to command.RunPipe()
+        """
+        self._make_calls += 1
+        if stage == 'mrproper':
+            return command.CommandResult(return_code=0)
+        elif stage == 'config':
+            return command.CommandResult(return_code=0,
+                    combined='Test configuration complete')
+        elif stage == 'build':
+            stderr = ''
+            if type(commit) is not str:
+                stderr = self._error.get((brd.target, commit.sequence))
+            if stderr:
+                return command.CommandResult(return_code=1, stderr=stderr)
+            return command.CommandResult(return_code=0)
+
+        # Not handled, so abort
+        print 'make', stage
+        sys.exit(1)
+
+    # Example function to print output lines
+    def print_lines(self, lines):
+        print len(lines)
+        for line in lines:
+            print line
+        #self.print_lines(terminal.GetPrintTestLines())
+
+    def testNoBoards(self):
+        """Test that buildman aborts when there are no boards"""
+        self._boards = board.Boards()
+        with self.assertRaises(SystemExit):
+            self._RunControl()
+
+    def testCurrentSource(self):
+        """Very simple test to invoke buildman on the current source"""
+        self.setupToolchains()
+        self._RunControl()
+        lines = terminal.GetPrintTestLines()
+        self.assertIn('Building current source for %d boards' % len(boards),
+                      lines[0].text)
+
+    def testBadBranch(self):
+        """Test that we can detect an invalid branch"""
+        with self.assertRaises(ValueError):
+            self._RunControl('-b', 'badbranch')
+
+    def testBadToolchain(self):
+        """Test that missing toolchains are detected"""
+        self.setupToolchains()
+        ret_code = self._RunControl('-b', TEST_BRANCH)
+        lines = terminal.GetPrintTestLines()
+
+        # Buildman always builds the upstream commit as well
+        self.assertIn('Building %d commits for %d boards' %
+                (self._commits, len(boards)), lines[0].text)
+        self.assertEqual(self._builder.count, self._total_builds)
+
+        # Only sandbox should succeed, the others don't have toolchains
+        self.assertEqual(self._builder.fail,
+                         self._total_builds - self._commits)
+        self.assertEqual(ret_code, 128)
+
+        for commit in range(self._commits):
+            for board in self._boards.GetList():
+                if board.arch != 'sandbox':
+                    errfile = self._builder.GetErrFile(commit, board.target)
+                    fd = open(errfile)
+                    self.assertEqual(fd.readlines(),
+                            ['No tool chain for %s\n' % board.arch])
+                    fd.close()
+
+    def testBranch(self):
+        """Test building a branch with all toolchains present"""
+        self._RunControl('-b', TEST_BRANCH)
+        self.assertEqual(self._builder.count, self._total_builds)
+        self.assertEqual(self._builder.fail, 0)
+
+    def testCount(self):
+        """Test building a specific number of commitst"""
+        self._RunControl('-b', TEST_BRANCH, '-c2')
+        self.assertEqual(self._builder.count, 2 * len(boards))
+        self.assertEqual(self._builder.fail, 0)
+        # Each board has a mrproper, config, and then one make per commit
+        self.assertEqual(self._make_calls, len(boards) * (2 + 2))
+
+    def testIncremental(self):
+        """Test building a branch twice - the second time should do nothing"""
+        self._RunControl('-b', TEST_BRANCH)
+
+        # Each board has a mrproper, config, and then one make per commit
+        self.assertEqual(self._make_calls, len(boards) * (self._commits + 2))
+        self._make_calls = 0
+        self._RunControl('-b', TEST_BRANCH, clean_dir=False)
+        self.assertEqual(self._make_calls, 0)
+        self.assertEqual(self._builder.count, self._total_builds)
+        self.assertEqual(self._builder.fail, 0)
+
+    def testForceBuild(self):
+        """The -f flag should force a rebuild"""
+        self._RunControl('-b', TEST_BRANCH)
+        self._make_calls = 0
+        self._RunControl('-b', TEST_BRANCH, '-f', clean_dir=False)
+        # Each board has a mrproper, config, and then one make per commit
+        self.assertEqual(self._make_calls, len(boards) * (self._commits + 2))
+
+    def testForceReconfigure(self):
+        """The -f flag should force a rebuild"""
+        self._RunControl('-b', TEST_BRANCH, '-C')
+        # Each commit has a mrproper, config and make
+        self.assertEqual(self._make_calls, len(boards) * self._commits * 3)
+
+    def testErrors(self):
+        """Test handling of build errors"""
+        self._error['board2', 1] = 'fred\n'
+        self._RunControl('-b', TEST_BRANCH)
+        self.assertEqual(self._builder.count, self._total_builds)
+        self.assertEqual(self._builder.fail, 1)
+
+        # Remove the error. This should have no effect since the commit will
+        # not be rebuilt
+        del self._error['board2', 1]
+        self._make_calls = 0
+        self._RunControl('-b', TEST_BRANCH, clean_dir=False)
+        self.assertEqual(self._builder.count, self._total_builds)
+        self.assertEqual(self._make_calls, 0)
+        self.assertEqual(self._builder.fail, 1)
+
+        # Now use the -F flag to force rebuild of the bad commit
+        self._RunControl('-b', TEST_BRANCH, '-F', clean_dir=False)
+        self.assertEqual(self._builder.count, self._total_builds)
+        self.assertEqual(self._builder.fail, 0)
+        self.assertEqual(self._make_calls, 3)
+
+    def testBranchWithSlash(self):
+        """Test building a branch with a '/' in the name"""
+        self._test_branch = '/__dev/__testbranch'
+        self._RunControl('-b', self._test_branch, clean_dir=False)
+        self.assertEqual(self._builder.count, self._total_builds)
+        self.assertEqual(self._builder.fail, 0)
+
+    def testBadOutputDir(self):
+        """Test building with an output dir the same as out current dir"""
+        self._test_branch = '/__dev/__testbranch'
+        with self.assertRaises(SystemExit):
+            self._RunControl('-b', self._test_branch, '-o', os.getcwd())
+        with self.assertRaises(SystemExit):
+            self._RunControl('-b', self._test_branch, '-o',
+                             os.path.join(os.getcwd(), 'test'))
diff --git a/tools/u-boot-tools/buildman/kconfiglib.py b/tools/u-boot-tools/buildman/kconfiglib.py
new file mode 100644
index 0000000000000000000000000000000000000000..d68af056b6bd4948e7d239bf1a946a9875a95d3a
--- /dev/null
+++ b/tools/u-boot-tools/buildman/kconfiglib.py
@@ -0,0 +1,3544 @@
+# SPDX-License-Identifier: ISC
+#
+# Author: Ulf Magnusson
+#   https://github.com/ulfalizer/Kconfiglib
+
+# This is Kconfiglib, a Python library for scripting, debugging, and extracting
+# information from Kconfig-based configuration systems. To view the
+# documentation, run
+#
+#  $ pydoc kconfiglib
+#
+# or, if you prefer HTML,
+#
+#  $ pydoc -w kconfiglib
+#
+# The examples/ subdirectory contains examples, to be run with e.g.
+#
+#  $ make scriptconfig SCRIPT=Kconfiglib/examples/print_tree.py
+#
+# Look in testsuite.py for the test suite.
+
+"""
+Kconfiglib is a Python library for scripting and extracting information from
+Kconfig-based configuration systems. Features include the following:
+
+ - Symbol values and properties can be looked up and values assigned
+   programmatically.
+ - .config files can be read and written.
+ - Expressions can be evaluated in the context of a Kconfig configuration.
+ - Relations between symbols can be quickly determined, such as finding all
+   symbols that reference a particular symbol.
+ - Highly compatible with the scripts/kconfig/*conf utilities. The test suite
+   automatically compares outputs between Kconfiglib and the C implementation
+   for a large number of cases.
+
+For the Linux kernel, scripts are run using
+
+ $ make scriptconfig [ARCH=<arch>] SCRIPT=<path to script> [SCRIPT_ARG=<arg>]
+
+Using the 'scriptconfig' target ensures that required environment variables
+(SRCARCH, ARCH, srctree, KERNELVERSION, etc.) are set up correctly.
+
+Scripts receive the name of the Kconfig file to load in sys.argv[1]. As of
+Linux 4.1.0-rc5, this is always "Kconfig" from the kernel top-level directory.
+If an argument is provided with SCRIPT_ARG, it appears as sys.argv[2].
+
+To get an interactive Python prompt with Kconfiglib preloaded and a Config
+object 'c' created, run
+
+ $ make iscriptconfig [ARCH=<arch>]
+
+Kconfiglib supports both Python 2 and Python 3. For (i)scriptconfig, the Python
+interpreter to use can be passed in PYTHONCMD, which defaults to 'python'. PyPy
+works well too, and might give a nice speedup for long-running jobs.
+
+The examples/ directory contains short example scripts, which can be run with
+e.g.
+
+ $ make scriptconfig SCRIPT=Kconfiglib/examples/print_tree.py
+
+or
+
+ $ make scriptconfig SCRIPT=Kconfiglib/examples/help_grep.py SCRIPT_ARG=kernel
+
+testsuite.py contains the test suite. See the top of the script for how to run
+it.
+
+Credits: Written by Ulf "Ulfalizer" Magnusson
+
+Send bug reports, suggestions and other feedback to ulfalizer a.t Google's
+email service. Don't wrestle with internal APIs. Tell me what you need and I
+might add it in a safe way as a client API instead."""
+
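+# A minimal usage sketch (illustrative; assumes a Kconfig tree is available
+# in the current directory):
+#
+#   import kconfiglib
+#   conf = kconfiglib.Config("Kconfig")
+#   for sym in conf:              # defined symbols, in definition order
+#       print(sym.name)
+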
+import os
+import platform
+import re
+import sys
+
+# File layout:
+#
+# Public classes
+# Public functions
+# Internal classes
+# Internal functions
+# Internal global constants
+
+# Line length: 79 columns
+
+#
+# Public classes
+#
+
+class Config(object):
+
+    """Represents a Kconfig configuration, e.g. for i386 or ARM. This is the
+    set of symbols and other items appearing in the configuration together with
+    their values. Creating any number of Config objects -- including for
+    different architectures -- is safe; Kconfiglib has no global state."""
+
+    #
+    # Public interface
+    #
+
+    def __init__(self, filename="Kconfig", base_dir=None, print_warnings=True,
+                 print_undef_assign=False):
+        """Creates a new Config object, representing a Kconfig configuration.
+        Raises Kconfig_Syntax_Error on syntax errors.
+
+        filename (default: "Kconfig"): The base Kconfig file of the
+           configuration. For the Linux kernel, you'll probably want "Kconfig"
+           from the top-level directory, as environment variables will make
+           sure the right Kconfig is included from there
+           (arch/<architecture>/Kconfig). If you are using Kconfiglib via 'make
+           scriptconfig', the filename of the base Kconfig file will be in
+           sys.argv[1].
+
+        base_dir (default: None): The base directory relative to which 'source'
+           statements within Kconfig files will work. For the Linux kernel this
+           should be the top-level directory of the kernel tree. $-references
+           to existing environment variables will be expanded.
+
+           If None (the default), the environment variable 'srctree' will be
+           used if set, and the current directory otherwise. 'srctree' is set
+           by the Linux makefiles to the top-level kernel directory. A default
+           of "." would not work with an alternative build directory.
+
+        print_warnings (default: True): Set to True if warnings related to this
+           configuration should be printed to stderr. This can be changed later
+           with Config.set_print_warnings(). It is provided as a constructor
+           argument since warnings might be generated during parsing.
+
+        print_undef_assign (default: False): Set to True if informational
+           messages related to assignments to undefined symbols should be
+           printed to stderr for this configuration. Can be changed later with
+           Config.set_print_undef_assign()."""
+
+        # The set of all symbols, indexed by name (a string)
+        self.syms = {}
+        # Python 2/3 compatibility hack. This is the only one needed.
+        self.syms_iter = self.syms.values if sys.version_info[0] >= 3 else \
+                         self.syms.itervalues
+
+        # The set of all defined symbols in the configuration in the order they
+        # appear in the Kconfig files. This excludes the special symbols n, m,
+        # and y as well as symbols that are referenced but never defined.
+        self.kconfig_syms = []
+
+        # The set of all named choices (yes, choices can have names), indexed
+        # by name (a string)
+        self.named_choices = {}
+
+        # Lists containing all choices, menus and comments in the configuration
+        self.choices = []
+        self.menus = []
+        self.comments = []
+
+        def register_special_symbol(type_, name, val):
+            sym = Symbol()
+            sym.is_special_ = True
+            sym.is_defined_ = True
+            sym.config = self
+            sym.name = name
+            sym.type = type_
+            sym.cached_val = val
+            self.syms[name] = sym
+            return sym
+
+        # The special symbols n, m and y, used as shorthand for "n", "m" and
+        # "y"
+        self.n = register_special_symbol(TRISTATE, "n", "n")
+        self.m = register_special_symbol(TRISTATE, "m", "m")
+        self.y = register_special_symbol(TRISTATE, "y", "y")
+        # DEFCONFIG_LIST uses this
+        register_special_symbol(STRING, "UNAME_RELEASE", platform.uname()[2])
+
+        # The symbol with "option defconfig_list" set, containing a list of
+        # default .config files
+        self.defconfig_sym = None
+
+        # See Symbol.get_(src)arch()
+        self.arch = os.environ.get("ARCH")
+        self.srcarch = os.environ.get("SRCARCH")
+
+        # If CONFIG_ is set in the environment, Kconfig will prefix all
+        # symbols with its value when saving the configuration, instead of
+        # using the default "CONFIG_" prefix.
+        self.config_prefix = os.environ.get("CONFIG_")
+        if self.config_prefix is None:
+            self.config_prefix = "CONFIG_"
+
+        # See Config.__init__(). We need this for get_defconfig_filename().
+        self.srctree = os.environ.get("srctree")
+        if self.srctree is None:
+            self.srctree = "."
+
+        self.filename = filename
+        self.base_dir = self.srctree if base_dir is None else \
+                        os.path.expandvars(base_dir)
+
+        # The 'mainmenu' text
+        self.mainmenu_text = None
+
+        # The filename of the most recently loaded .config file
+        self.config_filename = None
+        # The textual header of the most recently loaded .config, uncommented
+        self.config_header = None
+
+        self.print_warnings = print_warnings
+        self.print_undef_assign = print_undef_assign
+        self._warnings = []
+
+        # For parsing routines that stop when finding a line belonging to a
+        # different construct, these hold that line and the tokenized version
+        # of that line. The purpose is to avoid having to re-tokenize the line,
+        # which is inefficient and causes problems when recording references to
+        # symbols.
+        self.end_line = None
+        self.end_line_tokens = None
+
+        # See the comment in _parse_expr().
+        self._cur_item = None
+        self._line = None
+        self._filename = None
+        self._linenr = None
+        self._transform_m = None
+
+        # Parse the Kconfig files
+        self.top_block = []
+        self._parse_file(filename, None, None, None, self.top_block)
+
+        # Build Symbol.dep for all symbols
+        self._build_dep()
+
+    def get_arch(self):
+        """Returns the value the environment variable ARCH had at the time the
+        Config instance was created, or None if ARCH was not set. For the
+        kernel, this corresponds to the architecture being built for, with
+        values such as "i386" or "mips"."""
+        return self.arch
+
+    def get_srcarch(self):
+        """Returns the value the environment variable SRCARCH had at the time
+        the Config instance was created, or None if SRCARCH was not set. For
+        the kernel, this corresponds to the particular arch/ subdirectory
+        containing architecture-specific code."""
+        return self.srcarch
+
+    def get_srctree(self):
+        """Returns the value the environment variable srctree had at the time
+        the Config instance was created, or None if srctree was not defined.
+        This variable points to the source directory and is used when building
+        in a separate directory."""
+        return self.srctree
+
+    def get_base_dir(self):
+        """Returns the base directory relative to which 'source' statements
+        will work, passed as an argument to Config.__init__()."""
+        return self.base_dir
+
+    def get_kconfig_filename(self):
+        """Returns the name of the (base) kconfig file this configuration was
+        loaded from."""
+        return self.filename
+
+    def get_config_filename(self):
+        """Returns the filename of the most recently loaded configuration file,
+        or None if no configuration has been loaded."""
+        return self.config_filename
+
+    def get_config_header(self):
+        """Returns the (uncommented) textual header of the .config file most
+        recently loaded with load_config(). Returns None if no .config file has
+        been loaded or if the most recently loaded .config file has no header.
+        The header consists of all lines up to but not including the first line
+        that either
+
+        1. Does not start with "#"
+        2. Has the form "# CONFIG_FOO is not set."
+        """
+        return self.config_header
+
+    def get_mainmenu_text(self):
+        """Returns the text of the 'mainmenu' statement (with $-references to
+        symbols replaced by symbol values), or None if the configuration has no
+        'mainmenu' statement."""
+        return None if self.mainmenu_text is None else \
+          self._expand_sym_refs(self.mainmenu_text)
+
+    def get_defconfig_filename(self):
+        """Returns the name of the defconfig file, which is the first existing
+        file in the list given in a symbol having 'option defconfig_list' set.
+        $-references to symbols will be expanded ("$FOO bar" -> "foo bar" if
+        FOO has the value "foo"). Returns None in case of no defconfig file.
+        Setting 'option defconfig_list' on multiple symbols currently results
+        in undefined behavior.
+
+        If the environment variable 'srctree' was set when the Config was
+        created, get_defconfig_filename() will first look relative to that
+        directory before looking in the current directory; see
+        Config.__init__().
+
+        WARNING: A wart here is that scripts/kconfig/Makefile sometimes uses
+        the --defconfig=<defconfig> option when calling the C implementation of
+        e.g. 'make defconfig'. This option overrides the 'option
+        defconfig_list' symbol, meaning the result from
+        get_defconfig_filename() might not match what 'make defconfig' would
+        use. That probably ought to be worked around somehow, so that this
+        function always gives the "expected" result."""
+        if self.defconfig_sym is None:
+            return None
+        for filename, cond_expr in self.defconfig_sym.def_exprs:
+            if self._eval_expr(cond_expr) == "y":
+                filename = self._expand_sym_refs(filename)
+                # We first look in $srctree. os.path.join() won't work here as
+                # an absolute path in filename would override $srctree.
+                srctree_filename = os.path.normpath(self.srctree + "/" +
+                                                    filename)
+                if os.path.exists(srctree_filename):
+                    return srctree_filename
+                if os.path.exists(filename):
+                    return filename
+        return None
+
+    def get_symbol(self, name):
+        """Returns the symbol with name 'name', or None if no such symbol
+        appears in the configuration. An alternative shorthand is conf[name],
+        where conf is a Config instance, though that will instead raise
+        KeyError if the symbol does not exist."""
+        return self.syms.get(name)
+
+    def __getitem__(self, name):
+        """Returns the symbol with name 'name'. Raises KeyError if the symbol
+        does not appear in the configuration."""
+        return self.syms[name]
+
+    def get_symbols(self, all_symbols=True):
+        """Returns a list of symbols from the configuration. An alternative for
+        iterating over all defined symbols (in the order of definition) is
+
+        for sym in config:
+            ...
+
+        which relies on Config implementing __iter__() and is equivalent to
+
+        for sym in config.get_symbols(False):
+            ...
+
+        all_symbols (default: True): If True, all symbols -- including special
+           and undefined symbols -- will be included in the result, in an
+           undefined order. If False, only symbols actually defined and not
+           merely referred to in the configuration will be included in the
+           result, and will appear in the order that they are defined within
+           the Kconfig configuration files."""
+        return list(self.syms.values()) if all_symbols else self.kconfig_syms
+
+    def __iter__(self):
+        """Convenience function for iterating over the set of all defined
+        symbols in the configuration, used like
+
+        for sym in conf:
+            ...
+
+        The iteration happens in the order of definition within the Kconfig
+        configuration files. Symbols only referred to but not defined will not
+        be included, nor will the special symbols n, m, and y. If you want to
+        include such symbols as well, see config.get_symbols()."""
+        return iter(self.kconfig_syms)
+
+    def get_choices(self):
+        """Returns a list containing all choice statements in the
+        configuration, in the order they appear in the Kconfig files."""
+        return self.choices
+
+    def get_menus(self):
+        """Returns a list containing all menus in the configuration, in the
+        order they appear in the Kconfig files."""
+        return self.menus
+
+    def get_comments(self):
+        """Returns a list containing all comments in the configuration, in the
+        order they appear in the Kconfig files."""
+        return self.comments
+
+    def get_top_level_items(self):
+        """Returns a list containing the items (symbols, menus, choices, and
+        comments) at the top level of the configuration -- that is, all items
+        that do not appear within a menu or choice. The items appear in the
+        same order as within the configuration."""
+        return self.top_block
+
+    def load_config(self, filename, replace=True):
+        """Loads symbol values from a file in the familiar .config format.
+        Equivalent to calling Symbol.set_user_value() to set each of the
+        values.
+
+        "# CONFIG_FOO is not set" within a .config file is treated specially
+        and sets the user value of FOO to 'n'. The C implementation works the
+        same way.
+
+        filename: The .config file to load. $-references to existing
+          environment variables will be expanded. For scripts to work even when
+          an alternative build directory is used with the Linux kernel, you
+          need to refer to the top-level kernel directory with "$srctree".
+
+        replace (default: True): True if the configuration should replace the
+           old configuration; False if it should add to it.
+
+        Returns a list of warnings (hopefully empty).
+        """
+
+        self._warnings = []
+        # Regular expressions for parsing .config files
+        _set_re_match = re.compile(r"{}(\w+)=(.*)".format(self.config_prefix)).match
+        _unset_re_match = re.compile(r"# {}(\w+) is not set".format(self.config_prefix)).match
+
+        # Put this first so that a missing file doesn't screw up our state
+        filename = os.path.expandvars(filename)
+        line_feeder = _FileFeed(filename)
+
+        self.config_filename = filename
+
+        #
+        # Read header
+        #
+
+        def is_header_line(line):
+            return line is not None and line.startswith("#") and \
+                   not _unset_re_match(line)
+
+        self.config_header = None
+
+        line = line_feeder.peek_next()
+        if is_header_line(line):
+            self.config_header = ""
+            while is_header_line(line_feeder.peek_next()):
+                self.config_header += line_feeder.get_next()[1:]
+            # Remove trailing newline
+            if self.config_header.endswith("\n"):
+                self.config_header = self.config_header[:-1]
+
+        #
+        # Read assignments. Hotspot for some workloads.
+        #
+
+        def warn_override(filename, linenr, name, old_user_val, new_user_val):
+            self._warn('overriding the value of {0}. '
+                       'Old value: "{1}", new value: "{2}".'
+                       .format(name, old_user_val, new_user_val),
+                       filename, linenr)
+
+        # Invalidate everything to keep things simple. It might be possible to
+        # improve performance for the case where multiple configurations are
+        # loaded by only invalidating a symbol (and its dependent symbols) if
+        # the new user value differs from the old. One complication would be
+        # that symbols not mentioned in the .config must lose their user value
+        # when replace = True, which is the usual case.
+        if replace:
+            self.unset_user_values()
+        else:
+            self._invalidate_all()
+
+        while 1:
+            line = line_feeder.get_next()
+            if line is None:
+                return self._warnings
+
+            line = line.rstrip()
+
+            set_match = _set_re_match(line)
+            if set_match:
+                name, val = set_match.groups()
+
+                if val.startswith('"'):
+                    if len(val) < 2 or val[-1] != '"':
+                        _parse_error(line, "malformed string literal",
+                                     line_feeder.filename, line_feeder.linenr)
+                    # Strip quotes and remove escapings. The unescaping
+                    # procedure should be safe since " can only appear as \"
+                    # inside the string.
+                    val = val[1:-1].replace('\\"', '"').replace("\\\\", "\\")
+
+                if name in self.syms:
+                    sym = self.syms[name]
+                    if sym.user_val is not None:
+                        warn_override(line_feeder.filename, line_feeder.linenr,
+                                      name, sym.user_val, val)
+
+                    if sym.is_choice_sym:
+                        user_mode = sym.parent.user_mode
+                        if user_mode is not None and user_mode != val:
+                            self._warn("assignment to {0} changes mode of "
+                                       'containing choice from "{1}" to "{2}".'
+                                       .format(name, user_mode, val),
+                                       line_feeder.filename,
+                                       line_feeder.linenr)
+
+                    sym._set_user_value_no_invalidate(val, True)
+                else:
+                    if self.print_undef_assign:
+                        _stderr_msg('note: attempt to assign the value "{0}" '
+                                    "to the undefined symbol {1}."
+                                    .format(val, name),
+                                    line_feeder.filename, line_feeder.linenr)
+            else:
+                unset_match = _unset_re_match(line)
+                if unset_match:
+                    name = unset_match.group(1)
+                    if name in self.syms:
+                        sym = self.syms[name]
+                        if sym.user_val is not None:
+                            warn_override(line_feeder.filename,
+                                          line_feeder.linenr,
+                                          name, sym.user_val, "n")
+
+                        sym._set_user_value_no_invalidate("n", True)
+
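+    # Illustrative sketch, not part of the library ('.config' and
+    # 'extra.config' are placeholder filenames):
+    #
+    #   warnings = conf.load_config(".config")           # Replace old values
+    #   conf.load_config("extra.config", replace=False)  # Merge on top
+    #   for warning in warnings:
+    #       print(warning)
+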
+    def write_config(self, filename, header=None):
+        """Writes out symbol values in the familiar .config format.
+
+        Kconfiglib makes sure the format matches what the C implementation
+        would generate, down to whitespace. This eases testing.
+
+        filename: The filename under which to save the configuration.
+
+        header (default: None): A textual header that will appear at the
+           beginning of the file, with each line commented out automatically.
+           None means no header."""
+
+        for sym in self.syms_iter():
+            sym.already_written = False
+
+        with open(filename, "w") as f:
+            # Write header
+            if header is not None:
+                f.write(_comment(header) + "\n")
+
+            # Build and write configuration
+            conf_strings = []
+            _make_block_conf(self.top_block, conf_strings.append)
+            f.write("\n".join(conf_strings) + "\n")
+
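+    # Illustrative sketch, not part of the library: writing the configuration
+    # back out with a header (the header text is arbitrary and gets commented
+    # out automatically, as described above).
+    #
+    #   conf.write_config(".config", header="Generated configuration")
+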
+    def eval(self, s):
+        """Returns the value of the expression 's' -- where 's' is represented
+        as a string -- in the context of the configuration. Raises
+        Kconfig_Syntax_Error if syntax errors are detected in 's'.
+
+        For example, if FOO and BAR are tristate symbols at least one of which
+        has the value "y", then config.eval("y && (FOO || BAR)") => "y"
+
+        This function always yields a tristate value. To get the value of
+        non-bool, non-tristate symbols, use Symbol.get_value().
+
+        The result of this function is consistent with how evaluation works for
+        conditional expressions in the configuration as well as in the C
+        implementation. "m" and m are rewritten as '"m" && MODULES' and 'm &&
+        MODULES', respectively, and a result of "m" will get promoted to "y" if
+        we're running without modules.
+
+        Syntax checking is somewhat lax, partly to be compatible with lax
+        parsing in the C implementation."""
+        return self._eval_expr(self._parse_expr(self._tokenize(s, True), # Feed
+                                                None, # Current symbol/choice
+                                                s))   # line
+
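+    # Illustrative sketch, not part of the library (FOO and BAR are
+    # placeholder symbols):
+    #
+    #   conf.eval("FOO && !BAR")  # -> "n", "m", or "y"
+    #   conf.eval("FOO = y")      # -> "y" if FOO has the value "y", else "n"
+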
+    def unset_user_values(self):
+        """Resets the values of all symbols, as if Config.load_config() or
+        Symbol.set_user_value() had never been called."""
+        for sym in self.syms_iter():
+            sym._unset_user_value_no_recursive_invalidate()
+
+    def set_print_warnings(self, print_warnings):
+        """Determines whether warnings related to this configuration (for
+        things like attempting to assign illegal values to symbols with
+        Symbol.set_user_value()) should be printed to stderr.
+
+        print_warnings: True if warnings should be printed."""
+        self.print_warnings = print_warnings
+
+    def set_print_undef_assign(self, print_undef_assign):
+        """Determines whether informational messages related to assignments to
+        undefined symbols should be printed to stderr for this configuration.
+
+        print_undef_assign: If True, such messages will be printed."""
+        self.print_undef_assign = print_undef_assign
+
+    def __str__(self):
+        """Returns a string containing various information about the Config."""
+        return _lines("Configuration",
+                      "File                                   : " +
+                        self.filename,
+                      "Base directory                         : " +
+                        self.base_dir,
+                      "Value of $ARCH at creation time        : " +
+                        ("(not set)" if self.arch is None else self.arch),
+                      "Value of $SRCARCH at creation time     : " +
+                        ("(not set)" if self.srcarch is None else
+                                        self.srcarch),
+                      "Source tree (derived from $srctree;",
+                      "defaults to '.' if $srctree isn't set) : " +
+                        self.srctree,
+                      "Most recently loaded .config           : " +
+                        ("(no .config loaded)"
+                          if self.config_filename is None else
+                             self.config_filename),
+                      "Print warnings                         : " +
+                        BOOL_STR[self.print_warnings],
+                      "Print assignments to undefined symbols : " +
+                        BOOL_STR[self.print_undef_assign])
+
+    #
+    # Private methods
+    #
+
+    #
+    # Kconfig parsing
+    #
+
+    def _parse_file(self, filename, parent, deps, visible_if_deps, block):
+        """Parses the Kconfig file 'filename'. Appends the Items in the file
+        (and any file it sources) to the list passed in the 'block' parameter.
+        See _parse_block() for the meaning of the parameters."""
+        self._parse_block(_FileFeed(filename), None, parent, deps,
+                          visible_if_deps, block)
+
+    def _parse_block(self, line_feeder, end_marker, parent, deps,
+                     visible_if_deps, block):
+        """Parses a block, which is the contents of either a file or an if,
+        menu, or choice statement. Appends the Items to the list passed in the
+        'block' parameter.
+
+        line_feeder: A _FileFeed instance feeding lines from a file. The
+          Kconfig language is line-based in practice.
+
+        end_marker: The token that ends the block, e.g. T_ENDIF ("endif") for
+           ifs. None for files.
+
+        parent: The enclosing menu or choice, or None if we're at the top
+           level.
+
+        deps: Dependencies from enclosing menus, choices and ifs.
+
+        visible_if_deps: 'visible if' dependencies from
+           enclosing menus.
+
+        block: The list to add items to."""
+
+        while 1:
+            # Do we already have a tokenized line that we determined wasn't
+            # part of whatever we were parsing earlier? See comment in
+            # Config.__init__().
+            if self.end_line is not None:
+                line = self.end_line
+                tokens = self.end_line_tokens
+                tokens.unget_all()
+
+                self.end_line = None
+                self.end_line_tokens = None
+            else:
+                line = line_feeder.get_next()
+                if line is None:
+                    if end_marker is not None:
+                        raise Kconfig_Syntax_Error("Unexpected end of file {0}"
+                                                 .format(line_feeder.filename))
+                    return
+
+                tokens = self._tokenize(line, False, line_feeder.filename,
+                                        line_feeder.linenr)
+
+            t0 = tokens.get_next()
+            if t0 is None:
+                continue
+
+            # Cases are ordered roughly by frequency, which speeds things up a
+            # bit
+
+            if t0 == T_CONFIG or t0 == T_MENUCONFIG:
+                # The tokenizer will automatically allocate a new Symbol object
+                # for any new names it encounters, so we don't need to worry
+                # about that here.
+                sym = tokens.get_next()
+
+                # Symbols defined in multiple places get the parent of their
+                # first definition. However, for symbols whose parents are
+                # choice statements, the choice statement takes precedence.
+                if not sym.is_defined_ or isinstance(parent, Choice):
+                    sym.parent = parent
+                sym.is_defined_ = True
+
+                self._parse_properties(line_feeder, sym, deps, visible_if_deps)
+
+                self.kconfig_syms.append(sym)
+                block.append(sym)
+
+            elif t0 == T_SOURCE:
+                kconfig_file = tokens.get_next()
+                exp_kconfig_file = self._expand_sym_refs(kconfig_file)
+                f = os.path.join(self.base_dir, exp_kconfig_file)
+                if not os.path.exists(f):
+                    raise IOError('{0}:{1}: sourced file "{2}" (expands to '
+                                  '"{3}") not found. Perhaps base_dir '
+                                  '(argument to Config.__init__(), currently '
+                                  '"{4}") is set to the wrong value.'
+                                  .format(line_feeder.filename,
+                                          line_feeder.linenr,
+                                          kconfig_file, exp_kconfig_file,
+                                          self.base_dir))
+                # Add items to the same block
+                self._parse_file(f, parent, deps, visible_if_deps, block)
+
+            elif t0 == end_marker:
+                # We have reached the end of the block
+                return
+
+            elif t0 == T_IF:
+                # If statements are treated as syntactic sugar for adding
+                # dependencies to enclosed items and do not have an explicit
+                # object representation.
+
+                dep_expr = self._parse_expr(tokens, None, line,
+                                            line_feeder.filename,
+                                            line_feeder.linenr)
+                # Add items to the same block
+                self._parse_block(line_feeder, T_ENDIF, parent,
+                                  _make_and(dep_expr, deps),
+                                  visible_if_deps, block)
+
+            elif t0 == T_COMMENT:
+                comment = Comment()
+                comment.config = self
+                comment.parent = parent
+                comment.filename = line_feeder.filename
+                comment.linenr = line_feeder.linenr
+                comment.text = tokens.get_next()
+
+                self._parse_properties(line_feeder, comment, deps,
+                                       visible_if_deps)
+
+                self.comments.append(comment)
+                block.append(comment)
+
+            elif t0 == T_MENU:
+                menu = Menu()
+                menu.config = self
+                menu.parent = parent
+                menu.filename = line_feeder.filename
+                menu.linenr = line_feeder.linenr
+                menu.title = tokens.get_next()
+
+                self._parse_properties(line_feeder, menu, deps,
+                                       visible_if_deps)
+
+                # This needs to go before _parse_block() so that we get the
+                # proper menu ordering in the case of nested menus
+                self.menus.append(menu)
+                # Parse contents and put Items in menu.block
+                self._parse_block(line_feeder, T_ENDMENU, menu, menu.dep_expr,
+                                  _make_and(visible_if_deps,
+                                            menu.visible_if_expr),
+                                  menu.block)
+
+                block.append(menu)
+
+            elif t0 == T_CHOICE:
+                name = tokens.get_next()
+                if name is None:
+                    choice = Choice()
+                    self.choices.append(choice)
+                else:
+                    # Named choice
+                    choice = self.named_choices.get(name)
+                    if choice is None:
+                        choice = Choice()
+                        choice.name = name
+                        self.named_choices[name] = choice
+                        self.choices.append(choice)
+
+                choice.config = self
+                choice.parent = parent
+
+                choice.def_locations.append((line_feeder.filename,
+                                             line_feeder.linenr))
+
+                self._parse_properties(line_feeder, choice, deps,
+                                       visible_if_deps)
+
+                # Parse contents and put Items in choice.block
+                self._parse_block(line_feeder, T_ENDCHOICE, choice, deps,
+                                  visible_if_deps, choice.block)
+
+                choice._determine_actual_symbols()
+
+                # If no type is specified for the choice, its type is that of
+                # the first choice item with a specified type
+                if choice.type == UNKNOWN:
+                    for item in choice.actual_symbols:
+                        if item.type != UNKNOWN:
+                            choice.type = item.type
+                            break
+
+                # Each choice item of UNKNOWN type gets the type of the choice
+                for item in choice.actual_symbols:
+                    if item.type == UNKNOWN:
+                        item.type = choice.type
+
+                block.append(choice)
+
+            elif t0 == T_MAINMENU:
+                text = tokens.get_next()
+                if self.mainmenu_text is not None:
+                    self._warn("overriding 'mainmenu' text. "
+                               'Old value: "{0}", new value: "{1}".'
+                               .format(self.mainmenu_text, text),
+                               line_feeder.filename, line_feeder.linenr)
+                self.mainmenu_text = text
+
+            else:
+                _parse_error(line, "unrecognized construct",
+                             line_feeder.filename, line_feeder.linenr)
+
+    def _parse_properties(self, line_feeder, stmt, deps, visible_if_deps):
+        """Parsing of properties for symbols, menus, choices, and comments.
+        Takes care of propagating dependencies from enclosing menus and ifs."""
+
+        def parse_val_and_cond(tokens, line, filename, linenr):
+            """Parses '<expr1> if <expr2>' constructs, where the 'if' part is
+            optional. Returns a tuple containing the parsed expressions, with
+            None as the second element if the 'if' part is missing."""
+            return (self._parse_expr(tokens, stmt, line, filename, linenr,
+                                     False),
+                    self._parse_expr(tokens, stmt, line, filename, linenr)
+                    if tokens.check(T_IF) else None)
+
+        # In case the symbol is defined in multiple locations, we need to
+        # remember what prompts, defaults, selects, and implies are new for
+        # this definition, as "depends on" should only apply to the local
+        # definition.
+        new_prompt = None
+        new_def_exprs = []
+        new_selects = []
+        new_implies = []
+
+        # Dependencies from 'depends on' statements
+        depends_on_expr = None
+
+        while 1:
+            line = line_feeder.get_next()
+            if line is None:
+                break
+
+            filename = line_feeder.filename
+            linenr = line_feeder.linenr
+
+            tokens = self._tokenize(line, False, filename, linenr)
+
+            t0 = tokens.get_next()
+            if t0 is None:
+                continue
+
+            # Cases are ordered roughly by frequency, which speeds things up a
+            # bit
+
+            if t0 == T_DEPENDS:
+                if not tokens.check(T_ON):
+                    _parse_error(line, 'expected "on" after "depends"',
+                                 filename, linenr)
+
+                parsed_deps = self._parse_expr(tokens, stmt, line, filename,
+                                               linenr)
+
+                if isinstance(stmt, (Menu, Comment)):
+                    stmt.orig_deps = _make_and(stmt.orig_deps, parsed_deps)
+                else:
+                    depends_on_expr = _make_and(depends_on_expr, parsed_deps)
+
+            elif t0 == T_HELP:
+                # Find first non-blank (not all-space) line and get its
+                # indentation
+                line = line_feeder.next_nonblank()
+                if line is None:
+                    stmt.help = ""
+                    break
+                indent = _indentation(line)
+                if indent == 0:
+                    # If the first non-empty line has zero indent, there is no
+                    # help text
+                    stmt.help = ""
+                    line_feeder.unget()
+                    break
+
+                # The help text goes on till the first non-empty line with less
+                # indent
+                help_lines = [_deindent(line, indent)]
+                while 1:
+                    line = line_feeder.get_next()
+                    if line is None or \
+                       (not line.isspace() and _indentation(line) < indent):
+                        stmt.help = "".join(help_lines)
+                        break
+                    help_lines.append(_deindent(line, indent))
+
+                if line is None:
+                    break
+
+                line_feeder.unget()
+
+            elif t0 == T_SELECT:
+                target = tokens.get_next()
+
+                stmt.referenced_syms.add(target)
+                stmt.selected_syms.add(target)
+
+                new_selects.append(
+                    (target,
+                     self._parse_expr(tokens, stmt, line, filename, linenr)
+                     if tokens.check(T_IF) else None))
+
+            elif t0 == T_IMPLY:
+                target = tokens.get_next()
+
+                stmt.referenced_syms.add(target)
+                stmt.implied_syms.add(target)
+
+                new_implies.append(
+                    (target,
+                     self._parse_expr(tokens, stmt, line, filename, linenr)
+                     if tokens.check(T_IF) else None))
+
+            elif t0 in (T_BOOL, T_TRISTATE, T_INT, T_HEX, T_STRING):
+                stmt.type = TOKEN_TO_TYPE[t0]
+                if tokens.peek_next() is not None:
+                    new_prompt = parse_val_and_cond(tokens, line, filename,
+                                                    linenr)
+
+            elif t0 == T_DEFAULT:
+                new_def_exprs.append(parse_val_and_cond(tokens, line, filename,
+                                                        linenr))
+
+            elif t0 == T_DEF_BOOL:
+                stmt.type = BOOL
+                if tokens.peek_next() is not None:
+                    new_def_exprs.append(parse_val_and_cond(tokens, line,
+                                                            filename, linenr))
+
+            elif t0 == T_PROMPT:
+                # 'prompt' properties override each other within a single
+                # definition of a symbol, but additional prompts can be added
+                # by defining the symbol multiple times; hence 'new_prompt'
+                # instead of 'prompt'.
+                new_prompt = parse_val_and_cond(tokens, line, filename, linenr)
+
+            elif t0 == T_RANGE:
+                low = tokens.get_next()
+                high = tokens.get_next()
+                stmt.referenced_syms.add(low)
+                stmt.referenced_syms.add(high)
+
+                stmt.ranges.append(
+                    (low, high,
+                     self._parse_expr(tokens, stmt, line, filename, linenr)
+                     if tokens.check(T_IF) else None))
+
+            elif t0 == T_DEF_TRISTATE:
+                stmt.type = TRISTATE
+                if tokens.peek_next() is not None:
+                    new_def_exprs.append(parse_val_and_cond(tokens, line,
+                                                            filename, linenr))
+
+            elif t0 == T_OPTION:
+                if tokens.check(T_ENV) and tokens.check(T_EQUAL):
+                    env_var = tokens.get_next()
+
+                    stmt.is_special_ = True
+                    stmt.is_from_env = True
+
+                    if env_var not in os.environ:
+                        self._warn("The symbol {0} references the "
+                                   "non-existent environment variable {1} and "
+                                   "will get the empty string as its value. "
+                                   "If you're using Kconfiglib via "
+                                   "'make (i)scriptconfig', it should have "
+                                   "set up the environment correctly for you. "
+                                   "If you still got this message, that "
+                                   "might be an error, and you should email "
+                                   "ulfalizer a.t Google's email service."
+                                   .format(stmt.name, env_var),
+                                   filename, linenr)
+
+                        stmt.cached_val = ""
+                    else:
+                        stmt.cached_val = os.environ[env_var]
+
+                elif tokens.check(T_DEFCONFIG_LIST):
+                    self.defconfig_sym = stmt
+
+                elif tokens.check(T_MODULES):
+                    # To reduce warning spam, only warn if 'option modules' is
+                    # set on some symbol that isn't MODULES, which should be
+                    # safe. I haven't run into any projects that make use of
+                    # modules besides the kernel yet, and there it's likely to
+                    # keep being called "MODULES".
+                    if stmt.name != "MODULES":
+                        self._warn("the 'modules' option is not supported. "
+                                   "Let me know if this is a problem for you; "
+                                   "it shouldn't be that hard to implement. "
+                                   "(Note that modules are still supported -- "
+                                   "Kconfiglib just assumes the symbol name "
+                                   "MODULES, like older versions of the C "
+                                   "implementation did when 'option modules' "
+                                   "wasn't used.)",
+                                   filename, linenr)
+
+                elif tokens.check(T_ALLNOCONFIG_Y):
+                    if not isinstance(stmt, Symbol):
+                        _parse_error(line,
+                                     "the 'allnoconfig_y' option is only "
+                                     "valid for symbols",
+                                     filename, linenr)
+                    stmt.allnoconfig_y = True
+
+                else:
+                    _parse_error(line, "unrecognized option", filename, linenr)
+
+            elif t0 == T_VISIBLE:
+                if not tokens.check(T_IF):
+                    _parse_error(line, 'expected "if" after "visible"',
+                                 filename, linenr)
+                if not isinstance(stmt, Menu):
+                    _parse_error(line,
+                                 "'visible if' is only valid for menus",
+                                 filename, linenr)
+
+                parsed_deps = self._parse_expr(tokens, stmt, line, filename,
+                                               linenr)
+                stmt.visible_if_expr = _make_and(stmt.visible_if_expr,
+                                                 parsed_deps)
+
+            elif t0 == T_OPTIONAL:
+                if not isinstance(stmt, Choice):
+                    _parse_error(line,
+                                 '"optional" is only valid for choices',
+                                 filename,
+                                 linenr)
+                stmt.optional = True
+
+            else:
+                # See comment in Config.__init__()
+                self.end_line = line
+                self.end_line_tokens = tokens
+                break
+
+        # Done parsing properties. Now propagate 'depends on' and enclosing
+        # menu/if dependencies to expressions.
+
+        # The set of symbols referenced directly by the statement plus all
+        # symbols referenced by enclosing menus and ifs
+        stmt.all_referenced_syms = stmt.referenced_syms | _get_expr_syms(deps)
+
+        # Save original dependencies from enclosing menus and ifs
+        stmt.deps_from_containing = deps
+
+        if isinstance(stmt, (Menu, Comment)):
+            stmt.dep_expr = _make_and(stmt.orig_deps, deps)
+        else:
+            # Symbol or Choice
+
+            # See comment for 'menu_dep'
+            stmt.menu_dep = _make_and(deps, depends_on_expr)
+
+            # Propagate dependencies to prompts
+
+            if new_prompt is not None:
+                prompt, cond_expr = new_prompt
+                # Propagate 'visible if' dependencies from menus and local
+                # 'depends on' dependencies
+                cond_expr = _make_and(_make_and(cond_expr, visible_if_deps),
+                                      depends_on_expr)
+                # Save original
+                stmt.orig_prompts.append((prompt, cond_expr))
+                # Finalize with dependencies from enclosing menus and ifs
+                stmt.prompts.append((prompt, _make_and(cond_expr, deps)))
+
+            # Propagate dependencies to defaults
+
+            # Propagate 'depends on' dependencies
+            new_def_exprs = [(val_expr, _make_and(cond_expr, depends_on_expr))
+                             for val_expr, cond_expr in new_def_exprs]
+            # Save original
+            stmt.orig_def_exprs.extend(new_def_exprs)
+            # Finalize with dependencies from enclosing menus and ifs
+            stmt.def_exprs.extend([(val_expr, _make_and(cond_expr, deps))
+                                   for val_expr, cond_expr in new_def_exprs])
+
+            # Propagate dependencies to selects and implies
+
+            # Only symbols can select and imply
+            if isinstance(stmt, Symbol):
+                # Propagate 'depends on' dependencies
+                new_selects = [(target, _make_and(cond_expr, depends_on_expr))
+                               for target, cond_expr in new_selects]
+                new_implies = [(target, _make_and(cond_expr, depends_on_expr))
+                               for target, cond_expr in new_implies]
+                # Save original
+                stmt.orig_selects.extend(new_selects)
+                stmt.orig_implies.extend(new_implies)
+                # Finalize with dependencies from enclosing menus and ifs
+                for target, cond in new_selects:
+                    target.rev_dep = \
+                        _make_or(target.rev_dep,
+                                 _make_and(stmt, _make_and(cond, deps)))
+                for target, cond in new_implies:
+                    target.weak_rev_dep = \
+                        _make_or(target.weak_rev_dep,
+                                 _make_and(stmt, _make_and(cond, deps)))
+
+    def _parse_expr(self, feed, cur_item, line, filename=None, linenr=None,
+                    transform_m=True):
+        """Parses an expression from the tokens in 'feed' using a simple
+        top-down approach. The result has the form
+        '(<operator>, [<parsed operands>])', where <operator> is e.g.
+        kconfiglib.AND. If there is only one operand (i.e., no && or ||), then
+        the operand is returned directly. This also goes for subexpressions.
+
+        feed: _Feed instance containing the tokens for the expression.
+
+        cur_item: The item (Symbol, Choice, Menu, or Comment) currently being
+           parsed, or None if we're not parsing an item. Used for recording
+           references to symbols.
+
+        line: The line containing the expression being parsed.
+
+        filename (default: None): The file containing the expression.
+
+        linenr (default: None): The line number containing the expression.
+
+        transform_m (default: True): Determines if 'm' should be rewritten to
+           'm && MODULES' -- see parse_val_and_cond().
+
+        Expression grammar, in decreasing order of precedence:
+
+        <expr> -> <symbol>
+                  <symbol> '=' <symbol>
+                  <symbol> '!=' <symbol>
+                  '(' <expr> ')'
+                  '!' <expr>
+                  <expr> '&&' <expr>
+                  <expr> '||' <expr>"""
+
+        # Use instance variables to avoid having to pass these as arguments
+        # through the top-down parser in _parse_expr_rec(), which is tedious
+        # and obfuscates the code. A profiler run shows no noticeable
+        # performance difference.
+        self._cur_item = cur_item
+        self._transform_m = transform_m
+        self._line = line
+        self._filename = filename
+        self._linenr = linenr
+
+        return self._parse_expr_rec(feed)
+
+    def _parse_expr_rec(self, feed):
+        or_term = self._parse_or_term(feed)
+        if not feed.check(T_OR):
+            # Common case -- no need for an OR node since it's just a single
+            # operand
+            return or_term
+        or_terms = [or_term, self._parse_or_term(feed)]
+        while feed.check(T_OR):
+            or_terms.append(self._parse_or_term(feed))
+        return (OR, or_terms)
+
+    def _parse_or_term(self, feed):
+        and_term = self._parse_factor(feed)
+        if not feed.check(T_AND):
+            # Common case -- no need for an AND node since it's just a single
+            # operand
+            return and_term
+        and_terms = [and_term, self._parse_factor(feed)]
+        while feed.check(T_AND):
+            and_terms.append(self._parse_factor(feed))
+        return (AND, and_terms)
+
+    def _parse_factor(self, feed):
+        token = feed.get_next()
+
+        if isinstance(token, (Symbol, str)):
+            if self._cur_item is not None and isinstance(token, Symbol):
+                self._cur_item.referenced_syms.add(token)
+
+            next_token = feed.peek_next()
+            # For conditional expressions ('depends on <expr>',
+            # '... if <expr>', etc.), "m" and m are rewritten to
+            # "m" && MODULES.
+            if next_token != T_EQUAL and next_token != T_UNEQUAL:
+                if self._transform_m and (token is self.m or token == "m"):
+                    return (AND, ["m", self._sym_lookup("MODULES")])
+                return token
+
+            relation = EQUAL if (feed.get_next() == T_EQUAL) else UNEQUAL
+            token_2 = feed.get_next()
+            if self._cur_item is not None and isinstance(token_2, Symbol):
+                self._cur_item.referenced_syms.add(token_2)
+            return (relation, token, token_2)
+
+        if token == T_NOT:
+            return (NOT, self._parse_factor(feed))
+
+        if token == T_OPEN_PAREN:
+            expr_parse = self._parse_expr_rec(feed)
+            if not feed.check(T_CLOSE_PAREN):
+                _parse_error(self._line, "missing end parenthesis",
+                             self._filename, self._linenr)
+            return expr_parse
+
+        _parse_error(self._line, "malformed expression", self._filename,
+                     self._linenr)
+
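+    # Illustrative note, not part of the library: with placeholder symbols
+    # FOO, BAR, and BAZ, _parse_expr() above produces e.g.
+    #
+    #   FOO                ->  FOO (single operands are returned directly)
+    #   FOO && !BAR        ->  (AND, [FOO, (NOT, BAR)])
+    #   FOO || BAR || BAZ  ->  (OR, [FOO, BAR, BAZ])
+    #   FOO = y            ->  (EQUAL, FOO, y)
+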
+    def _tokenize(self, s, for_eval, filename=None, linenr=None):
+        """Returns a _Feed instance containing tokens derived from the string
+        's'. Registers any new symbols encountered (via _sym_lookup()).
+
+        (I experimented with a pure regular expression implementation, but it
+        came out slower, less readable, and wouldn't have been as flexible.)
+
+        for_eval: True when parsing an expression for a call to Config.eval(),
+           in which case we should not treat the first token specially nor
+           register new symbols."""
+
+        s = s.strip()
+        if s == "" or s[0] == "#":
+            return _Feed([])
+
+        if for_eval:
+            previous = None # The previous token seen
+            tokens = []
+            i = 0 # The current index in the string being tokenized
+
+        else:
+            # The initial word on a line is parsed specially. Let
+            # command_chars = [A-Za-z0-9_]. Then
+            #  - leading non-command_chars characters are ignored, and
+            #  - the first token consists of the following one or more
+            #    command_chars characters.
+            # This is why things like "----help--" are accepted.
+            initial_token_match = _initial_token_re_match(s)
+            if initial_token_match is None:
+                return _Feed([])
+            keyword = _get_keyword(initial_token_match.group(1))
+            if keyword == T_HELP:
+                # Avoid junk after "help", e.g. "---", being registered as a
+                # symbol
+                return _Feed([T_HELP])
+            if keyword is None:
+                # We expect a keyword as the first token
+                _tokenization_error(s, filename, linenr)
+
+            previous = keyword
+            tokens = [keyword]
+            # The current index in the string being tokenized
+            i = initial_token_match.end()
+
+        # _tokenize() is a hotspot during parsing, and this speeds things up a
+        # bit
+        strlen = len(s)
+        append = tokens.append
+
+        # Main tokenization loop. (Handles tokens past the first one.)
+        while i < strlen:
+            # Test for an identifier/keyword preceded by whitespace first; this
+            # is the most common case.
+            id_keyword_match = _id_keyword_re_match(s, i)
+            if id_keyword_match:
+                # We have an identifier or keyword. The above also stripped any
+                # whitespace for us.
+                name = id_keyword_match.group(1)
+                # Jump past it
+                i = id_keyword_match.end()
+
+                keyword = _get_keyword(name)
+                if keyword is not None:
+                    # It's a keyword
+                    append(keyword)
+                elif previous in STRING_LEX:
+                    # What would ordinarily be considered an identifier is
+                    # treated as a string after certain tokens
+                    append(name)
+                else:
+                    # It's a symbol name. _sym_lookup() will take care of
+                    # allocating a new Symbol instance if it's the first time
+                    # we see it.
+                    sym = self._sym_lookup(name, for_eval)
+
+                    if previous == T_CONFIG or previous == T_MENUCONFIG:
+                        # If the previous token is T_(MENU)CONFIG
+                        # ("(menu)config"), we're tokenizing the first line of
+                        # a symbol definition, and should remember this as a
+                        # location where the symbol is defined
+                        sym.def_locations.append((filename, linenr))
+                    else:
+                        # Otherwise, it's a reference to the symbol
+                        sym.ref_locations.append((filename, linenr))
+
+                    append(sym)
+
+            else:
+                # Not an identifier/keyword
+
+                while i < strlen and s[i].isspace():
+                    i += 1
+                if i == strlen:
+                    break
+                c = s[i]
+                i += 1
+
+                # String literal (constant symbol)
+                if c == '"' or c == "'":
+                    if "\\" in s:
+                        # Slow path: This could probably be sped up, but it's a
+                        # very unusual case anyway.
+                        quote = c
+                        val = ""
+                        while 1:
+                            if i >= len(s):
+                                _tokenization_error(s, filename, linenr)
+                            c = s[i]
+                            if c == quote:
+                                break
+                            if c == "\\":
+                                if i + 1 >= len(s):
+                                    _tokenization_error(s, filename, linenr)
+                                val += s[i + 1]
+                                i += 2
+                            else:
+                                val += c
+                                i += 1
+                        i += 1
+                        append(val)
+                    else:
+                        # Fast path: If the string contains no backslashes
+                        # (almost always) we can simply look for the matching
+                        # quote.
+                        end = s.find(c, i)
+                        if end == -1:
+                            _tokenization_error(s, filename, linenr)
+                        append(s[i:end])
+                        i = end + 1
+
+                elif c == "&":
+                    # Invalid characters are ignored
+                    if i >= len(s) or s[i] != "&": continue
+                    append(T_AND)
+                    i += 1
+
+                elif c == "|":
+                    # Invalid characters are ignored
+                    if i >= len(s) or s[i] != "|": continue
+                    append(T_OR)
+                    i += 1
+
+                elif c == "!":
+                    if i < len(s) and s[i] == "=":
+                        append(T_UNEQUAL)
+                        i += 1
+                    else:
+                        append(T_NOT)
+
+                elif c == "=": append(T_EQUAL)
+                elif c == "(": append(T_OPEN_PAREN)
+                elif c == ")": append(T_CLOSE_PAREN)
+                elif c == "#": break # Comment
+
+                else: continue # Invalid characters are ignored
+
+            previous = tokens[-1]
+
+        return _Feed(tokens)
+
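+    # Illustrative note, not part of the library: for a line like
+    # 'depends on FOO && !BAR' (FOO and BAR being placeholder symbols),
+    # _tokenize() returns a _Feed over
+    #
+    #   [T_DEPENDS, T_ON, <Symbol FOO>, T_AND, T_NOT, <Symbol BAR>]
+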
+    def _sym_lookup(self, name, for_eval=False):
+        """Fetches the symbol 'name' from the symbol table, creating and
+        registering it if it does not exist. If 'for_eval' is True, the symbol
+        won't be added to the symbol table if it does not exist -- this is for
+        Config.eval()."""
+        if name in self.syms:
+            return self.syms[name]
+
+        new_sym = Symbol()
+        new_sym.config = self
+        new_sym.name = name
+        if for_eval:
+            self._warn("no symbol {0} in configuration".format(name))
+        else:
+            self.syms[name] = new_sym
+        return new_sym
+
+    #
+    # Expression evaluation
+    #
+
+    def _eval_expr(self, expr):
+        """Evaluates an expression to "n", "m", or "y"."""
+
+        # Handles e.g. an "x if y" condition where the "if y" part is missing.
+        if expr is None:
+            return "y"
+
+        res = self._eval_expr_rec(expr)
+        if res == "m":
+            # Promote "m" to "y" if we're running without modules.
+            #
+            # Internally, "m" is often rewritten to "m" && MODULES by both the
+            # C implementation and Kconfiglib, which takes care of cases where
+            # "m" should be demoted to "n" instead.
+            modules_sym = self.syms.get("MODULES")
+            if modules_sym is None or modules_sym.get_value() != "y":
+                return "y"
+        return res
+
+    def _eval_expr_rec(self, expr):
+        if isinstance(expr, Symbol):
+            # Non-bool/tristate symbols are always "n" in a tristate sense,
+            # regardless of their value
+            if expr.type != BOOL and expr.type != TRISTATE:
+                return "n"
+            return expr.get_value()
+
+        if isinstance(expr, str):
+            return expr if (expr == "y" or expr == "m") else "n"
+
+        # Ordered by frequency
+
+        if expr[0] == AND:
+            res = "y"
+            for subexpr in expr[1]:
+                ev = self._eval_expr_rec(subexpr)
+                # Return immediately upon discovering an "n" term
+                if ev == "n":
+                    return "n"
+                if ev == "m":
+                    res = "m"
+            # 'res' is either "m" or "y" here; we already handled the
+            # short-circuiting "n" case in the loop.
+            return res
+
+        if expr[0] == NOT:
+            ev = self._eval_expr_rec(expr[1])
+            if ev == "y":
+                return "n"
+            return "y" if (ev == "n") else "m"
+
+        if expr[0] == OR:
+            res = "n"
+            for subexpr in expr[1]:
+                ev = self._eval_expr_rec(subexpr)
+                # Return immediately upon discovering a "y" term
+                if ev == "y":
+                    return "y"
+                if ev == "m":
+                    res = "m"
+            # 'res' is either "n" or "m" here; we already handled the
+            # short-circuiting "y" case in the loop.
+            return res
+
+        if expr[0] == EQUAL:
+            return "y" if (_str_val(expr[1]) == _str_val(expr[2])) else "n"
+
+        if expr[0] == UNEQUAL:
+            return "y" if (_str_val(expr[1]) != _str_val(expr[2])) else "n"
+
+        _internal_error("Internal error while evaluating expression: "
+                        "unknown operation {0}.".format(expr[0]))
+
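+    # Illustrative note, not part of the library: in tristate terms, AND acts
+    # like a minimum and OR like a maximum over "n" < "m" < "y", while NOT
+    # swaps "y" and "n" and leaves "m" unchanged. For example,
+    #
+    #   y && m  ->  "m"      y || m  ->  "y"      !m  ->  "m"
+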
+    def _eval_min(self, e1, e2):
+        """Returns the minimum value of the two expressions. Equates None with
+        'y'."""
+        e1_eval = self._eval_expr(e1)
+        e2_eval = self._eval_expr(e2)
+        return e1_eval if tri_less(e1_eval, e2_eval) else e2_eval
+
+    def _eval_max(self, e1, e2):
+        """Returns the maximum value of the two expressions. Equates None with
+        'y'."""
+        e1_eval = self._eval_expr(e1)
+        e2_eval = self._eval_expr(e2)
+        return e1_eval if tri_greater(e1_eval, e2_eval) else e2_eval
+
+    #
+    # Dependency tracking (for caching and invalidation)
+    #
+
+    def _build_dep(self):
+        """Populates the Symbol.dep sets, linking the symbol to the symbols
+        that immediately depend on it in the sense that changing the value of
+        the symbol might affect the values of those other symbols. This is used
+        for caching/invalidation purposes. The calculated sets might be larger
+        than necessary as we don't do any complicated analysis of the
+        expressions."""
+
+        # Adds 'sym' as a directly dependent symbol to all symbols that appear
+        # in the expression 'e'
+        def add_expr_deps(e, sym):
+            for s in _get_expr_syms(e):
+                s.dep.add(sym)
+
+        # The directly dependent symbols of a symbol are:
+        #  - Any symbols whose prompts, default values, rev_dep (select
+        #    condition), weak_rev_dep (imply condition) or ranges depend on the
+        #    symbol
+        #  - Any symbols that belong to the same choice statement as the symbol
+        #    (these won't be included in 'dep' as that makes the dependency
+        #    graph unwieldy, but Symbol._get_dependent() will include them)
+        #  - Any symbols in a choice statement that depends on the symbol
+        for sym in self.syms_iter():
+            for _, e in sym.prompts:
+                add_expr_deps(e, sym)
+
+            for v, e in sym.def_exprs:
+                add_expr_deps(v, sym)
+                add_expr_deps(e, sym)
+
+            add_expr_deps(sym.rev_dep, sym)
+            add_expr_deps(sym.weak_rev_dep, sym)
+
+            for l, u, e in sym.ranges:
+                add_expr_deps(l, sym)
+                add_expr_deps(u, sym)
+                add_expr_deps(e, sym)
+
+            if sym.is_choice_sym:
+                choice = sym.parent
+                for _, e in choice.prompts:
+                    add_expr_deps(e, sym)
+                for _, e in choice.def_exprs:
+                    add_expr_deps(e, sym)
+
+    def _eq_to_sym(self, eq):
+        """_expr_depends_on() helper. For (in)equalities of the form sym = y/m
+        or sym != n, returns sym. For other (in)equalities, returns None."""
+        relation, left, right = eq
+
+        def transform_y_m_n(item):
+            if item is self.y: return "y"
+            if item is self.m: return "m"
+            if item is self.n: return "n"
+            return item
+
+        left = transform_y_m_n(left)
+        right = transform_y_m_n(right)
+
+        # Make sure the symbol (if any) appears to the left
+        if not isinstance(left, Symbol):
+            left, right = right, left
+        if not isinstance(left, Symbol):
+            return None
+        if (relation == EQUAL and (right == "y" or right == "m")) or \
+           (relation == UNEQUAL and right == "n"):
+            return left
+        return None
+
+    def _expr_depends_on(self, expr, sym):
+        """Reimplementation of expr_depends_symbol() from mconf.c. Used to
+        determine if a submenu should be implicitly created, which influences
+        what items inside choice statements are considered choice items."""
+        if expr is None:
+            return False
+
+        def rec(expr):
+            if isinstance(expr, str):
+                return False
+            if isinstance(expr, Symbol):
+                return expr is sym
+
+            if expr[0] in (EQUAL, UNEQUAL):
+                return self._eq_to_sym(expr) is sym
+            if expr[0] == AND:
+                for and_expr in expr[1]:
+                    if rec(and_expr):
+                        return True
+            return False
+
+        return rec(expr)
+
+    def _invalidate_all(self):
+        for sym in self.syms_iter():
+            sym._invalidate()
+
+    #
+    # Printing and misc.
+    #
+
+    def _expand_sym_refs(self, s):
+        """Expands $-references to symbols in 's' to symbol values, or to the
+        empty string for undefined symbols."""
+
+        while 1:
+            sym_ref_match = _sym_ref_re_search(s)
+            if sym_ref_match is None:
+                return s
+
+            sym_name = sym_ref_match.group(0)[1:]
+            sym = self.syms.get(sym_name)
+            expansion = "" if sym is None else sym.get_value()
+
+            s = s[:sym_ref_match.start()] + \
+                expansion + \
+                s[sym_ref_match.end():]
+
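+    # Illustrative note, not part of the library: if a symbol ARCH has the
+    # value "arm" (ARCH is just a placeholder here), then
+    #
+    #   self._expand_sym_refs("arch/$ARCH/defconfig") -> "arch/arm/defconfig"
+    #
+    # References to undefined symbols expand to the empty string.
+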
+    def _expr_val_str(self, expr, no_value_str="(none)",
+                      get_val_instead_of_eval=False):
+        """Printing helper. Returns a string with 'expr' and its value.
+
+        no_value_str: String to return when 'expr' is missing (None).
+
+        get_val_instead_of_eval: Assume 'expr' is a symbol or string (constant
+          symbol) and get its value directly instead of evaluating it to a
+          tristate value."""
+
+        if expr is None:
+            return no_value_str
+
+        if get_val_instead_of_eval:
+            if isinstance(expr, str):
+                return _expr_to_str(expr)
+            val = expr.get_value()
+        else:
+            val = self._eval_expr(expr)
+
+        return "{0} (value: {1})".format(_expr_to_str(expr), _expr_to_str(val))
+
+    def _get_sym_or_choice_str(self, sc):
+        """Symbols and choices have many properties in common, so we factor out
+        common __str__() stuff here. "sc" is short for "symbol or choice"."""
+
+        # As we deal a lot with string representations here, use some
+        # convenient shorthand:
+        s = _expr_to_str
+
+        #
+        # Common symbol/choice properties
+        #
+
+        user_val_str = "(no user value)" if sc.user_val is None else \
+                       s(sc.user_val)
+
+        # Build prompts string
+        if not sc.prompts:
+            prompts_str = " (no prompts)"
+        else:
+            prompts_str_rows = []
+            for prompt, cond_expr in sc.orig_prompts:
+                prompts_str_rows.append(
+                    ' "{0}"'.format(prompt) if cond_expr is None else
+                    ' "{0}" if {1}'.format(prompt,
+                                           self._expr_val_str(cond_expr)))
+            prompts_str = "\n".join(prompts_str_rows)
+
+        # Build locations string
+        locations_str = "(no locations)" if not sc.def_locations else \
+                        " ".join(["{0}:{1}".format(filename, linenr) for
+                                  filename, linenr in sc.def_locations])
+
+        # Build additional-dependencies-from-menus-and-ifs string
+        additional_deps_str = " " + \
+          self._expr_val_str(sc.deps_from_containing,
+                             "(no additional dependencies)")
+
+        #
+        # Symbol-specific stuff
+        #
+
+        if isinstance(sc, Symbol):
+            # Build ranges string
+            if not sc.ranges:
+                ranges_str = " (no ranges)"
+            else:
+                ranges_str_rows = []
+                for l, u, cond_expr in sc.ranges:
+                    ranges_str_rows.append(
+                        " [{0}, {1}]".format(s(l), s(u))
+                        if cond_expr is None else
+                        " [{0}, {1}] if {2}"
+                        .format(s(l), s(u), self._expr_val_str(cond_expr)))
+                ranges_str = "\n".join(ranges_str_rows)
+
+            # Build default values string
+            if not sc.def_exprs:
+                defaults_str = " (no default values)"
+            else:
+                defaults_str_rows = []
+                for val_expr, cond_expr in sc.orig_def_exprs:
+                    row_str = " " + self._expr_val_str(val_expr, "(none)",
+                                                       sc.type == STRING)
+                    defaults_str_rows.append(row_str)
+                    defaults_str_rows.append("  Condition: " +
+                                               self._expr_val_str(cond_expr))
+                defaults_str = "\n".join(defaults_str_rows)
+
+            # Build selects string
+            if not sc.orig_selects:
+                selects_str = " (no selects)"
+            else:
+                selects_str_rows = []
+                for target, cond_expr in sc.orig_selects:
+                    selects_str_rows.append(
+                        " {0}".format(target.name) if cond_expr is None else
+                        " {0} if {1}".format(target.name,
+                                             self._expr_val_str(cond_expr)))
+                selects_str = "\n".join(selects_str_rows)
+
+            # Build implies string
+            if not sc.orig_implies:
+                implies_str = " (no implies)"
+            else:
+                implies_str_rows = []
+                for target, cond_expr in sc.orig_implies:
+                    implies_str_rows.append(
+                        " {0}".format(target.name) if cond_expr is None else
+                        " {0} if {1}".format(target.name,
+                                             self._expr_val_str(cond_expr)))
+                implies_str = "\n".join(implies_str_rows)
+
+            res = _lines("Symbol " +
+                           ("(no name)" if sc.name is None else sc.name),
+                         "Type           : " + TYPENAME[sc.type],
+                         "Value          : " + s(sc.get_value()),
+                         "User value     : " + user_val_str,
+                         "Visibility     : " + s(_get_visibility(sc)),
+                         "Is choice item : " + BOOL_STR[sc.is_choice_sym],
+                         "Is defined     : " + BOOL_STR[sc.is_defined_],
+                         "Is from env.   : " + BOOL_STR[sc.is_from_env],
+                         "Is special     : " + BOOL_STR[sc.is_special_] + "\n")
+            if sc.ranges:
+                res += _lines("Ranges:", ranges_str + "\n")
+            res += _lines("Prompts:",
+                          prompts_str,
+                          "Default values:",
+                          defaults_str,
+                          "Selects:",
+                          selects_str,
+                          "Implies:",
+                          implies_str,
+                          "Reverse (select-related) dependencies:",
+                          " (no reverse dependencies)"
+                          if sc.rev_dep == "n"
+                          else " " + self._expr_val_str(sc.rev_dep),
+                          "Weak reverse (imply-related) dependencies:",
+                          " (no weak reverse dependencies)"
+                          if sc.weak_rev_dep == "n"
+                          else " " + self._expr_val_str(sc.weak_rev_dep),
+                          "Additional dependencies from enclosing menus "
+                            "and ifs:",
+                          additional_deps_str,
+                          "Locations: " + locations_str)
+
+            return res
+
+        #
+        # Choice-specific stuff
+        #
+
+        # Build selected symbol string
+        sel = sc.get_selection()
+        sel_str = "(no selection)" if sel is None else sel.name
+
+        # Build default values string
+        if not sc.def_exprs:
+            defaults_str = " (no default values)"
+        else:
+            defaults_str_rows = []
+            for sym, cond_expr in sc.orig_def_exprs:
+                defaults_str_rows.append(
+                    " {0}".format(sym.name) if cond_expr is None else
+                    " {0} if {1}".format(sym.name,
+                                         self._expr_val_str(cond_expr)))
+            defaults_str = "\n".join(defaults_str_rows)
+
+        # Build contained symbols string
+        names = [sym.name for sym in sc.actual_symbols]
+        syms_string = " ".join(names) if names else "(empty)"
+
+        return _lines("Choice",
+                      "Name (for named choices): " +
+                        ("(no name)" if sc.name is None else sc.name),
+                      "Type            : " + TYPENAME[sc.type],
+                      "Selected symbol : " + sel_str,
+                      "User value      : " + user_val_str,
+                      "Mode            : " + s(sc.get_mode()),
+                      "Visibility      : " + s(_get_visibility(sc)),
+                      "Optional        : " + BOOL_STR[sc.optional],
+                      "Prompts:",
+                      prompts_str,
+                      "Defaults:",
+                      defaults_str,
+                      "Choice symbols:",
+                      " " + syms_string,
+                      "Additional dependencies from enclosing menus and "
+                        "ifs:",
+                      additional_deps_str,
+                      "Locations: " + locations_str)
+
+    def _warn(self, msg, filename=None, linenr=None):
+        """For printing warnings to stderr."""
+        msg = _build_msg("warning: " + msg, filename, linenr)
+        if self.print_warnings:
+            sys.stderr.write(msg + "\n")
+        self._warnings.append(msg)
+
+class Item(object):
+
+    """Base class for symbols and other Kconfig constructs. Subclasses are
+    Symbol, Choice, Menu, and Comment."""
+
+    def is_symbol(self):
+        """Returns True if the item is a symbol. Short for
+        isinstance(item, kconfiglib.Symbol)."""
+        return isinstance(self, Symbol)
+
+    def is_choice(self):
+        """Returns True if the item is a choice. Short for
+        isinstance(item, kconfiglib.Choice)."""
+        return isinstance(self, Choice)
+
+    def is_menu(self):
+        """Returns True if the item is a menu. Short for
+        isinstance(item, kconfiglib.Menu)."""
+        return isinstance(self, Menu)
+
+    def is_comment(self):
+        """Returns True if the item is a comment. Short for
+        isinstance(item, kconfiglib.Comment)."""
+        return isinstance(self, Comment)
+
+class Symbol(Item):
+
+    """Represents a configuration symbol - e.g. FOO for
+
+    config FOO
+        ..."""
+
+    #
+    # Public interface
+    #
+
+    def get_config(self):
+        """Returns the Config instance this symbol is from."""
+        return self.config
+
+    def get_name(self):
+        """Returns the name of the symbol."""
+        return self.name
+
+    def get_type(self):
+        """Returns the type of the symbol: one of UNKNOWN, BOOL, TRISTATE,
+        STRING, HEX, or INT. These are defined at the top level of the module,
+        so you'd do something like
+
+        if sym.get_type() == kconfiglib.STRING:
+            ..."""
+        return self.type
+
+    def get_prompts(self):
+        """Returns a list of prompts defined for the symbol, in the order they
+        appear in the configuration files. Returns the empty list for symbols
+        with no prompt.
+
+        This list will have a single entry for the vast majority of symbols
+        having prompts, but having multiple prompts for a single symbol is
+        possible through having multiple 'config' entries for it."""
+        return [prompt for prompt, _ in self.orig_prompts]
+
+    def get_help(self):
+        """Returns the help text of the symbol, or None if the symbol has no
+        help text."""
+        return self.help
+
+    def get_parent(self):
+        """Returns the menu or choice statement that contains the symbol, or
+        None if the symbol is at the top level. Note that if statements are
+        treated as syntactic sugar and do not have an explicit class
+        representation."""
+        return self.parent
+
+    def get_def_locations(self):
+        """Returns a list of (filename, linenr) tuples, where filename (string)
+        and linenr (int) represent a location where the symbol is defined. For
+        the vast majority of symbols this list will only contain one element.
+        For the following Kconfig, FOO would get two entries: the lines marked
+        with *.
+
+        config FOO *
+            bool "foo prompt 1"
+
+        config FOO *
+            bool "foo prompt 2"
+        """
+        return self.def_locations
+
+    def get_ref_locations(self):
+        """Returns a list of (filename, linenr) tuples, where filename (string)
+        and linenr (int) represent a location where the symbol is referenced in
+        the configuration. For example, the lines marked by * would be included
+        for FOO below:
+
+        config A
+            bool
+            default BAR || FOO *
+
+        config B
+            tristate
+            depends on FOO *
+            default m if FOO *
+
+        if FOO *
+            config A
+                bool "A"
+        endif
+
+        config FOO (definition not included)
+            bool
+        """
+        return self.ref_locations
+
+    def get_value(self):
+        """Calculate and return the value of the symbol. See also
+        Symbol.set_user_value()."""
+
+        if self.cached_val is not None:
+            return self.cached_val
+
+        # As a quirk of Kconfig, undefined symbols get their name as their
+        # value. This is why things like "FOO = bar" work for seeing if FOO has
+        # the value "bar".
+        if self.type == UNKNOWN:
+            self.cached_val = self.name
+            return self.name
+
+        new_val = DEFAULT_VALUE[self.type]
+        vis = _get_visibility(self)
+
+        # This is easiest to calculate together with the value
+        self.write_to_conf = False
+
+        if self.type == BOOL or self.type == TRISTATE:
+            # The visibility and mode (modules-only or single-selection) of
+            # choice items will be taken into account in _get_visibility()
+            if self.is_choice_sym:
+                if vis != "n":
+                    choice = self.parent
+                    mode = choice.get_mode()
+
+                    self.write_to_conf = (mode != "n")
+
+                    if mode == "y":
+                        new_val = "y" if choice.get_selection() is self \
+                                  else "n"
+                    elif mode == "m":
+                        if self.user_val == "m" or self.user_val == "y":
+                            new_val = "m"
+
+            else:
+                # If the symbol is visible and has a user value, use that.
+                # Otherwise, look at defaults and weak reverse dependencies
+                # (implies).
+                use_defaults_and_weak_rev_deps = True
+
+                if vis != "n":
+                    self.write_to_conf = True
+                    if self.user_val is not None:
+                        new_val = self.config._eval_min(self.user_val, vis)
+                        use_defaults_and_weak_rev_deps = False
+
+                if use_defaults_and_weak_rev_deps:
+                    for val_expr, cond_expr in self.def_exprs:
+                        cond_eval = self.config._eval_expr(cond_expr)
+                        if cond_eval != "n":
+                            self.write_to_conf = True
+                            new_val = self.config._eval_min(val_expr,
+                                                            cond_eval)
+                            break
+
+                    weak_rev_dep_val = \
+                        self.config._eval_expr(self.weak_rev_dep)
+                    if weak_rev_dep_val != "n":
+                        self.write_to_conf = True
+                        new_val = self.config._eval_max(new_val,
+                                                        weak_rev_dep_val)
+
+                # Reverse (select-related) dependencies take precedence
+                rev_dep_val = self.config._eval_expr(self.rev_dep)
+                if rev_dep_val != "n":
+                    self.write_to_conf = True
+                    new_val = self.config._eval_max(new_val, rev_dep_val)
+
+            # We need to promote "m" to "y" in two circumstances:
+            #  1) If our type is boolean
+            #  2) If our weak_rev_dep (from IMPLY) is "y"
+            if new_val == "m" and \
+               (self.type == BOOL or
+                self.config._eval_expr(self.weak_rev_dep) == "y"):
+                new_val = "y"
+
+        elif self.type == INT or self.type == HEX:
+            has_active_range = False
+            low = None
+            high = None
+            use_defaults = True
+
+            base = 16 if self.type == HEX else 10
+
+            for l, h, cond_expr in self.ranges:
+                if self.config._eval_expr(cond_expr) != "n":
+                    has_active_range = True
+
+                    low_str = _str_val(l)
+                    high_str = _str_val(h)
+                    low = int(low_str, base) if \
+                      _is_base_n(low_str, base) else 0
+                    high = int(high_str, base) if \
+                      _is_base_n(high_str, base) else 0
+
+                    break
+
+            if vis != "n":
+                self.write_to_conf = True
+
+                if self.user_val is not None and \
+                   _is_base_n(self.user_val, base) and \
+                   (not has_active_range or
+                    low <= int(self.user_val, base) <= high):
+
+                    # If the user value is OK, it is stored in exactly the same
+                    # form as specified in the assignment (with or without
+                    # "0x", etc).
+
+                    use_defaults = False
+                    new_val = self.user_val
+
+            if use_defaults:
+                for val_expr, cond_expr in self.def_exprs:
+                    if self.config._eval_expr(cond_expr) != "n":
+                        self.write_to_conf = True
+
+                        # If the default value is OK, it is stored in exactly
+                        # the same form as specified. Otherwise, it is clamped
+                        # to the range, and the output has "0x" as appropriate
+                        # for the type.
+
+                        new_val = _str_val(val_expr)
+
+                        if _is_base_n(new_val, base):
+                            new_val_num = int(new_val, base)
+                            if has_active_range:
+                                clamped_val = None
+
+                                if new_val_num < low:
+                                    clamped_val = low
+                                elif new_val_num > high:
+                                    clamped_val = high
+
+                                if clamped_val is not None:
+                                    new_val = (hex(clamped_val) if \
+                                      self.type == HEX else str(clamped_val))
+
+                            break
+                else: # For the for loop
+                    # If no user value or default kicks in but the hex/int has
+                    # an active range, then the low end of the range is used,
+                    # provided it's > 0, with "0x" prepended as appropriate.
+                    if has_active_range and low > 0:
+                        new_val = (hex(low) if self.type == HEX else str(low))
+
+        elif self.type == STRING:
+            use_defaults = True
+
+            if vis != "n":
+                self.write_to_conf = True
+                if self.user_val is not None:
+                    new_val = self.user_val
+                    use_defaults = False
+
+            if use_defaults:
+                for val_expr, cond_expr in self.def_exprs:
+                    if self.config._eval_expr(cond_expr) != "n":
+                        self.write_to_conf = True
+                        new_val = _str_val(val_expr)
+                        break
+
+        self.cached_val = new_val
+        return new_val
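+
+    # Illustrative sketch, not part of the library: how get_value() above is
+    # typically used from client code. The file names "Kconfig" and ".config"
+    # and the symbol name "FOO" are placeholder assumptions; a real tree may
+    # also need environment variables that its Kconfig files reference. Kept
+    # as comments so that importing this module is unaffected.
+    #
+    #   import kconfiglib
+    #   conf = kconfiglib.Config("Kconfig")
+    #   conf.load_config(".config")
+    #   foo = conf.get_symbol("FOO")
+    #   if foo is not None:
+    #       # Reflects user values, defaults, selects/implies and visibility,
+    #       # with the precedence implemented above
+    #       print(foo.get_value())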
+
+    def get_user_value(self):
+        """Returns the value assigned to the symbol in a .config or via
+        Symbol.set_user_value() (provided the value was valid for the type of
+        the symbol). Returns None in case of no user value."""
+        return self.user_val
+
+    def get_upper_bound(self):
+        """For string/hex/int symbols and for bool and tristate symbols that
+        cannot be modified (see is_modifiable()), returns None.
+
+        Otherwise, returns the highest value the symbol can be set to with
+        Symbol.set_user_value() (that will not be truncated): one of "m" or
+        "y", arranged from lowest to highest. This corresponds to the highest
+        value the symbol could be given in e.g. the 'make menuconfig'
+        interface.
+
+        See also the tri_less*() and tri_greater*() functions, which could come
+        in handy."""
+        if self.type != BOOL and self.type != TRISTATE:
+            return None
+        rev_dep = self.config._eval_expr(self.rev_dep)
+        # A bool selected to "m" gets promoted to "y", pinning it
+        if rev_dep == "m" and self.type == BOOL:
+            return None
+        vis = _get_visibility(self)
+        if TRI_TO_INT[vis] > TRI_TO_INT[rev_dep]:
+            return vis
+        return None
+
+    def get_lower_bound(self):
+        """For string/hex/int symbols and for bool and tristate symbols that
+        cannot be modified (see is_modifiable()), returns None.
+
+        Otherwise, returns the lowest value the symbol can be set to with
+        Symbol.set_user_value() (that will not be truncated): one of "n" or
+        "m", arranged from lowest to highest. This corresponds to the lowest
+        value the symbol could be given in e.g. the 'make menuconfig'
+        interface.
+
+        See also the tri_less*() and tri_greater*() functions, which could come
+        in handy."""
+        if self.type != BOOL and self.type != TRISTATE:
+            return None
+        rev_dep = self.config._eval_expr(self.rev_dep)
+        # A bool selected to "m" gets promoted to "y", pinning it
+        if rev_dep == "m" and self.type == BOOL:
+            return None
+        if TRI_TO_INT[_get_visibility(self)] > TRI_TO_INT[rev_dep]:
+            return rev_dep
+        return None
+
+    def get_assignable_values(self):
+        """For string/hex/int symbols and for bool and tristate symbols that
+        cannot be modified (see is_modifiable()), returns the empty list.
+
+        Otherwise, returns a list containing the user values that can be
+        assigned to the symbol (that won't be truncated). Usage example:
+
+        if "m" in sym.get_assignable_values():
+            sym.set_user_value("m")
+
+        This is basically a more convenient interface to
+        get_lower/upper_bound() when wanting to test if a particular tristate
+        value can be assigned."""
+        if self.type != BOOL and self.type != TRISTATE:
+            return []
+        rev_dep = self.config._eval_expr(self.rev_dep)
+        # A bool selected to "m" gets promoted to "y", pinning it
+        if rev_dep == "m" and self.type == BOOL:
+            return []
+        res = ["n", "m", "y"][TRI_TO_INT[rev_dep] :
+                              TRI_TO_INT[_get_visibility(self)] + 1]
+        return res if len(res) > 1 else []
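+
+    # Illustrative sketch, not part of the library: for a modifiable
+    # bool/tristate symbol, the list returned above spans the range from
+    # get_lower_bound() to get_upper_bound(). "sym" is assumed to be a Symbol
+    # obtained from a Config instance. Kept as comments so that importing
+    # this module is unaffected.
+    #
+    #   values = sym.get_assignable_values()
+    #   if values:
+    #       assert values[0] == sym.get_lower_bound()
+    #       assert values[-1] == sym.get_upper_bound()
+    #       sym.set_user_value(values[-1])  # highest assignable value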
+
+    def get_visibility(self):
+        """Returns the visibility of the symbol: one of "n", "m" or "y". For
+        bool and tristate symbols, this is an upper bound on the value users
+        can set for the symbol. For other types of symbols, a visibility of "n"
+        means the user value will be ignored. A visibility of "n" corresponds
+        to not being visible in the 'make *config' interfaces.
+
+        Example (assuming we're running with modules enabled -- i.e., MODULES
+        set to 'y'):
+
+        # Assume this has been assigned 'n'
+        config N_SYM
+            tristate "N_SYM"
+
+        # Assume this has been assigned 'm'
+        config M_SYM
+            tristate "M_SYM"
+
+        # Has visibility 'n'
+        config A
+            tristate "A"
+            depends on N_SYM
+
+        # Has visibility 'm'
+        config B
+            tristate "B"
+            depends on M_SYM
+
+        # Has visibility 'y'
+        config C
+            tristate "C"
+
+        # Has no prompt, and hence visibility 'n'
+        config D
+            tristate
+
+        Having visibility be tri-valued ensures that e.g. a symbol cannot be
+        set to "y" by the user if it depends on a symbol with value "m", which
+        wouldn't be safe.
+
+        You should probably look at get_lower/upper_bound(),
+        get_assignable_values() and is_modifiable() before using this."""
+        return _get_visibility(self)
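+
+    # Illustrative sketch, not part of the library: visibility is the upper
+    # bound that user assignments are truncated against. "sym" is assumed to
+    # be a bool/tristate Symbol from a Config instance. Kept as comments so
+    # that importing this module is unaffected.
+    #
+    #   if sym.get_visibility() == "n":
+    #       pass  # no user value can take effect right now
+    #   elif "y" in sym.get_assignable_values():
+    #       sym.set_user_value("y")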
+
+    def get_referenced_symbols(self, refs_from_enclosing=False):
+        """Returns the set() of all symbols referenced by this symbol. For
+        example, the symbol defined by
+
+        config FOO
+            bool
+            prompt "foo" if A && B
+            default C if D
+            depends on E
+            select F if G
+
+        references the symbols A through G.
+
+        refs_from_enclosing (default: False): If True, the symbols referenced
+           by enclosing menus and ifs will be included in the result."""
+        return self.all_referenced_syms if refs_from_enclosing else \
+               self.referenced_syms
+
+    def get_selected_symbols(self):
+        """Returns the set() of all symbols X for which this symbol has a
+        'select X' or 'select X if Y' (regardless of whether Y is satisfied or
+        not). This is a subset of the symbols returned by
+        get_referenced_symbols()."""
+        return self.selected_syms
+
+    def get_implied_symbols(self):
+        """Returns the set() of all symbols X for which this symbol has an
+        'imply X' or 'imply X if Y' (regardless of whether Y is satisfied or
+        not). This is a subset of the symbols returned by
+        get_referenced_symbols()."""
+        return self.implied_syms
+
+    def set_user_value(self, v):
+        """Sets the user value of the symbol.
+
+        Equal in effect to assigning the value to the symbol within a .config
+        file. Use get_lower/upper_bound() or get_assignable_values() to find
+        the range of currently assignable values for bool and tristate symbols;
+        setting values outside this range will cause the user value to differ
+        from the result of Symbol.get_value() (be truncated). Values that are
+        invalid for the type (such as a_bool.set_user_value("foo")) are
+        ignored, and a warning is emitted if an attempt is made to assign such
+        a value.
+
+        For any type of symbol, is_modifiable() can be used to check if a user
+        value will currently have any effect on the symbol, as determined by
+        its visibility and range of assignable values. Any value that is valid
+        for the type (bool, tristate, etc.) will end up being reflected in
+        get_user_value() though, and might have an effect later if conditions
+        change. To get rid of the user value, use unset_user_value().
+
+        Any symbols dependent on the symbol are (recursively) invalidated, so
+        things will just work with regards to dependencies.
+
+        v: The user value to give to the symbol."""
+        self._set_user_value_no_invalidate(v, False)
+
+        # There might be something more efficient you could do here, but play
+        # it safe.
+        if self.name == "MODULES":
+            self.config._invalidate_all()
+            return
+
+        self._invalidate()
+        self._invalidate_dependent()
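+
+    # Illustrative sketch, not part of the library: a typical edit-and-save
+    # round trip. "Kconfig", ".config" and the bool symbol "FOO" are
+    # placeholder assumptions. Kept as comments so that importing this module
+    # is unaffected.
+    #
+    #   import kconfiglib
+    #   conf = kconfiglib.Config("Kconfig")
+    #   conf.load_config(".config")
+    #   foo = conf.get_symbol("FOO")
+    #   if foo is not None and "y" in foo.get_assignable_values():
+    #       foo.set_user_value("y")
+    #   conf.write_config(".config")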
+
+    def unset_user_value(self):
+        """Resets the user value of the symbol, as if the symbol had never
+        gotten a user value via Config.load_config() or
+        Symbol.set_user_value()."""
+        self._unset_user_value_no_recursive_invalidate()
+        self._invalidate_dependent()
+
+    def is_modifiable(self):
+        """Returns True if the value of the symbol could be modified by calling
+        Symbol.set_user_value().
+
+        For bools and tristates, this corresponds to the symbol being visible
+        in the 'make menuconfig' interface and not already being pinned to a
+        specific value (e.g. because it is selected by another symbol).
+
+        For strings and numbers, this corresponds to just being visible. (See
+        Symbol.get_visibility().)"""
+        if self.is_special_:
+            return False
+        if self.type == BOOL or self.type == TRISTATE:
+            rev_dep = self.config._eval_expr(self.rev_dep)
+            # A bool selected to "m" gets promoted to "y", pinning it
+            if rev_dep == "m" and self.type == BOOL:
+                return False
+            return TRI_TO_INT[_get_visibility(self)] > TRI_TO_INT[rev_dep]
+        return _get_visibility(self) != "n"
+
+    def is_defined(self):
+        """Returns False if the symbol is referred to in the Kconfig but never
+        actually defined."""
+        return self.is_defined_
+
+    def is_special(self):
+        """Returns True if the symbol is one of the special symbols n, m, y, or
+        UNAME_RELEASE, or gets its value from the environment."""
+        return self.is_special_
+
+    def is_from_environment(self):
+        """Returns True if the symbol gets its value from the environment."""
+        return self.is_from_env
+
+    def has_ranges(self):
+        """Returns True if the symbol is of type INT or HEX and has ranges that
+        limit what values it can take on."""
+        return bool(self.ranges)
+
+    def is_choice_symbol(self):
+        """Returns True if the symbol is in a choice statement and is an actual
+        choice symbol (see Choice.get_symbols())."""
+        return self.is_choice_sym
+
+    def is_choice_selection(self):
+        """Returns True if the symbol is contained in a choice statement and is
+        the selected item. Equivalent to
+
+        sym.is_choice_symbol() and sym.get_parent().get_selection() is sym"""
+        return self.is_choice_sym and self.parent.get_selection() is self
+
+    def is_allnoconfig_y(self):
+        """Returns True if the symbol has the 'allnoconfig_y' option set."""
+        return self.allnoconfig_y
+
+    def __str__(self):
+        """Returns a string containing various information about the symbol."""
+        return self.config._get_sym_or_choice_str(self)
+
+    #
+    # Private methods
+    #
+
+    def __init__(self):
+        """Symbol constructor -- not intended to be called directly by
+        Kconfiglib clients."""
+
+        self.name = None
+        self.type = UNKNOWN
+        self.prompts = []
+        self.def_exprs = [] # 'default' properties
+        self.ranges = [] # 'range' properties (for int and hex)
+        self.help = None # Help text
+        self.rev_dep = "n" # Reverse (select-related) dependencies
+        self.weak_rev_dep = "n" # Weak reverse (imply-related) dependencies
+        self.config = None
+        self.parent = None
+
+        self.user_val = None # Value set by user
+
+        # The prompt, default value, select, and imply conditions without any
+        # dependencies from menus and ifs propagated to them
+        self.orig_prompts = []
+        self.orig_def_exprs = []
+        self.orig_selects = []
+        self.orig_implies = []
+
+        # Dependencies inherited from containing menus and ifs
+        self.deps_from_containing = None
+        # The set of symbols referenced by this symbol (see
+        # get_referenced_symbols())
+        self.referenced_syms = set()
+        # The set of symbols selected by this symbol (see
+        # get_selected_symbols())
+        self.selected_syms = set()
+        # The set of symbols implied by this symbol (see get_implied_symbols())
+        self.implied_syms = set()
+        # Like 'referenced_syms', but includes symbols from
+        # dependencies inherited from enclosing menus and ifs
+        self.all_referenced_syms = set()
+
+        # This records only dependencies from enclosing ifs and menus together
+        # with local 'depends on' dependencies. Needed when determining actual
+        # choice items (hrrrr...). See Choice._determine_actual_symbols().
+        self.menu_dep = None
+
+        # See Symbol.get_ref/def_locations().
+        self.def_locations = []
+        self.ref_locations = []
+
+        # Populated in Config._build_dep() after parsing. Links the symbol to
+        # the symbols that immediately depend on it (in a caching/invalidation
+        # sense). The total set of dependent symbols for the symbol (the
+        # transitive closure) is calculated on an as-needed basis in
+        # _get_dependent().
+        self.dep = set()
+
+        # Cached values
+
+        # Caches the calculated value
+        self.cached_val = None
+        # Caches the visibility, which acts as an upper bound on the value
+        self.cached_visibility = None
+        # Caches the total list of dependent symbols. Calculated in
+        # _get_dependent().
+        self.cached_deps = None
+
+        # Flags
+
+        # Does the symbol have an entry in the Kconfig file? The trailing
+        # underscore avoids a collision with is_defined().
+        self.is_defined_ = False
+        # Should the symbol get an entry in .config?
+        self.write_to_conf = False
+        # Set to true when _make_conf() is called on a symbol, so that symbols
+        # defined in multiple locations only get one .config entry. We need to
+        # reset it prior to writing out a new .config.
+        self.already_written = False
+        # This is set to True for "actual" choice symbols; see
+        # Choice._determine_actual_symbols().
+        self.is_choice_sym = False
+        # Does the symbol get its value in some special way, e.g. from the
+        # environment or by being one of the special symbols n, m, and y? If
+        # so, the value is stored in self.cached_val, which is never
+        # invalidated. The trailing underscore avoids a collision with
+        # is_special().
+        self.is_special_ = False
+        # Does the symbol get its value from the environment?
+        self.is_from_env = False
+        # Does the symbol have the 'allnoconfig_y' option set?
+        self.allnoconfig_y = False
+
+    def _invalidate(self):
+        if self.is_special_:
+            return
+
+        if self.is_choice_sym:
+            self.parent._invalidate()
+
+        self.cached_val = None
+        self.cached_visibility = None
+
+    def _invalidate_dependent(self):
+        for sym in self._get_dependent():
+            sym._invalidate()
+
+    def _set_user_value_no_invalidate(self, v, suppress_load_warnings):
+        """Like set_user_value(), but does not invalidate any symbols.
+
+        suppress_load_warnings: some warnings that can be helpful when
+           manually invoking set_user_value() are just annoying when loading
+           a .config. This flag is set to True to suppress such warnings.
+
+           Perhaps this could be made optional for load_config() instead."""
+
+        if self.is_special_:
+            if self.is_from_env:
+                self.config._warn('attempt to assign the value "{0}" to the '
+                                  'symbol {1}, which gets its value from the '
+                                  'environment. Assignment ignored.'
+                                  .format(v, self.name))
+            else:
+                self.config._warn('attempt to assign the value "{0}" to the '
+                                  'special symbol {1}. Assignment ignored.'
+                                  .format(v, self.name))
+            return
+
+        if not self.is_defined_:
+            filename, linenr = self.ref_locations[0]
+            if self.config.print_undef_assign:
+                _stderr_msg('note: attempt to assign the value "{0}" to {1}, '
+                            "which is referenced at {2}:{3} but never "
+                            "defined. Assignment ignored."
+                            .format(v, self.name, filename, linenr))
+            return
+
+        # Check if the value is valid for our type
+        if not ((self.type == BOOL     and (v == "y" or v == "n")   ) or
+                (self.type == TRISTATE and (v == "y" or v == "m" or
+                                            v == "n")               ) or
+                (self.type == STRING                                ) or
+                (self.type == INT      and _is_base_n(v, 10)        ) or
+                (self.type == HEX      and _is_base_n(v, 16)        )):
+            self.config._warn('the value "{0}" is invalid for {1}, which has '
+                              "type {2}. Assignment ignored."
+                              .format(v, self.name, TYPENAME[self.type]))
+            return
+
+        if not self.prompts and not suppress_load_warnings:
+            self.config._warn('assigning "{0}" to the symbol {1} which '
+                              'lacks prompts and thus has visibility "n". '
+                              'The assignment will have no effect.'
+                              .format(v, self.name))
+
+        self.user_val = v
+
+        if self.is_choice_sym and (self.type == BOOL or self.type == TRISTATE):
+            choice = self.parent
+            if v == "y":
+                choice.user_val = self
+                choice.user_mode = "y"
+            elif v == "m":
+                choice.user_val = None
+                choice.user_mode = "m"
+
+    def _unset_user_value_no_recursive_invalidate(self):
+        self._invalidate()
+        self.user_val = None
+
+        if self.is_choice_sym:
+            self.parent._unset_user_value()
+
+    def _make_conf(self, append_fn):
+        if self.already_written:
+            return
+
+        self.already_written = True
+
+        # Note: write_to_conf is determined in get_value()
+        val = self.get_value()
+        if not self.write_to_conf:
+            return
+
+        if self.type == BOOL or self.type == TRISTATE:
+            append_fn("{0}{1}={2}".format(self.config.config_prefix, self.name, val)
+                      if val == "y" or val == "m" else
+                      "# {0}{1} is not set".format(self.config.config_prefix, self.name))
+
+        elif self.type == INT or self.type == HEX:
+            append_fn("{0}{1}={2}".format(self.config.config_prefix, self.name, val))
+
+        elif self.type == STRING:
+            # Escape \ and "
+            append_fn('{0}{1}="{2}"'
+                      .format(self.config.config_prefix, self.name,
+                              val.replace("\\", "\\\\").replace('"', '\\"')))
+
+        else:
+            _internal_error("Internal error while creating .config: unknown "
+                            'type "{0}".'.format(self.type))
+
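+    # Illustrative sketch, not part of the library: the kind of .config lines
+    # the method above emits, assuming a config_prefix of "CONFIG_" and
+    # hypothetical symbol names and values.
+    #
+    #   CONFIG_FOO=y                      (bool/tristate with value "y"/"m")
+    #   # CONFIG_BAR is not set           (bool/tristate with value "n")
+    #   CONFIG_BAUDRATE=115200            (int/hex, written verbatim)
+    #   CONFIG_BOOTARGS="console=ttyS0"   (string, with \ and " escaped)
+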
+    def _get_dependent(self):
+        """Returns the set of symbols that should be invalidated if the value
+        of the symbol changes, because they might be affected by the change.
+        Note that this is an internal API -- it's probably of limited
+        usefulness to clients."""
+        if self.cached_deps is not None:
+            return self.cached_deps
+
+        res = set(self.dep)
+        for s in self.dep:
+            res |= s._get_dependent()
+
+        if self.is_choice_sym:
+            # Choice symbols also depend (recursively) on their siblings. The
+            # siblings are not included in 'dep' to avoid dependency loops.
+            for sibling in self.parent.actual_symbols:
+                if sibling is not self:
+                    res.add(sibling)
+                    res |= sibling.dep
+                    for s in sibling.dep:
+                        res |= s._get_dependent()
+
+        self.cached_deps = res
+        return res
+
+    def _has_auto_menu_dep_on(self, on):
+        """See Choice._determine_actual_symbols()."""
+        if not isinstance(self.parent, Choice):
+            _internal_error("Attempt to determine auto menu dependency for "
+                            "symbol ouside of choice.")
+
+        if not self.prompts:
+            # If we have no prompt, use the menu dependencies instead (what was
+            # specified with 'depends on')
+            return self.menu_dep is not None and \
+                   self.config._expr_depends_on(self.menu_dep, on)
+
+        for _, cond_expr in self.prompts:
+            if self.config._expr_depends_on(cond_expr, on):
+                return True
+
+        return False
+
+class Menu(Item):
+
+    """Represents a menu statement."""
+
+    #
+    # Public interface
+    #
+
+    def get_config(self):
+        """Return the Config instance this menu is from."""
+        return self.config
+
+    def get_title(self):
+        """Returns the title text of the menu."""
+        return self.title
+
+    def get_parent(self):
+        """Returns the menu or choice statement that contains the menu, or
+        None if the menu is at the top level. Note that if statements are
+        treated as syntactic sugar and do not have an explicit class
+        representation."""
+        return self.parent
+
+    def get_location(self):
+        """Returns the location of the menu as a (filename, linenr) tuple,
+        where filename is a string and linenr an int."""
+        return (self.filename, self.linenr)
+
+    def get_items(self, recursive=False):
+        """Returns a list containing the items (symbols, menus, choice
+        statements and comments) in the menu, in the same order that the
+        items appear within the menu.
+
+        recursive (default: False): True if items contained in items within the
+           menu should be included recursively (preorder)."""
+
+        if not recursive:
+            return self.block
+
+        res = []
+        for item in self.block:
+            res.append(item)
+            if isinstance(item, Menu):
+                res.extend(item.get_items(True))
+            elif isinstance(item, Choice):
+                res.extend(item.get_items())
+        return res
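+
+    # Illustrative sketch, not part of the library: walking a menu tree.
+    # "conf" is assumed to be a Config instance, and get_top_level_items() is
+    # assumed to be available on it. Kept as comments so that importing this
+    # module is unaffected.
+    #
+    #   for item in conf.get_top_level_items():
+    #       if item.is_menu():
+    #           for sub in item.get_items(recursive=True):
+    #               if sub.is_symbol():
+    #                   print(sub.get_name())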
+
+    def get_symbols(self, recursive=False):
+        """Returns a list containing the symbols in the menu, in the same order
+        that they appear within the menu.
+
+        recursive (default: False): True if symbols contained in items within
+           the menu should be included recursively."""
+
+        return [item for item in self.get_items(recursive) if
+                isinstance(item, Symbol)]
+
+    def get_visibility(self):
+        """Returns the visibility of the menu. This also affects the visibility
+        of subitems. See also Symbol.get_visibility()."""
+        return self.config._eval_expr(self.dep_expr)
+
+    def get_visible_if_visibility(self):
+        """Returns the visibility the menu gets from its 'visible if'
+        condition. "y" if the menu has no 'visible if' condition."""
+        return self.config._eval_expr(self.visible_if_expr)
+
+    def get_referenced_symbols(self, refs_from_enclosing=False):
+        """See Symbol.get_referenced_symbols()."""
+        return self.all_referenced_syms if refs_from_enclosing else \
+               self.referenced_syms
+
+    def __str__(self):
+        """Returns a string containing various information about the menu."""
+        depends_on_str = self.config._expr_val_str(self.orig_deps,
+                                                   "(no dependencies)")
+        visible_if_str = self.config._expr_val_str(self.visible_if_expr,
+                                                   "(no dependencies)")
+
+        additional_deps_str = " " + \
+          self.config._expr_val_str(self.deps_from_containing,
+                                    "(no additional dependencies)")
+
+        return _lines("Menu",
+                      "Title                     : " + self.title,
+                      "'depends on' dependencies : " + depends_on_str,
+                      "'visible if' dependencies : " + visible_if_str,
+                      "Additional dependencies from enclosing menus and "
+                        "ifs:",
+                      additional_deps_str,
+                      "Location: {0}:{1}".format(self.filename, self.linenr))
+
+    #
+    # Private methods
+    #
+
+    def __init__(self):
+        """Menu constructor -- not intended to be called directly by
+        Kconfiglib clients."""
+
+        self.title = None
+        self.dep_expr = None
+        self.visible_if_expr = None
+        self.block = [] # List of contained items
+        self.config = None
+        self.parent = None
+
+        # Dependency expression without dependencies from enclosing menus and
+        # ifs propagated
+        self.orig_deps = None
+
+        # Dependencies inherited from containing menus and ifs
+        self.deps_from_containing = None
+        # The set of symbols referenced by this menu (see
+        # get_referenced_symbols())
+        self.referenced_syms = set()
+        # Like 'referenced_syms', but includes symbols from
+        # dependencies inherited from enclosing menus and ifs
+        self.all_referenced_syms = None
+
+        self.filename = None
+        self.linenr = None
+
+    def _make_conf(self, append_fn):
+        if self.config._eval_expr(self.dep_expr) != "n" and \
+           self.config._eval_expr(self.visible_if_expr) != "n":
+            append_fn("\n#\n# {0}\n#".format(self.title))
+        _make_block_conf(self.block, append_fn)
+
+class Choice(Item):
+
+    """Represents a choice statement. A choice can be in one of three modes:
+
+    "n" - The choice is not visible and no symbols can be selected.
+
+    "m" - Any number of symbols can be set to "m". The rest will be "n". This
+          is safe since potentially conflicting options don't actually get
+          compiled into the kernel simultaneously with "m".
+
+    "y" - One symbol will be "y" while the rest are "n".
+
+    Only tristate choices can be in "m" mode, and the visibility of the choice
+    is an upper bound on the mode, so that e.g. a choice that depends on a
+    symbol with value "m" will be in "m" mode.
+
+    The mode changes automatically when a value is assigned to a symbol within
+    the choice.
+
+    See Symbol.get_visibility() too."""
+
+    #
+    # Public interface
+    #
+
+    def get_config(self):
+        """Returns the Config instance this choice is from."""
+        return self.config
+
+    def get_name(self):
+        """For named choices, returns the name. Returns None for unnamed
+        choices. No named choices appear anywhere in the kernel Kconfig files
+        as of Linux 3.7.0-rc8."""
+        return self.name
+
+    def get_type(self):
+        """Returns the type of the choice. See Symbol.get_type()."""
+        return self.type
+
+    def get_prompts(self):
+        """Returns a list of prompts defined for the choice, in the order they
+        appear in the configuration files. Returns the empty list for choices
+        with no prompt.
+
+        This list will have a single entry for the vast majority of choices
+        having prompts, but having multiple prompts for a single choice is
+        possible through having multiple 'choice' entries for it (though I'm
+        not sure if that ever happens in practice)."""
+        return [prompt for prompt, _ in self.orig_prompts]
+
+    def get_help(self):
+        """Returns the help text of the choice, or None if the choice has no
+        help text."""
+        return self.help
+
+    def get_parent(self):
+        """Returns the menu or choice statement that contains the choice, or
+        None if the choice is at the top level. Note that if statements are
+        treated as syntactic sugar and do not have an explicit class
+        representation."""
+        return self.parent
+
+    def get_def_locations(self):
+        """Returns a list of (filename, linenr) tuples, where filename (string)
+        and linenr (int) represent a location where the choice is defined. For
+        the vast majority of choices (all of them as of Linux 3.7.0-rc8) this
+        list will only contain one element, but it's possible for named choices
+        to be defined in multiple locations."""
+        return self.def_locations
+
+    def get_selection(self):
+        """Returns the symbol selected (either by the user or through
+        defaults), or None if either no symbol is selected or the mode is not
+        "y"."""
+        if self.cached_selection is not None:
+            if self.cached_selection == NO_SELECTION:
+                return None
+            return self.cached_selection
+
+        if self.get_mode() != "y":
+            return self._cache_ret(None)
+
+        # User choice available?
+        if self.user_val is not None and _get_visibility(self.user_val) == "y":
+            return self._cache_ret(self.user_val)
+
+        if self.optional:
+            return self._cache_ret(None)
+
+        return self._cache_ret(self.get_selection_from_defaults())
+
+    def get_selection_from_defaults(self):
+        """Like Choice.get_selection(), but acts as if no symbol has been
+        selected by the user and no 'optional' flag is in effect."""
+
+        if not self.actual_symbols:
+            return None
+
+        for symbol, cond_expr in self.def_exprs:
+            if self.config._eval_expr(cond_expr) != "n":
+                chosen_symbol = symbol
+                break
+        else:
+            chosen_symbol = self.actual_symbols[0]
+
+        # Is the chosen symbol visible?
+        if _get_visibility(chosen_symbol) != "n":
+            return chosen_symbol
+        # Otherwise, pick the first visible symbol
+        for sym in self.actual_symbols:
+            if _get_visibility(sym) != "n":
+                return sym
+        return None
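+
+    # Illustrative sketch, not part of the library: querying a choice through
+    # one of its symbols. "sym" is assumed to be a choice symbol (see
+    # Symbol.is_choice_symbol()). Kept as comments so that importing this
+    # module is unaffected.
+    #
+    #   if sym.is_choice_symbol():
+    #       choice = sym.get_parent()
+    #       if choice.get_mode() == "y":
+    #           sel = choice.get_selection()
+    #           print(sel.get_name() if sel is not None else "(no selection)")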
+
+    def get_user_selection(self):
+        """If the choice is in "y" mode and has a user-selected symbol, returns
+        that symbol. Otherwise, returns None."""
+        return self.user_val
+
+    def get_items(self):
+        """Gets all items contained in the choice in the same order as within
+        the configuration ("items" instead of "symbols" since choices and
+        comments might appear within choices. This only happens in one place as
+        of Linux 3.7.0-rc8, in drivers/usb/gadget/Kconfig)."""
+        return self.block
+
+    def get_symbols(self):
+        """Returns a list containing the choice's symbols.
+
+        A quirk (perhaps a bug) of Kconfig is that you can put items within a
+        choice that will not be considered members of the choice insofar as
+        selection is concerned. This happens for example if one symbol within a
+        choice 'depends on' the symbol preceding it, or if you put non-symbol
+        items within choices.
+
+        As of Linux 3.7.0-rc8, this seems to be used intentionally in one
+        place: drivers/usb/gadget/Kconfig.
+
+        This function returns the "proper" symbols of the choice in the order
+        they appear in the choice, excluding such items. If you want all items
+        in the choice, use get_items()."""
+        return self.actual_symbols
+
+    def get_referenced_symbols(self, refs_from_enclosing=False):
+        """See Symbol.get_referenced_symbols()."""
+        return self.all_referenced_syms if refs_from_enclosing else \
+               self.referenced_syms
+
+    def get_visibility(self):
+        """Returns the visibility of the choice statement: one of "n", "m" or
+        "y". This acts as an upper limit on the mode of the choice (though bool
+        choices can only have the mode "y"). See the class documentation for an
+        explanation of modes."""
+        return _get_visibility(self)
+
+    def get_mode(self):
+        """Returns the mode of the choice. See the class documentation for
+        an explanation of modes."""
+        minimum_mode = "n" if self.optional else "m"
+        mode = self.user_mode if self.user_mode is not None else minimum_mode
+        mode = self.config._eval_min(mode, _get_visibility(self))
+
+        # Promote "m" to "y" for boolean choices
+        if mode == "m" and self.type == BOOL:
+            return "y"
+
+        return mode
+
+    def is_optional(self):
+        """Returns True if the choice has the 'optional' flag set (and so will
+        default to "n" mode)."""
+        return self.optional
+
+    def __str__(self):
+        """Returns a string containing various information about the choice
+        statement."""
+        return self.config._get_sym_or_choice_str(self)
+
+    #
+    # Private methods
+    #
+
+    def __init__(self):
+        """Choice constructor -- not intended to be called directly by
+        Kconfiglib clients."""
+
+        self.name = None # Yes, choices can be named
+        self.type = UNKNOWN
+        self.prompts = []
+        self.def_exprs = [] # 'default' properties
+        self.help = None # Help text
+        self.block = [] # List of contained items
+        self.config = None
+        self.parent = None
+
+        self.user_val = None
+        self.user_mode = None
+
+        # We need to filter out symbols that appear within the choice block but
+        # are not considered choice items (see
+        # Choice._determine_actual_symbols()). This list holds the "actual"
+        # choice items.
+        self.actual_symbols = []
+
+        # The prompts and default values without any dependencies from
+        # enclosing menus and ifs propagated
+        self.orig_prompts = []
+        self.orig_def_exprs = []
+
+        # Dependencies inherited from containing menus and ifs
+        self.deps_from_containing = None
+        # The set of symbols referenced by this choice (see
+        # get_referenced_symbols())
+        self.referenced_syms = set()
+        # Like 'referenced_syms', but includes symbols from
+        # dependencies inherited from enclosing menus and ifs
+        self.all_referenced_syms = set()
+
+        # See Choice.get_def_locations()
+        self.def_locations = []
+
+        # Cached values
+        self.cached_selection = None
+        self.cached_visibility = None
+
+        self.optional = False
+
+    def _determine_actual_symbols(self):
+        """If a symbol's visibility depends on the preceding symbol within a
+        choice, it is no longer viewed as a choice item. (This is quite
+        possibly a bug, but some things consciously use it... ugh. It stems
+        from automatic submenu creation.) In addition, it's possible to have
+        choices and comments within choices, and those shouldn't be considered
+        choice items either. Only drivers/usb/gadget/Kconfig seems to depend on
+        any of this. This method computes the "actual" items in the choice and
+        sets the is_choice_sym flag on them (retrieved via is_choice_symbol()).
+
+        Don't let this scare you: an earlier version simply checked for a
+        sequence of symbols where all symbols after the first appeared in the
+        'depends on' expression of the first, and that worked fine.  The added
+        complexity is to be future-proof in the event that
+        drivers/usb/gadget/Kconfig turns even more sinister. It might very well
+        be overkill (especially if that file is refactored ;)."""
+
+        # Items might depend on each other in a tree structure, so we need a
+        # stack to keep track of the current tentative parent
+        stack = []
+
+        for item in self.block:
+            if not isinstance(item, Symbol):
+                stack = []
+                continue
+
+            while stack:
+                if item._has_auto_menu_dep_on(stack[-1]):
+                    # The item should not be viewed as a choice item, so don't
+                    # set item.is_choice_sym
+                    stack.append(item)
+                    break
+                else:
+                    stack.pop()
+            else:
+                item.is_choice_sym = True
+                self.actual_symbols.append(item)
+                stack.append(item)
+
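+    # Illustrative sketch, not part of the library: a hypothetical choice in
+    # which B depends on the preceding symbol A. The loop above then treats
+    # only A as an actual choice symbol; B becomes an automatic submenu entry
+    # under A rather than a selectable alternative.
+    #
+    #   choice
+    #       prompt "example"
+    #   config A
+    #       bool "A"
+    #   config B
+    #       bool "B"
+    #       depends on A
+    #   endchoice
+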
+    def _cache_ret(self, selection):
+        # As None is used to indicate the lack of a cached value we can't use
+        # that to cache the fact that the choice has no selection. Instead, we
+        # use the symbolic constant NO_SELECTION.
+        if selection is None:
+            self.cached_selection = NO_SELECTION
+        else:
+            self.cached_selection = selection
+
+        return selection
+
+    def _invalidate(self):
+        self.cached_selection = None
+        self.cached_visibility = None
+
+    def _unset_user_value(self):
+        self._invalidate()
+        self.user_val = None
+        self.user_mode = None
+
+    def _make_conf(self, append_fn):
+        _make_block_conf(self.block, append_fn)
+
+class Comment(Item):
+
+    """Represents a comment statement."""
+
+    #
+    # Public interface
+    #
+
+    def get_config(self):
+        """Returns the Config instance this comment is from."""
+        return self.config
+
+    def get_text(self):
+        """Returns the text of the comment."""
+        return self.text
+
+    def get_parent(self):
+        """Returns the menu or choice statement that contains the comment, or
+        None if the comment is at the top level. Note that if statements are
+        treated as syntactic sugar and do not have an explicit class
+        representation."""
+        return self.parent
+
+    def get_location(self):
+        """Returns the location of the comment as a (filename, linenr) tuple,
+        where filename is a string and linenr an int."""
+        return (self.filename, self.linenr)
+
+    def get_visibility(self):
+        """Returns the visibility of the comment. See also
+        Symbol.get_visibility()."""
+        return self.config._eval_expr(self.dep_expr)
+
+    def get_referenced_symbols(self, refs_from_enclosing=False):
+        """See Symbol.get_referenced_symbols()."""
+        return self.all_referenced_syms if refs_from_enclosing else \
+               self.referenced_syms
+
+    def __str__(self):
+        """Returns a string containing various information about the
+        comment."""
+        dep_str = self.config._expr_val_str(self.orig_deps,
+                                            "(no dependencies)")
+
+        additional_deps_str = " " + \
+          self.config._expr_val_str(self.deps_from_containing,
+                                    "(no additional dependencies)")
+
+        return _lines("Comment",
+                      "Text: "         + str(self.text),
+                      "Dependencies: " + dep_str,
+                      "Additional dependencies from enclosing menus and "
+                        "ifs:",
+                      additional_deps_str,
+                      "Location: {0}:{1}".format(self.filename, self.linenr))
+
+    #
+    # Private methods
+    #
+
+    def __init__(self):
+        """Comment constructor -- not intended to be called directly by
+        Kconfiglib clients."""
+
+        self.text = None
+        self.dep_expr = None
+        self.config = None
+        self.parent = None
+
+        # Dependency expression without dependencies from enclosing menus and
+        # ifs propagated
+        self.orig_deps = None
+
+        # Dependencies inherited from containing menus and ifs
+        self.deps_from_containing = None
+        # The set of symbols referenced by this comment (see
+        # get_referenced_symbols())
+        self.referenced_syms = set()
+        # Like 'referenced_syms', but includes symbols from
+        # dependencies inherited from enclosing menus and ifs
+        self.all_referenced_syms = None
+
+        self.filename = None
+        self.linenr = None
+
+    def _make_conf(self, append_fn):
+        if self.config._eval_expr(self.dep_expr) != "n":
+            append_fn("\n#\n# {0}\n#".format(self.text))
+
+class Kconfig_Syntax_Error(Exception):
+    """Exception raised for syntax errors."""
+    pass
+
+class Internal_Error(Exception):
+    """Exception raised for internal errors."""
+    pass
+
+#
+# Public functions
+#
+
+def tri_less(v1, v2):
+    """Returns True if the tristate v1 is less than the tristate v2, where "n",
+    "m" and "y" are ordered from lowest to highest."""
+    return TRI_TO_INT[v1] < TRI_TO_INT[v2]
+
+def tri_less_eq(v1, v2):
+    """Returns True if the tristate v1 is less than or equal to the tristate
+    v2, where "n", "m" and "y" are ordered from lowest to highest."""
+    return TRI_TO_INT[v1] <= TRI_TO_INT[v2]
+
+def tri_greater(v1, v2):
+    """Returns True if the tristate v1 is greater than the tristate v2, where
+    "n", "m" and "y" are ordered from lowest to highest."""
+    return TRI_TO_INT[v1] > TRI_TO_INT[v2]
+
+def tri_greater_eq(v1, v2):
+    """Returns True if the tristate v1 is greater than or equal to the tristate
+    v2, where "n", "m" and "y" are ordered from lowest to highest."""
+    return TRI_TO_INT[v1] >= TRI_TO_INT[v2]
+
+#
+# Internal classes
+#
+
+class _Feed(object):
+
+    """Class for working with sequences in a stream-like fashion; handy for
+    tokens."""
+
+    # This would be more helpful on the item classes, but would remove some
+    # flexibility
+    __slots__ = ['items', 'length', 'i']
+
+    def __init__(self, items):
+        self.items = items
+        self.length = len(self.items)
+        self.i = 0
+
+    def get_next(self):
+        if self.i >= self.length:
+            return None
+        item = self.items[self.i]
+        self.i += 1
+        return item
+
+    def peek_next(self):
+        return None if self.i >= self.length else self.items[self.i]
+
+    def check(self, token):
+        """Check if the next token is 'token'. If so, remove it from the token
+        feed and return True. Otherwise, leave it in and return False."""
+        if self.i < self.length and self.items[self.i] == token:
+            self.i += 1
+            return True
+        return False
+
+    def unget_all(self):
+        self.i = 0
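+
+# For illustration, a parsing loop over a _Feed of tokens might look roughly
+# like this (hypothetical usage):
+#
+#   feed = _Feed(tokens)
+#   if feed.check(T_DEPENDS):     # consume 'depends' if it is the next token
+#       feed.check(T_ON)          # optionally consume 'on'
+#   tok = feed.peek_next()        # look ahead without consuming
+#   tok = feed.get_next()         # consume; returns None at the end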
+
+class _FileFeed(object):
+
+    """Feeds lines from a file. Keeps track of the filename and current line
+    number. Joins any line ending in \\ with the following line. We need to be
+    careful to get the line number right in the presence of continuation
+    lines."""
+
+    __slots__ = ['filename', 'lines', 'length', 'linenr']
+
+    def __init__(self, filename):
+        self.filename = _clean_up_path(filename)
+        with open(filename, "r") as f:
+            # No interleaving of I/O and processing yet. Don't know if it would
+            # help.
+            self.lines = f.readlines()
+        self.length = len(self.lines)
+        self.linenr = 0
+
+    def get_next(self):
+        if self.linenr >= self.length:
+            return None
+        line = self.lines[self.linenr]
+        self.linenr += 1
+        while line.endswith("\\\n"):
+            line = line[:-2] + self.lines[self.linenr]
+            self.linenr += 1
+        return line
+
+    def peek_next(self):
+        linenr = self.linenr
+        if linenr >= self.length:
+            return None
+        line = self.lines[linenr]
+        while line.endswith("\\\n"):
+            linenr += 1
+            line = line[:-2] + self.lines[linenr]
+        return line
+
+    def unget(self):
+        self.linenr -= 1
+        while self.lines[self.linenr].endswith("\\\n"):
+            self.linenr -= 1
+
+    def next_nonblank(self):
+        """Removes lines up to and including the next non-blank (not all-space)
+        line and returns it. Returns None if there are no more non-blank
+        lines."""
+        while 1:
+            line = self.get_next()
+            if line is None or not line.isspace():
+                return line
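+
+# For illustration: if a Kconfig file contains a physical line ending in a
+# backslash, e.g. "depends on FOO && \" followed by "BAR", get_next() returns
+# the two joined into a single logical line with the backslash and newline
+# removed, and linenr ends up just past the last physical line consumed, so
+# later error messages still point at the right place.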
+
+#
+# Internal functions
+#
+
+def _get_visibility(sc):
+    """Symbols and Choices have a "visibility" that acts as an upper bound on
+    the values a user can set for them, corresponding to the visibility in e.g.
+    'make menuconfig'. This function calculates the visibility for the Symbol
+    or Choice 'sc' -- the logic is nearly identical."""
+    if sc.cached_visibility is None:
+        vis = "n"
+        for _, cond_expr in sc.prompts:
+            vis = sc.config._eval_max(vis, cond_expr)
+
+        if isinstance(sc, Symbol) and sc.is_choice_sym:
+            if sc.type == TRISTATE and vis == "m" and \
+               sc.parent.get_mode() == "y":
+                # Choice symbols with visibility "m" are not visible if the
+                # choice has mode "y"
+                vis = "n"
+            else:
+                vis = sc.config._eval_min(vis, _get_visibility(sc.parent))
+
+        # Promote "m" to "y" if we're dealing with a non-tristate
+        if vis == "m" and sc.type != TRISTATE:
+            vis = "y"
+
+        sc.cached_visibility = vis
+
+    return sc.cached_visibility
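+
+# For illustration, for a Kconfig fragment like
+#
+#   config FOO
+#           bool "Foo support" if BAR
+#
+# the visibility of FOO is the value of BAR, except that an "m" result is
+# promoted to "y" because FOO is not a tristate.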
+
+def _make_and(e1, e2):
+    """Constructs an AND (&&) expression. Performs trivial simplification.
+    Nones equate to 'y'.
+
+    Note: returns None if e1 == e2 == None."""
+    if e1 is None or e1 == "y":
+        return e2
+    if e2 is None or e2 == "y":
+        return e1
+
+    # Prefer to merge argument lists if possible to reduce the number of nodes
+
+    if isinstance(e1, tuple) and e1[0] == AND:
+        if isinstance(e2, tuple) and e2[0] == AND:
+            return (AND, e1[1] + e2[1])
+        return (AND, e1[1] + [e2])
+
+    if isinstance(e2, tuple) and e2[0] == AND:
+        return (AND, e2[1] + [e1])
+
+    return (AND, [e1, e2])
+
+def _make_or(e1, e2):
+    """Constructs an OR (||) expression. Performs trivial simplification and
+    avoids Nones. Nones equate to 'y', which is usually what we want, but needs
+    to be kept in mind."""
+
+    # Perform trivial simplification and avoid None's (which
+    # correspond to y's)
+    if e1 is None or e2 is None or e1 == "y" or e2 == "y":
+        return "y"
+    if e1 == "n":
+        return e2
+
+    # Prefer to merge argument lists if possible to reduce the number of nodes
+
+    if isinstance(e1, tuple) and e1[0] == OR:
+        if isinstance(e2, tuple) and e2[0] == OR:
+            return (OR, e1[1] + e2[1])
+        return (OR, e1[1] + [e2])
+
+    if isinstance(e2, tuple) and e2[0] == OR:
+        return (OR, e2[1] + [e1])
+
+    return (OR, [e1, e2])
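+
+# For illustration, _make_and() and _make_or() flatten argument lists rather
+# than building nested two-operand nodes:
+#
+#   _make_and("y", e)                        ->  e
+#   _make_and((AND, [a, b]), c)              ->  (AND, [a, b, c])
+#   _make_and((AND, [a, b]), (AND, [c, d]))  ->  (AND, [a, b, c, d])
+#   _make_or("n", e)                         ->  e
+#   _make_or(e, None)                        ->  "y"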
+
+def _get_expr_syms_rec(expr, res):
+    """_get_expr_syms() helper. Recurses through expressions."""
+    if isinstance(expr, Symbol):
+        res.add(expr)
+    elif isinstance(expr, str):
+        return
+    elif expr[0] == AND or expr[0] == OR:
+        for term in expr[1]:
+            _get_expr_syms_rec(term, res)
+    elif expr[0] == NOT:
+        _get_expr_syms_rec(expr[1], res)
+    elif expr[0] == EQUAL or expr[0] == UNEQUAL:
+        if isinstance(expr[1], Symbol):
+            res.add(expr[1])
+        if isinstance(expr[2], Symbol):
+            res.add(expr[2])
+    else:
+        _internal_error("Internal error while fetching symbols from an "
+                        "expression with token stream {0}.".format(expr))
+
+def _get_expr_syms(expr):
+    """Returns the set() of symbols appearing in expr."""
+    res = set()
+    if expr is not None:
+        _get_expr_syms_rec(expr, res)
+    return res
+
+def _str_val(obj):
+    """Returns the value of obj as a string. If obj is not a string (constant
+    symbol), it must be a Symbol."""
+    return obj if isinstance(obj, str) else obj.get_value()
+
+def _make_block_conf(block, append_fn):
+    """Returns a list of .config strings for a block (list) of items."""
+
+    # Collect the substrings in a list and later use join() instead of += to
+    # build the final .config contents. With older Python versions, this yields
+    # linear instead of quadratic complexity.
+    for item in block:
+        item._make_conf(append_fn)
+
+def _sym_str_string(sym_or_str):
+    if isinstance(sym_or_str, str):
+        return '"' + sym_or_str + '"'
+    return sym_or_str.name
+
+def _intersperse(lst, op):
+    """_expr_to_str() helper. Gets the string representation of each expression
+    in lst and produces a list where op has been inserted between the
+    elements."""
+    if not lst:
+        return ""
+
+    res = []
+
+    def handle_sub_expr(expr):
+        no_parens = isinstance(expr, (str, Symbol)) or \
+                    expr[0] in (EQUAL, UNEQUAL) or \
+                    PRECEDENCE[op] <= PRECEDENCE[expr[0]]
+        if not no_parens:
+            res.append("(")
+        res.extend(_expr_to_str_rec(expr))
+        if not no_parens:
+            res.append(")")
+
+    op_str = OP_TO_STR[op]
+
+    handle_sub_expr(lst[0])
+    for expr in lst[1:]:
+        res.append(op_str)
+        handle_sub_expr(expr)
+
+    return res
+
+def _expr_to_str_rec(expr):
+    if expr is None:
+        return [""]
+
+    if isinstance(expr, (Symbol, str)):
+        return [_sym_str_string(expr)]
+
+    if expr[0] in (AND, OR):
+        return _intersperse(expr[1], expr[0])
+
+    if expr[0] == NOT:
+        need_parens = not isinstance(expr[1], (str, Symbol))
+
+        res = ["!"]
+        if need_parens:
+            res.append("(")
+        res.extend(_expr_to_str_rec(expr[1]))
+        if need_parens:
+            res.append(")")
+        return res
+
+    if expr[0] in (EQUAL, UNEQUAL):
+        return [_sym_str_string(expr[1]),
+                OP_TO_STR[expr[0]],
+                _sym_str_string(expr[2])]
+
+def _expr_to_str(expr):
+    return "".join(_expr_to_str_rec(expr))
+
+def _indentation(line):
+    """Returns the length of the line's leading whitespace, treating tab stops
+    as being spaced 8 characters apart."""
+    line = line.expandtabs()
+    return len(line) - len(line.lstrip())
+
+def _deindent(line, indent):
+    """Deindent 'line' by 'indent' spaces."""
+    line = line.expandtabs()
+    if len(line) <= indent:
+        return line
+    return line[indent:]
+
+def _is_base_n(s, n):
+    try:
+        int(s, n)
+        return True
+    except ValueError:
+        return False
+
+def _lines(*args):
+    """Returns a string consisting of all arguments, with newlines inserted
+    between them."""
+    return "\n".join(args)
+
+def _comment(s):
+    """Returns a new string with "#" inserted before each line in 's'."""
+    if not s:
+        return "#"
+    res = "".join(["#" + line for line in s.splitlines(True)])
+    if s.endswith("\n"):
+        return res + "#"
+    return res
+
+def _clean_up_path(path):
+    """Strips an initial "./" and any trailing slashes from 'path'."""
+    if path.startswith("./"):
+        path = path[2:]
+    return path.rstrip("/")
+
+def _build_msg(msg, filename, linenr):
+    if filename is not None:
+        msg = "{0}:{1}: ".format(_clean_up_path(filename), linenr) + msg
+    return msg
+
+def _stderr_msg(msg, filename, linenr):
+    sys.stderr.write(_build_msg(msg, filename, linenr) + "\n")
+
+def _tokenization_error(s, filename, linenr):
+    loc = "" if filename is None else "{0}:{1}: ".format(filename, linenr)
+    raise Kconfig_Syntax_Error("{0}Couldn't tokenize '{1}'"
+                               .format(loc, s.strip()))
+
+def _parse_error(s, msg, filename, linenr):
+    loc = "" if filename is None else "{0}:{1}: ".format(filename, linenr)
+    raise Kconfig_Syntax_Error("{0}Couldn't parse '{1}'{2}"
+                               .format(loc, s.strip(),
+                                       "." if msg is None else ": " + msg))
+
+def _internal_error(msg):
+    raise Internal_Error(msg +
+      "\nSorry! You may want to send an email to ulfalizer a.t Google's "
+      "email service to tell me about this. Include the message above and the "
+      "stack trace and describe what you were doing.")
+
+#
+# Internal global constants
+#
+
+# Tokens
+(T_AND, T_OR, T_NOT,
+ T_OPEN_PAREN, T_CLOSE_PAREN,
+ T_EQUAL, T_UNEQUAL,
+ T_MAINMENU, T_MENU, T_ENDMENU,
+ T_SOURCE, T_CHOICE, T_ENDCHOICE,
+ T_COMMENT, T_CONFIG, T_MENUCONFIG,
+ T_HELP, T_IF, T_ENDIF, T_DEPENDS, T_ON,
+ T_OPTIONAL, T_PROMPT, T_DEFAULT,
+ T_BOOL, T_TRISTATE, T_HEX, T_INT, T_STRING,
+ T_DEF_BOOL, T_DEF_TRISTATE,
+ T_SELECT, T_IMPLY, T_RANGE, T_OPTION, T_ALLNOCONFIG_Y, T_ENV,
+ T_DEFCONFIG_LIST, T_MODULES, T_VISIBLE) = range(40)
+
+# The leading underscore before the function assignments below prevents pydoc
+# from listing them. The constants could be hidden too, but they're fairly
+# obviously internal anyway, so don't bother spamming the code.
+
+# Keyword to token map. Note that the get() method is assigned directly as a
+# small optimization.
+_get_keyword = \
+  {"mainmenu": T_MAINMENU, "menu": T_MENU, "endmenu": T_ENDMENU,
+   "endif": T_ENDIF, "endchoice": T_ENDCHOICE, "source": T_SOURCE,
+   "choice": T_CHOICE, "config": T_CONFIG, "comment": T_COMMENT,
+   "menuconfig": T_MENUCONFIG, "help": T_HELP, "if": T_IF,
+   "depends": T_DEPENDS, "on": T_ON, "optional": T_OPTIONAL,
+   "prompt": T_PROMPT, "default": T_DEFAULT, "bool": T_BOOL, "boolean": T_BOOL,
+   "tristate": T_TRISTATE, "int": T_INT, "hex": T_HEX, "def_bool": T_DEF_BOOL,
+   "def_tristate": T_DEF_TRISTATE, "string": T_STRING, "select": T_SELECT,
+   "imply" : T_IMPLY, "range": T_RANGE, "option": T_OPTION,
+   "allnoconfig_y": T_ALLNOCONFIG_Y, "env": T_ENV,
+   "defconfig_list": T_DEFCONFIG_LIST, "modules": T_MODULES,
+   "visible": T_VISIBLE}.get
+
+# Strings to use for True and False
+BOOL_STR = {False: "false", True: "true"}
+
+# Tokens after which identifier-like lexemes are treated as strings. T_CHOICE
+# is included to avoid symbols being registered for named choices.
+STRING_LEX = frozenset((T_BOOL, T_TRISTATE, T_INT, T_HEX, T_STRING, T_CHOICE,
+                        T_PROMPT, T_MENU, T_COMMENT, T_SOURCE, T_MAINMENU))
+
+# Matches the initial token on a line; see _tokenize(). Also eats trailing
+# whitespace as an optimization.
+_initial_token_re_match = re.compile(r"[^\w]*(\w+)\s*").match
+
+# Matches an identifier/keyword optionally preceded by whitespace. Also eats
+# trailing whitespace as an optimization.
+_id_keyword_re_match = re.compile(r"\s*([\w./-]+)\s*").match
+
+# Regular expression for finding $-references to symbols in strings
+_sym_ref_re_search = re.compile(r"\$[A-Za-z0-9_]+").search
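+
+# For illustration of the regexps above: on the line "config FOO",
+# _initial_token_re_match() captures "config", _id_keyword_re_match() applied
+# to the remainder captures "FOO", and _sym_ref_re_search() would find "$ARCH"
+# in a string such as "arch/$ARCH/Kconfig".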
+
+# Integers representing symbol types
+UNKNOWN, BOOL, TRISTATE, STRING, HEX, INT = range(6)
+
+# Strings to use for types
+TYPENAME = {UNKNOWN: "unknown", BOOL: "bool", TRISTATE: "tristate",
+            STRING: "string", HEX: "hex", INT: "int"}
+
+# Token to type mapping
+TOKEN_TO_TYPE = {T_BOOL: BOOL, T_TRISTATE: TRISTATE, T_STRING: STRING,
+                 T_INT: INT, T_HEX: HEX}
+
+# Default values for symbols of different types (the value the symbol gets if
+# it is not assigned a user value and none of its 'default' clauses kick in)
+DEFAULT_VALUE = {BOOL: "n", TRISTATE: "n", STRING: "", INT: "", HEX: ""}
+
+# Indicates that no item is selected in a choice statement
+NO_SELECTION = 0
+
+# Integers representing expression types
+AND, OR, NOT, EQUAL, UNEQUAL = range(5)
+
+# Map from tristate values to integers
+TRI_TO_INT = {"n": 0, "m": 1, "y": 2}
+
+# Printing-related stuff
+
+OP_TO_STR = {AND: " && ", OR: " || ", EQUAL: " = ", UNEQUAL: " != "}
+PRECEDENCE = {OR: 0, AND: 1, NOT: 2}
diff --git a/tools/u-boot-tools/buildman/test.py b/tools/u-boot-tools/buildman/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..de02f61be6589f48ed933f43293d2563bc9548d3
--- /dev/null
+++ b/tools/u-boot-tools/buildman/test.py
@@ -0,0 +1,464 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2012 The Chromium OS Authors.
+#
+
+import os
+import shutil
+import sys
+import tempfile
+import time
+import unittest
+
+# Bring in the patman libraries
+our_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(our_path, '../patman'))
+
+import board
+import bsettings
+import builder
+import control
+import command
+import commit
+import terminal
+import test_util
+import toolchain
+
+use_network = True
+
+settings_data = '''
+# Buildman settings file
+
+[toolchain]
+main: /usr/sbin
+
+[toolchain-alias]
+x86: i386 x86_64
+'''
+
+errors = [
+    '''main.c: In function 'main_loop':
+main.c:260:6: warning: unused variable 'joe' [-Wunused-variable]
+''',
+    '''main.c: In function 'main_loop2':
+main.c:295:2: error: 'fred' undeclared (first use in this function)
+main.c:295:2: note: each undeclared identifier is reported only once for each function it appears in
+make[1]: *** [main.o] Error 1
+make: *** [common/libcommon.o] Error 2
+Make failed
+''',
+    '''arch/arm/dts/socfpga_arria10_socdk_sdmmc.dtb: Warning \
+(avoid_unnecessary_addr_size): /clocks: unnecessary #address-cells/#size-cells \
+without "ranges" or child "reg" property
+''',
+    '''powerpc-linux-ld: warning: dot moved backwards before `.bss'
+powerpc-linux-ld: warning: dot moved backwards before `.bss'
+powerpc-linux-ld: u-boot: section .text lma 0xfffc0000 overlaps previous sections
+powerpc-linux-ld: u-boot: section .rodata lma 0xfffef3ec overlaps previous sections
+powerpc-linux-ld: u-boot: section .reloc lma 0xffffa400 overlaps previous sections
+powerpc-linux-ld: u-boot: section .data lma 0xffffcd38 overlaps previous sections
+powerpc-linux-ld: u-boot: section .u_boot_cmd lma 0xffffeb40 overlaps previous sections
+powerpc-linux-ld: u-boot: section .bootpg lma 0xfffff198 overlaps previous sections
+''',
+   '''In file included from %(basedir)sarch/sandbox/cpu/cpu.c:9:0:
+%(basedir)sarch/sandbox/include/asm/state.h:44:0: warning: "xxxx" redefined [enabled by default]
+%(basedir)sarch/sandbox/include/asm/state.h:43:0: note: this is the location of the previous definition
+%(basedir)sarch/sandbox/cpu/cpu.c: In function 'do_reset':
+%(basedir)sarch/sandbox/cpu/cpu.c:27:1: error: unknown type name 'blah'
+%(basedir)sarch/sandbox/cpu/cpu.c:28:12: error: expected declaration specifiers or '...' before numeric constant
+make[2]: *** [arch/sandbox/cpu/cpu.o] Error 1
+make[1]: *** [arch/sandbox/cpu] Error 2
+make[1]: *** Waiting for unfinished jobs....
+In file included from %(basedir)scommon/board_f.c:55:0:
+%(basedir)sarch/sandbox/include/asm/state.h:44:0: warning: "xxxx" redefined [enabled by default]
+%(basedir)sarch/sandbox/include/asm/state.h:43:0: note: this is the location of the previous definition
+make: *** [sub-make] Error 2
+'''
+]
+
+
+# hash, subject, return code, list of errors/warnings
+commits = [
+    ['1234', 'upstream/master, ok', 0, []],
+    ['5678', 'Second commit, a warning', 0, errors[0:1]],
+    ['9012', 'Third commit, error', 1, errors[0:2]],
+    ['3456', 'Fourth commit, warning', 0, [errors[0], errors[2]]],
+    ['7890', 'Fifth commit, link errors', 1, [errors[0], errors[3]]],
+    ['abcd', 'Sixth commit, fixes all errors', 0, []],
+    ['ef01', 'Seventh commit, check directory suppression', 1, [errors[4]]],
+]
+
+boards = [
+    ['Active', 'arm', 'armv7', '', 'Tester', 'ARM Board 1', 'board0',  ''],
+    ['Active', 'arm', 'armv7', '', 'Tester', 'ARM Board 2', 'board1', ''],
+    ['Active', 'powerpc', 'powerpc', '', 'Tester', 'PowerPC board 1', 'board2', ''],
+    ['Active', 'powerpc', 'mpc83xx', '', 'Tester', 'PowerPC board 2', 'board3', ''],
+    ['Active', 'sandbox', 'sandbox', '', 'Tester', 'Sandbox board', 'board4', ''],
+]
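+
+# Each entry above supplies the positional arguments for board.Board() in
+# setUp() below; assuming the usual buildman ordering these are status,
+# arch, CPU, SoC, vendor, board name, target and options.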
+
+BASE_DIR = 'base'
+
+OUTCOME_OK, OUTCOME_WARN, OUTCOME_ERR = range(3)
+
+class Options:
+    """Class that holds build options"""
+    pass
+
+class TestBuild(unittest.TestCase):
+    """Test buildman
+
+    TODO: Write tests for the rest of the functionality
+    """
+    def setUp(self):
+        # Set up commits to build
+        self.commits = []
+        sequence = 0
+        for commit_info in commits:
+            comm = commit.Commit(commit_info[0])
+            comm.subject = commit_info[1]
+            comm.return_code = commit_info[2]
+            comm.error_list = commit_info[3]
+            comm.sequence = sequence
+            sequence += 1
+            self.commits.append(comm)
+
+        # Set up boards to build
+        self.boards = board.Boards()
+        for brd in boards:
+            self.boards.AddBoard(board.Board(*brd))
+        self.boards.SelectBoards([])
+
+        # Add some test settings
+        bsettings.Setup(None)
+        bsettings.AddFile(settings_data)
+
+        # Set up the toolchains
+        self.toolchains = toolchain.Toolchains()
+        self.toolchains.Add('arm-linux-gcc', test=False)
+        self.toolchains.Add('sparc-linux-gcc', test=False)
+        self.toolchains.Add('powerpc-linux-gcc', test=False)
+        self.toolchains.Add('gcc', test=False)
+
+        # Avoid sending any output
+        terminal.SetPrintTestMode()
+        self._col = terminal.Color()
+
+    def Make(self, commit, brd, stage, *args, **kwargs):
+        global base_dir
+
+        result = command.CommandResult()
+        boardnum = int(brd.target[-1])
+        result.return_code = 0
+        result.stderr = ''
+        result.stdout = ('This is the test output for board %s, commit %s' %
+                (brd.target, commit.hash))
+        if ((boardnum >= 1 and boardnum >= commit.sequence) or
+                boardnum == 4 and commit.sequence == 6):
+            result.return_code = commit.return_code
+            result.stderr = (''.join(commit.error_list)
+                % {'basedir' : base_dir + '/.bm-work/00/'})
+        if stage == 'build':
+            target_dir = None
+            for arg in args:
+                if arg.startswith('O='):
+                    target_dir = arg[2:]
+
+            if not os.path.isdir(target_dir):
+                os.mkdir(target_dir)
+
+        result.combined = result.stdout + result.stderr
+        return result
+
+    def assertSummary(self, text, arch, plus, boards, outcome=OUTCOME_ERR):
+        col = self._col
+        expected_colour = (col.GREEN if outcome == OUTCOME_OK else
+                           col.YELLOW if outcome == OUTCOME_WARN else col.RED)
+        expect = '%10s: ' % arch
+        # TODO(sjg@chromium.org): If plus is '', we shouldn't need this
+        expect += ' ' + col.Color(expected_colour, plus)
+        expect += '  '
+        for board in boards:
+            expect += col.Color(expected_colour, ' %s' % board)
+        self.assertEqual(text, expect)
+
+    def testOutput(self):
+        """Test basic builder operation and output
+
+        This does a line-by-line verification of the summary output.
+        """
+        global base_dir
+
+        base_dir = tempfile.mkdtemp()
+        if not os.path.isdir(base_dir):
+            os.mkdir(base_dir)
+        build = builder.Builder(self.toolchains, base_dir, None, 1, 2,
+                                checkout=False, show_unknown=False)
+        build.do_make = self.Make
+        board_selected = self.boards.GetSelectedDict()
+
+        # Build the boards for the pre-defined commits and warnings/errors
+        # associated with each. This calls our Make() to inject the fake output.
+        build.BuildBoards(self.commits, board_selected, keep_outputs=False,
+                          verbose=False)
+        lines = terminal.GetPrintTestLines()
+        count = 0
+        for line in lines:
+            if line.text.strip():
+                count += 1
+
+        # We should get two starting messages, then an update for every commit
+        # built.
+        self.assertEqual(count, len(commits) * len(boards) + 2)
+        build.SetDisplayOptions(show_errors=True)
+        build.ShowSummary(self.commits, board_selected)
+        #terminal.EchoPrintTestLines()
+        lines = terminal.GetPrintTestLines()
+
+        # Upstream commit: no errors
+        self.assertEqual(lines[0].text, '01: %s' % commits[0][1])
+
+        # Second commit: all archs should fail with warnings
+        self.assertEqual(lines[1].text, '02: %s' % commits[1][1])
+
+        col = terminal.Color()
+        self.assertSummary(lines[2].text, 'sandbox', 'w+', ['board4'],
+                           outcome=OUTCOME_WARN)
+        self.assertSummary(lines[3].text, 'arm', 'w+', ['board1'],
+                           outcome=OUTCOME_WARN)
+        self.assertSummary(lines[4].text, 'powerpc', 'w+', ['board2', 'board3'],
+                           outcome=OUTCOME_WARN)
+
+        # Second commit: The warnings should be listed
+        self.assertEqual(lines[5].text, 'w+%s' %
+                errors[0].rstrip().replace('\n', '\nw+'))
+        self.assertEqual(lines[5].colour, col.MAGENTA)
+
+        # Third commit: Still fails
+        self.assertEqual(lines[6].text, '03: %s' % commits[2][1])
+        self.assertSummary(lines[7].text, 'sandbox', '+', ['board4'])
+        self.assertSummary(lines[8].text, 'arm', '', ['board1'],
+                           outcome=OUTCOME_OK)
+        self.assertSummary(lines[9].text, 'powerpc', '+', ['board2', 'board3'])
+
+        # Expect a compiler error
+        self.assertEqual(lines[10].text, '+%s' %
+                errors[1].rstrip().replace('\n', '\n+'))
+
+        # Fourth commit: Compile errors are fixed, just have warning for board3
+        self.assertEqual(lines[11].text, '04: %s' % commits[3][1])
+        self.assertSummary(lines[12].text, 'sandbox', 'w+', ['board4'],
+                           outcome=OUTCOME_WARN)
+        expect = '%10s: ' % 'powerpc'
+        expect += ' ' + col.Color(col.GREEN, '')
+        expect += '  '
+        expect += col.Color(col.GREEN, ' %s' % 'board2')
+        expect += ' ' + col.Color(col.YELLOW, 'w+')
+        expect += '  '
+        expect += col.Color(col.YELLOW, ' %s' % 'board3')
+        self.assertEqual(lines[13].text, expect)
+
+        # Compile error fixed
+        self.assertEqual(lines[14].text, '-%s' %
+                errors[1].rstrip().replace('\n', '\n-'))
+        self.assertEqual(lines[14].colour, col.GREEN)
+
+        self.assertEqual(lines[15].text, 'w+%s' %
+                errors[2].rstrip().replace('\n', '\nw+'))
+        self.assertEqual(lines[15].colour, col.MAGENTA)
+
+        # Fifth commit
+        self.assertEqual(lines[16].text, '05: %s' % commits[4][1])
+        self.assertSummary(lines[17].text, 'sandbox', '+', ['board4'])
+        self.assertSummary(lines[18].text, 'powerpc', '', ['board3'],
+                           outcome=OUTCOME_OK)
+
+        # The second line of errors[3] is a duplicate, so buildman will drop it
+        expect = errors[3].rstrip().split('\n')
+        expect = [expect[0]] + expect[2:]
+        self.assertEqual(lines[19].text, '+%s' %
+                '\n'.join(expect).replace('\n', '\n+'))
+
+        self.assertEqual(lines[20].text, 'w-%s' %
+                errors[2].rstrip().replace('\n', '\nw-'))
+
+        # Sixth commit
+        self.assertEqual(lines[21].text, '06: %s' % commits[5][1])
+        self.assertSummary(lines[22].text, 'sandbox', '', ['board4'],
+                           outcome=OUTCOME_OK)
+
+        # The second line of errors[3] is a duplicate, so buildman will drop it
+        expect = errors[3].rstrip().split('\n')
+        expect = [expect[0]] + expect[2:]
+        self.assertEqual(lines[23].text, '-%s' %
+                '\n'.join(expect).replace('\n', '\n-'))
+
+        self.assertEqual(lines[24].text, 'w-%s' %
+                errors[0].rstrip().replace('\n', '\nw-'))
+
+        # Seventh commit
+        self.assertEqual(lines[25].text, '07: %s' % commits[6][1])
+        self.assertSummary(lines[26].text, 'sandbox', '+', ['board4'])
+
+        # Pick out the correct error lines
+        expect_str = errors[4].rstrip().replace('%(basedir)s', '').split('\n')
+        expect = expect_str[3:8] + [expect_str[-1]]
+        self.assertEqual(lines[27].text, '+%s' %
+                '\n'.join(expect).replace('\n', '\n+'))
+
+        # Now the warnings lines
+        expect = [expect_str[0]] + expect_str[10:12] + [expect_str[9]]
+        self.assertEqual(lines[28].text, 'w+%s' %
+                '\n'.join(expect).replace('\n', '\nw+'))
+
+        self.assertEqual(len(lines), 29)
+        shutil.rmtree(base_dir)
+
+    def _testGit(self):
+        """Test basic builder operation by building a branch"""
+        base_dir = tempfile.mkdtemp()
+        if not os.path.isdir(base_dir):
+            os.mkdir(base_dir)
+        options = Options()
+        options.git = os.getcwd()
+        options.summary = False
+        options.jobs = None
+        options.dry_run = False
+        #options.git = os.path.join(base_dir, 'repo')
+        options.branch = 'test-buildman'
+        options.force_build = False
+        options.list_tool_chains = False
+        options.count = -1
+        options.git_dir = None
+        options.threads = None
+        options.show_unknown = False
+        options.quick = False
+        options.show_errors = False
+        options.keep_outputs = False
+        args = ['tegra20']
+        control.DoBuildman(options, args)
+        shutil.rmtree(base_dir)
+
+    def testBoardSingle(self):
+        """Test single board selection"""
+        self.assertEqual(self.boards.SelectBoards(['sandbox']),
+                         ({'all': ['board4'], 'sandbox': ['board4']}, []))
+
+    def testBoardArch(self):
+        """Test single board selection"""
+        self.assertEqual(self.boards.SelectBoards(['arm']),
+                         ({'all': ['board0', 'board1'],
+                          'arm': ['board0', 'board1']}, []))
+
+    def testBoardArchSingle(self):
+        """Test single board selection"""
+        self.assertEqual(self.boards.SelectBoards(['arm sandbox']),
+                         ({'sandbox': ['board4'],
+                          'all': ['board0', 'board1', 'board4'],
+                          'arm': ['board0', 'board1']}, []))
+
+
+    def testBoardArchSingleMultiWord(self):
+        """Test single board selection"""
+        self.assertEqual(self.boards.SelectBoards(['arm', 'sandbox']),
+                         ({'sandbox': ['board4'],
+                          'all': ['board0', 'board1', 'board4'],
+                          'arm': ['board0', 'board1']}, []))
+
+    def testBoardSingleAnd(self):
+        """Test single board selection"""
+        self.assertEqual(self.boards.SelectBoards(['Tester & arm']),
+                         ({'Tester&arm': ['board0', 'board1'],
+                           'all': ['board0', 'board1']}, []))
+
+    def testBoardTwoAnd(self):
+        """Test single board selection"""
+        self.assertEqual(self.boards.SelectBoards(['Tester', '&', 'arm',
+                                                   'Tester' '&', 'powerpc',
+                                                   'sandbox']),
+                         ({'sandbox': ['board4'],
+                          'all': ['board0', 'board1', 'board2', 'board3',
+                                  'board4'],
+                          'Tester&powerpc': ['board2', 'board3'],
+                          'Tester&arm': ['board0', 'board1']}, []))
+
+    def testBoardAll(self):
+        """Test single board selection"""
+        self.assertEqual(self.boards.SelectBoards([]),
+                         ({'all': ['board0', 'board1', 'board2', 'board3',
+                                  'board4']}, []))
+
+    def testBoardRegularExpression(self):
+        """Test single board selection"""
+        self.assertEqual(self.boards.SelectBoards(['T.*r&^Po']),
+                         ({'all': ['board2', 'board3'],
+                          'T.*r&^Po': ['board2', 'board3']}, []))
+
+    def testBoardDuplicate(self):
+        """Test single board selection"""
+        self.assertEqual(self.boards.SelectBoards(['sandbox sandbox',
+                                                   'sandbox']),
+                         ({'all': ['board4'], 'sandbox': ['board4']}, []))
+
+    def CheckDirs(self, build, dirname):
+        self.assertEqual('base%s' % dirname, build._GetOutputDir(1))
+        self.assertEqual('base%s/fred' % dirname,
+                         build.GetBuildDir(1, 'fred'))
+        self.assertEqual('base%s/fred/done' % dirname,
+                         build.GetDoneFile(1, 'fred'))
+        self.assertEqual('base%s/fred/u-boot.sizes' % dirname,
+                         build.GetFuncSizesFile(1, 'fred', 'u-boot'))
+        self.assertEqual('base%s/fred/u-boot.objdump' % dirname,
+                         build.GetObjdumpFile(1, 'fred', 'u-boot'))
+        self.assertEqual('base%s/fred/err' % dirname,
+                         build.GetErrFile(1, 'fred'))
+
+    def testOutputDir(self):
+        build = builder.Builder(self.toolchains, BASE_DIR, None, 1, 2,
+                                checkout=False, show_unknown=False)
+        build.commits = self.commits
+        build.commit_count = len(self.commits)
+        subject = self.commits[1].subject.translate(builder.trans_valid_chars)
+        dirname = '/%02d_of_%02d_g%s_%s' % (2, build.commit_count, commits[1][0],
+                                            subject[:20])
+        self.CheckDirs(build, dirname)
+
+    def testOutputDirCurrent(self):
+        build = builder.Builder(self.toolchains, BASE_DIR, None, 1, 2,
+                                checkout=False, show_unknown=False)
+        build.commits = None
+        build.commit_count = 0
+        self.CheckDirs(build, '/current')
+
+    def testOutputDirNoSubdirs(self):
+        build = builder.Builder(self.toolchains, BASE_DIR, None, 1, 2,
+                                checkout=False, show_unknown=False,
+                                no_subdirs=True)
+        build.commits = None
+        build.commit_count = 0
+        self.CheckDirs(build, '')
+
+    def testToolchainAliases(self):
+        self.assertTrue(self.toolchains.Select('arm') != None)
+        with self.assertRaises(ValueError):
+            self.toolchains.Select('no-arch')
+        with self.assertRaises(ValueError):
+            self.toolchains.Select('x86')
+
+        self.toolchains = toolchain.Toolchains()
+        self.toolchains.Add('x86_64-linux-gcc', test=False)
+        self.assertTrue(self.toolchains.Select('x86') != None)
+
+        self.toolchains = toolchain.Toolchains()
+        self.toolchains.Add('i386-linux-gcc', test=False)
+        self.assertTrue(self.toolchains.Select('x86') != None)
+
+    def testToolchainDownload(self):
+        """Test that we can download toolchains"""
+        if use_network:
+            with test_util.capture_sys_output() as (stdout, stderr):
+                url = self.toolchains.LocateArchUrl('arm')
+            self.assertRegexpMatches(url, 'https://www.kernel.org/pub/tools/'
+                    'crosstool/files/bin/x86_64/.*/'
+                    'x86_64-gcc-.*-nolibc_arm-.*linux-gnueabi.tar.xz')
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tools/u-boot-tools/buildman/toolchain.py b/tools/u-boot-tools/buildman/toolchain.py
new file mode 100644
index 0000000000000000000000000000000000000000..c62ce136fa1fc6da4c9d652ab62f8313e8bee813
--- /dev/null
+++ b/tools/u-boot-tools/buildman/toolchain.py
@@ -0,0 +1,577 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2012 The Chromium OS Authors.
+#
+
+import re
+import glob
+from HTMLParser import HTMLParser
+import os
+import sys
+import tempfile
+import urllib2
+
+import bsettings
+import command
+import terminal
+
+(PRIORITY_FULL_PREFIX, PRIORITY_PREFIX_GCC, PRIORITY_PREFIX_GCC_PATH,
+    PRIORITY_CALC) = range(4)
+
+# Simple class to collect links from a page
+class MyHTMLParser(HTMLParser):
+    def __init__(self, arch):
+        """Create a new parser
+
+        After the parser runs, self.links will be set to a list of the links
+        to .xz archives found in the page, and self.arch_link will be set to
+        the one for the given architecture (or None if not found).
+
+        Args:
+            arch: Architecture to search for
+        """
+        HTMLParser.__init__(self)
+        self.arch_link = None
+        self.links = []
+        self.re_arch = re.compile('[-_]%s-' % arch)
+
+    def handle_starttag(self, tag, attrs):
+        if tag == 'a':
+            for tag, value in attrs:
+                if tag == 'href':
+                    if value and value.endswith('.xz'):
+                        self.links.append(value)
+                        if self.re_arch.search(value):
+                            self.arch_link = value
+
+
+class Toolchain:
+    """A single toolchain
+
+    Public members:
+        gcc: Full path to C compiler
+        path: Directory path containing C compiler
+        cross: Cross compile string, e.g. 'arm-linux-'
+        arch: Architecture of toolchain as determined from the first
+                component of the filename. E.g. arm-linux-gcc becomes arm
+        priority: Toolchain priority (0=highest, 20=lowest)
+    """
+    def __init__(self, fname, test, verbose=False, priority=PRIORITY_CALC,
+                 arch=None):
+        """Create a new toolchain object.
+
+        Args:
+            fname: Filename of the gcc component
+            test: True to run the toolchain to test it
+            verbose: True to print out the information
+            priority: Priority to use for this toolchain, or PRIORITY_CALC to
+                calculate it
+        """
+        self.gcc = fname
+        self.path = os.path.dirname(fname)
+
+        # Find the CROSS_COMPILE prefix to use for U-Boot. For example,
+        # 'arm-linux-gnueabihf-gcc' turns into 'arm-linux-gnueabihf-'.
+        basename = os.path.basename(fname)
+        pos = basename.rfind('-')
+        self.cross = basename[:pos + 1] if pos != -1 else ''
+
+        # The architecture is the first part of the name
+        pos = self.cross.find('-')
+        if arch:
+            self.arch = arch
+        else:
+            self.arch = self.cross[:pos] if pos != -1 else 'sandbox'
+
+        env = self.MakeEnvironment(False)
+
+        # As a basic sanity check, run the C compiler with --version
+        cmd = [fname, '--version']
+        if priority == PRIORITY_CALC:
+            self.priority = self.GetPriority(fname)
+        else:
+            self.priority = priority
+        if test:
+            result = command.RunPipe([cmd], capture=True, env=env,
+                                     raise_on_error=False)
+            self.ok = result.return_code == 0
+            if verbose:
+                print 'Tool chain test: ',
+                if self.ok:
+                    print "OK, arch='%s', priority %d" % (self.arch,
+                                                          self.priority)
+                else:
+                    print 'BAD'
+                    print 'Command: ', cmd
+                    print result.stdout
+                    print result.stderr
+        else:
+            self.ok = True
+
+    def GetPriority(self, fname):
+        """Return the priority of the toolchain.
+
+        Toolchains are ranked according to their suitability by their
+        filename prefix.
+
+        Args:
+            fname: Filename of toolchain
+        Returns:
+            Priority of toolchain, PRIORITY_CALC=highest, 20=lowest.
+        """
+        priority_list = ['-elf', '-unknown-linux-gnu', '-linux',
+            '-none-linux-gnueabi', '-none-linux-gnueabihf', '-uclinux',
+            '-none-eabi', '-gentoo-linux-gnu', '-linux-gnueabi',
+            '-linux-gnueabihf', '-le-linux', '-uclinux']
+        for prio in range(len(priority_list)):
+            if priority_list[prio] in fname:
+                return PRIORITY_CALC + prio
+        return PRIORITY_CALC + prio
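+
+    # For illustration: 'powerpc-elf-gcc' matches '-elf' and gets priority
+    # PRIORITY_CALC + 0, 'arm-linux-gnueabihf-gcc' matches the earlier
+    # '-linux' entry and gets PRIORITY_CALC + 2, and a filename matching
+    # nothing in the list falls through to the lowest priority.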
+
+    def GetWrapper(self, show_warning=True):
+        """Get the toolchain wrapper from the settings file, if any.
+        """
+        value = ''
+        for name, value in bsettings.GetItems('toolchain-wrapper'):
+            if not value:
+                print "Warning: Wrapper not found"
+        if value:
+            value = value + ' '
+
+        return value
+
+    def MakeEnvironment(self, full_path):
+        """Returns an environment for using the toolchain.
+
+        This takes the current environment and adds CROSS_COMPILE so that
+        the tool chain will operate correctly. This also disables localized
+        output and possibly unicode encoded output of all build tools by
+        adding LC_ALL=C.
+
+        Args:
+            full_path: Return the full path in CROSS_COMPILE and don't set
+                PATH
+        """
+        env = dict(os.environ)
+        wrapper = self.GetWrapper()
+
+        if full_path:
+            env['CROSS_COMPILE'] = wrapper + os.path.join(self.path, self.cross)
+        else:
+            env['CROSS_COMPILE'] = wrapper + self.cross
+            env['PATH'] = self.path + ':' + env['PATH']
+
+        env['LC_ALL'] = 'C'
+
+        return env
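+
+    # For illustration, for a (hypothetical) toolchain at
+    # /opt/gcc/bin/arm-linux-gnueabihf-gcc with no [toolchain-wrapper] setting,
+    # MakeEnvironment(False) sets CROSS_COMPILE=arm-linux-gnueabihf- and
+    # prepends /opt/gcc/bin to PATH, while MakeEnvironment(True) sets
+    # CROSS_COMPILE=/opt/gcc/bin/arm-linux-gnueabihf- and leaves PATH alone;
+    # both set LC_ALL=C.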
+
+
+class Toolchains:
+    """Manage a list of toolchains for building U-Boot
+
+    We select one toolchain for each architecture type
+
+    Public members:
+        toolchains: Dict of Toolchain objects, keyed by architecture name
+        prefixes: Dict of prefixes to check, keyed by architecture. This can
+            be a full path and toolchain prefix, for example
+            {'x86': 'opt/i386-linux/bin/i386-linux-'}, or the name of
+            something on the search path, for example
+            {'arm': 'arm-linux-gnueabihf-'}. Wildcards are not supported.
+        paths: List of paths to check for toolchains (may contain wildcards)
+    """
+
+    def __init__(self):
+        self.toolchains = {}
+        self.prefixes = {}
+        self.paths = []
+        self._make_flags = dict(bsettings.GetItems('make-flags'))
+
+    def GetPathList(self, show_warning=True):
+        """Get a list of available toolchain paths
+
+        Args:
+            show_warning: True to show a warning if there are no tool chains.
+
+        Returns:
+            List of strings, each a path to a toolchain mentioned in the
+            [toolchain] section of the settings file.
+        """
+        toolchains = bsettings.GetItems('toolchain')
+        if show_warning and not toolchains:
+            print ("Warning: No tool chains. Please run 'buildman "
+                   "--fetch-arch all' to download all available toolchains, or "
+                   "add a [toolchain] section to your buildman config file "
+                   "%s. See README for details" %
+                   bsettings.config_fname)
+
+        paths = []
+        for name, value in toolchains:
+            if '*' in value:
+                paths += glob.glob(value)
+            else:
+                paths.append(value)
+        return paths
+
+    def GetSettings(self, show_warning=True):
+        """Get toolchain settings from the settings file.
+
+        Args:
+            show_warning: True to show a warning if there are no tool chains.
+        """
+        self.prefixes = bsettings.GetItems('toolchain-prefix')
+        self.paths += self.GetPathList(show_warning)
+
+    def Add(self, fname, test=True, verbose=False, priority=PRIORITY_CALC,
+            arch=None):
+        """Add a toolchain to our list
+
+        We select the given toolchain as our preferred one for its
+        architecture if it is a higher priority than the others.
+
+        Args:
+            fname: Filename of toolchain's gcc driver
+            test: True to run the toolchain to test it
+            priority: Priority to use for this toolchain
+            arch: Toolchain architecture, or None if not known
+        """
+        toolchain = Toolchain(fname, test, verbose, priority, arch)
+        add_it = toolchain.ok
+        if toolchain.arch in self.toolchains:
+            add_it = (toolchain.priority <
+                        self.toolchains[toolchain.arch].priority)
+        if add_it:
+            self.toolchains[toolchain.arch] = toolchain
+        elif verbose:
+            print ("Toolchain '%s' at priority %d will be ignored because "
+                   "another toolchain for arch '%s' has priority %d" %
+                   (toolchain.gcc, toolchain.priority, toolchain.arch,
+                    self.toolchains[toolchain.arch].priority))
+
+    def ScanPath(self, path, verbose):
+        """Scan a path for a valid toolchain
+
+        Args:
+            path: Path to scan
+            verbose: True to print out progress information
+        Returns:
+            Filename of C compiler if found, else None
+        """
+        fnames = []
+        for subdir in ['.', 'bin', 'usr/bin']:
+            dirname = os.path.join(path, subdir)
+            if verbose: print "      - looking in '%s'" % dirname
+            for fname in glob.glob(dirname + '/*gcc'):
+                if verbose: print "         - found '%s'" % fname
+                fnames.append(fname)
+        return fnames
+
+    def ScanPathEnv(self, fname):
+        """Scan the PATH environment variable for a given filename.
+
+        Args:
+            fname: Filename to scan for
+        Returns:
+            List of matching pathnames, or [] if none
+        """
+        pathname_list = []
+        for path in os.environ["PATH"].split(os.pathsep):
+            path = path.strip('"')
+            pathname = os.path.join(path, fname)
+            if os.path.exists(pathname):
+                pathname_list.append(pathname)
+        return pathname_list
+
+    def Scan(self, verbose):
+        """Scan for available toolchains and select the best for each arch.
+
+        We look for all the toolchains we can find, figure out the
+        architecture for each, and whether it works. Then we select the
+        highest priority toolchain for each arch.
+
+        Args:
+            verbose: True to print out progress information
+        """
+        if verbose: print 'Scanning for tool chains'
+        for name, value in self.prefixes:
+            if verbose: print "   - scanning prefix '%s'" % value
+            if os.path.exists(value):
+                self.Add(value, True, verbose, PRIORITY_FULL_PREFIX, name)
+                continue
+            fname = value + 'gcc'
+            if os.path.exists(fname):
+                self.Add(fname, True, verbose, PRIORITY_PREFIX_GCC, name)
+                continue
+            fname_list = self.ScanPathEnv(fname)
+            for f in fname_list:
+                self.Add(f, True, verbose, PRIORITY_PREFIX_GCC_PATH, name)
+            if not fname_list:
+                raise ValueError, ("No tool chain found for prefix '%s'" %
+                                   value)
+        for path in self.paths:
+            if verbose: print "   - scanning path '%s'" % path
+            fnames = self.ScanPath(path, verbose)
+            for fname in fnames:
+                self.Add(fname, True, verbose)
+
+    def List(self):
+        """List out the selected toolchains for each architecture"""
+        col = terminal.Color()
+        print col.Color(col.BLUE, 'List of available toolchains (%d):' %
+                        len(self.toolchains))
+        if len(self.toolchains):
+            for key, value in sorted(self.toolchains.iteritems()):
+                print '%-10s: %s' % (key, value.gcc)
+        else:
+            print 'None'
+
+    def Select(self, arch):
+        """Returns the toolchain for a given architecture
+
+        Args:
+            arch: Name of architecture (e.g. 'arm', 'ppc_8xx')
+
+        Returns:
+            toolchain object, or None if none found
+        """
+        for tag, value in bsettings.GetItems('toolchain-alias'):
+            if arch == tag:
+                for alias in value.split():
+                    if alias in self.toolchains:
+                        return self.toolchains[alias]
+
+        if not arch in self.toolchains:
+            raise ValueError, ("No tool chain found for arch '%s'" % arch)
+        return self.toolchains[arch]
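+
+    # For illustration, with the [toolchain-alias] section used in
+    # buildman/test.py ("x86: i386 x86_64"), Select('x86') returns whichever
+    # i386-* or x86_64-* toolchain has been added, and raises ValueError if
+    # neither is present.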
+
+    def ResolveReferences(self, var_dict, args):
+        """Resolve variable references in a string
+
+        This converts ${blah} within the string to the value of blah.
+        This function works recursively.
+
+        Args:
+            var_dict: Dictionary containing variables and their values
+            args: String containing make arguments
+        Returns:
+            Resolved string
+
+        >>> bsettings.Setup()
+        >>> tcs = Toolchains()
+        >>> tcs.Add('fred', False)
+        >>> var_dict = {'oblique' : 'OBLIQUE', 'first' : 'fi${second}rst', \
+                        'second' : '2nd'}
+        >>> tcs.ResolveReferences(var_dict, 'this=${oblique}_set')
+        'this=OBLIQUE_set'
+        >>> tcs.ResolveReferences(var_dict, 'this=${oblique}_set${first}nd')
+        'this=OBLIQUE_setfi2ndrstnd'
+        """
+        re_var = re.compile('(\$\{[-_a-z0-9A-Z]{1,}\})')
+
+        while True:
+            m = re_var.search(args)
+            if not m:
+                break
+            lookup = m.group(0)[2:-1]
+            value = var_dict.get(lookup, '')
+            args = args[:m.start(0)] + value + args[m.end(0):]
+        return args
+
+    def GetMakeArguments(self, board):
+        """Returns 'make' arguments for a given board
+
+        The flags are in a section called 'make-flags'. Flags are named
+        after the target they represent, for example snapper9260=TESTING=1
+        will pass TESTING=1 to make when building the snapper9260 board.
+
+        References to other boards can be added in the string also. For
+        example:
+
+        [make-flags]
+        at91-boards=ENABLE_AT91_TEST=1
+        snapper9260=${at91-boards} BUILD_TAG=442
+        snapper9g45=${at91-boards} BUILD_TAG=443
+
+        This will return 'ENABLE_AT91_TEST=1 BUILD_TAG=442' for snapper9260
+        and 'ENABLE_AT91_TEST=1 BUILD_TAG=443' for snapper9g45.
+
+        A special 'target' variable is set to the board target.
+
+        Args:
+            board: Board object for the board to check.
+        Returns:
+            'make' flags for that board, or '' if none
+        """
+        self._make_flags['target'] = board.target
+        arg_str = self.ResolveReferences(self._make_flags,
+                           self._make_flags.get(board.target, ''))
+        args = arg_str.split(' ')
+        i = 0
+        while i < len(args):
+            if not args[i]:
+                del args[i]
+            else:
+                i += 1
+        return args
+
+    def LocateArchUrl(self, fetch_arch):
+        """Find a toolchain available online
+
+        Look in standard places for available toolchains. At present the
+        only standard place is at kernel.org.
+
+        Args:
+            fetch_arch: Architecture to look for, or 'list' for all
+        Returns:
+            If fetch_arch is 'list', a tuple:
+                Machine architecture (e.g. x86_64)
+                List of toolchains
+            else
+                URL containing this toolchain, if available, else None
+        """
+        arch = command.OutputOneLine('uname', '-m')
+        base = 'https://www.kernel.org/pub/tools/crosstool/files/bin'
+        versions = ['7.3.0', '6.4.0', '4.9.4']
+        links = []
+        for version in versions:
+            url = '%s/%s/%s/' % (base, arch, version)
+            print 'Checking: %s' % url
+            response = urllib2.urlopen(url)
+            html = response.read()
+            parser = MyHTMLParser(fetch_arch)
+            parser.feed(html)
+            if fetch_arch == 'list':
+                links += parser.links
+            elif parser.arch_link:
+                return url + parser.arch_link
+        if fetch_arch == 'list':
+            return arch, links
+        return None
+
+    def Download(self, url):
+        """Download a file to a temporary directory
+
+        Args:
+            url: URL to download
+        Returns:
+            Tuple:
+                Temporary directory name
+                Full path to the downloaded archive file in that directory,
+                    or None if there was an error while downloading
+        """
+        print 'Downloading: %s' % url
+        leaf = url.split('/')[-1]
+        tmpdir = tempfile.mkdtemp('.buildman')
+        response = urllib2.urlopen(url)
+        fname = os.path.join(tmpdir, leaf)
+        fd = open(fname, 'wb')
+        meta = response.info()
+        size = int(meta.getheaders('Content-Length')[0])
+        done = 0
+        block_size = 1 << 16
+        status = ''
+
+        # Read the file in chunks and show progress as we go
+        while True:
+            buffer = response.read(block_size)
+            if not buffer:
+                print chr(8) * (len(status) + 1), '\r',
+                break
+
+            done += len(buffer)
+            fd.write(buffer)
+            status = r'%10d MiB  [%3d%%]' % (done / 1024 / 1024,
+                                             done * 100 / size)
+            status = status + chr(8) * (len(status) + 1)
+            print status,
+            sys.stdout.flush()
+        fd.close()
+        if done != size:
+            print 'Error, failed to download'
+            os.remove(fname)
+            fname = None
+        return tmpdir, fname
+
+    def Unpack(self, fname, dest):
+        """Unpack a tar file
+
+        Args:
+            fname: Filename to unpack
+            dest: Destination directory
+        Returns:
+            Directory name of the first entry in the archive, without the
+            trailing /
+        """
+        stdout = command.Output('tar', 'xvfJ', fname, '-C', dest)
+        dirs = stdout.splitlines()[1].split('/')[:2]
+        return '/'.join(dirs)
+
+    def TestSettingsHasPath(self, path):
+        """Check if buildman will find this toolchain
+
+        Returns:
+            True if the path is in settings, False if not
+        """
+        paths = self.GetPathList(False)
+        return path in paths
+
+    def ListArchs(self):
+        """List architectures with available toolchains to download"""
+        host_arch, archives = self.LocateArchUrl('list')
+        re_arch = re.compile('[-a-z0-9.]*[-_]([^-]*)-.*')
+        arch_set = set()
+        for archive in archives:
+            # Remove the host architecture from the start
+            arch = re_arch.match(archive[len(host_arch):])
+            if arch:
+                if arch.group(1) != '2.0' and arch.group(1) != '64':
+                    arch_set.add(arch.group(1))
+        return sorted(arch_set)
+
+    def FetchAndInstall(self, arch):
+        """Fetch and install a new toolchain
+
+        Args:
+            arch: Architecture to fetch, or 'list' to list
+        """
+        # First get the URL for this architecture
+        col = terminal.Color()
+        print col.Color(col.BLUE, "Downloading toolchain for arch '%s'" % arch)
+        url = self.LocateArchUrl(arch)
+        if not url:
+            print ("Cannot find toolchain for arch '%s' - use 'list' to list" %
+                   arch)
+            return 2
+        home = os.environ['HOME']
+        dest = os.path.join(home, '.buildman-toolchains')
+        if not os.path.exists(dest):
+            os.mkdir(dest)
+
+        # Download the tar file for this toolchain and unpack it
+        tmpdir, tarfile = self.Download(url)
+        if not tarfile:
+            return 1
+        print col.Color(col.GREEN, 'Unpacking to: %s' % dest),
+        sys.stdout.flush()
+        path = self.Unpack(tarfile, dest)
+        os.remove(tarfile)
+        os.rmdir(tmpdir)
+        print
+
+        # Check that the toolchain works
+        print col.Color(col.GREEN, 'Testing')
+        dirpath = os.path.join(dest, path)
+        compiler_fname_list = self.ScanPath(dirpath, True)
+        if not compiler_fname_list:
+            print 'Could not locate C compiler - fetch failed.'
+            return 1
+        if len(compiler_fname_list) != 1:
+            print col.Color(col.RED, 'Warning, ambiguous toolchains: %s' %
+                            ', '.join(compiler_fname_list))
+        toolchain = Toolchain(compiler_fname_list[0], True, True)
+
+        # Make sure that it will be found by buildman
+        if not self.TestSettingsHasPath(dirpath):
+            print ("Adding 'download' to config file '%s'" %
+                   bsettings.config_fname)
+            bsettings.SetItem('toolchain', 'download', '%s/*/*' % dest)
+        return 0
diff --git a/tools/u-boot-tools/common/.bootm.o.cmd b/tools/u-boot-tools/common/.bootm.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..075bd8a491478a03eff5726baf48bd8e12167049
--- /dev/null
+++ b/tools/u-boot-tools/common/.bootm.o.cmd
@@ -0,0 +1,206 @@
+cmd_tools/common/bootm.o := cc -Wp,-MD,tools/common/.bootm.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/common/bootm.o tools/common/bootm.c
+
+source_tools/common/bootm.o := tools/common/bootm.c
+
+deps_tools/common/bootm.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../common/bootm.c \
+    $(wildcard include/config/cmd/usb.h) \
+    $(wildcard include/config/sys/bootm/len.h) \
+    $(wildcard include/config/lmb.h) \
+    $(wildcard include/config/image/format/legacy.h) \
+    $(wildcard include/config/android/boot/image.h) \
+    $(wildcard include/config/cmd/booti.h) \
+    $(wildcard include/config/fpga.h) \
+    $(wildcard include/config/gzip.h) \
+    $(wildcard include/config/bzip2.h) \
+    $(wildcard include/config/sys/malloc/len.h) \
+    $(wildcard include/config/lzma.h) \
+    $(wildcard include/config/lzo.h) \
+    $(wildcard include/config/lz4.h) \
+    $(wildcard include/config/netconsole.h) \
+    $(wildcard include/config/dm/eth.h) \
+    $(wildcard include/config/silent/console.h) \
+    $(wildcard include/config/silent/u/boot/only.h) \
+    $(wildcard include/config/sys/boot/ramdisk/high.h) \
+    $(wildcard include/config/trace.h) \
+  tools/mkimage.h \
+  tools/os_support.h \
+  include/compiler.h \
+  /usr/include/x86_64-linux-gnu/sys/stat.h \
+  /usr/include/x86_64-linux-gnu/bits/statx.h \
+  /usr/include/unistd.h \
+  /usr/include/x86_64-linux-gnu/bits/posix_opt.h \
+  /usr/include/x86_64-linux-gnu/bits/environments.h \
+  /usr/include/x86_64-linux-gnu/bits/confname.h \
+  /usr/include/x86_64-linux-gnu/bits/getopt_posix.h \
+  /usr/include/x86_64-linux-gnu/bits/getopt_core.h \
+  include/u-boot/sha1.h \
+  tools/fdt_host.h \
+  tools/../include/linux/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt_env.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/fdt.h \
+  tools/../include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+  tools/imagetool.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdbool.h \
+  include/command.h \
+    $(wildcard include/config/sys/help/cmd/width.h) \
+    $(wildcard include/config/sys/longhelp.h) \
+    $(wildcard include/config/auto/complete.h) \
+    $(wildcard include/config/cmd/run.h) \
+    $(wildcard include/config/cmd/memory.h) \
+    $(wildcard include/config/cmd/i2c.h) \
+    $(wildcard include/config/cmd/itest.h) \
+    $(wildcard include/config/cmd/pci.h) \
+    $(wildcard include/config/cmd/bootd.h) \
+    $(wildcard include/config/cmd/bootm.h) \
+    $(wildcard include/config/cmdline.h) \
+  include/linker_lists.h \
+  include/linux/compiler.h \
+    $(wildcard include/config/sparse/rcu/pointer.h) \
+    $(wildcard include/config/trace/branch/profiling.h) \
+    $(wildcard include/config/profile/all/branches.h) \
+    $(wildcard include/config/kasan.h) \
+    $(wildcard include/config/enable/must/check.h) \
+    $(wildcard include/config/enable/warn/deprecated.h) \
+    $(wildcard include/config/kprobes.h) \
+  include/bootm.h \
+  include/image.h \
+    $(wildcard include/config/fit/verbose.h) \
+    $(wildcard include/config/fit/enable/rsassa/pss/support.h) \
+    $(wildcard include/config/fit/enable/sha256/support.h) \
+    $(wildcard include/config/sha1.h) \
+    $(wildcard include/config/sha256.h) \
+    $(wildcard include/config/fit.h) \
+    $(wildcard include/config/spl/build.h) \
+    $(wildcard include/config/spl/crc32/support.h) \
+    $(wildcard include/config/spl/md5/support.h) \
+    $(wildcard include/config/spl/sha1/support.h) \
+    $(wildcard include/config/crc32.h) \
+    $(wildcard include/config/spl/sha256/support.h) \
+    $(wildcard include/config/sys/boot/get/cmdline.h) \
+    $(wildcard include/config/timestamp.h) \
+    $(wildcard include/config/cmd/date.h) \
+    $(wildcard include/config/sys/boot/get/kbd.h) \
+    $(wildcard include/config/fit/signature.h) \
+    $(wildcard include/config/fit/best/match.h) \
+    $(wildcard include/config/spl/fit/image/post/process.h) \
+    $(wildcard include/config/fit/image/post/process.h) \
+  include/compiler.h \
+  /usr/include/x86_64-linux-gnu/asm/byteorder.h \
+  /usr/include/linux/byteorder/little_endian.h \
+  /usr/include/linux/types.h \
+  /usr/include/x86_64-linux-gnu/asm/types.h \
+  /usr/include/asm-generic/types.h \
+  /usr/include/asm-generic/int-ll64.h \
+  /usr/include/x86_64-linux-gnu/asm/bitsperlong.h \
+  /usr/include/asm-generic/bitsperlong.h \
+    $(wildcard include/config/64bit.h) \
+  /usr/include/linux/posix_types.h \
+  /usr/include/linux/stddef.h \
+  /usr/include/x86_64-linux-gnu/asm/posix_types.h \
+  /usr/include/x86_64-linux-gnu/asm/posix_types_64.h \
+  /usr/include/asm-generic/posix_types.h \
+  /usr/include/linux/swab.h \
+  /usr/include/x86_64-linux-gnu/asm/swab.h \
+  include/hash.h \
+  include/linux/libfdt.h \
+  include/fdt_support.h \
+
+tools/common/bootm.o: $(deps_tools/common/bootm.o)
+
+$(deps_tools/common/bootm.o):
diff --git a/tools/u-boot-tools/common/.hash.o.cmd b/tools/u-boot-tools/common/.hash.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..3006984b39895fbc65d77525cd9196e1b3a678fc
--- /dev/null
+++ b/tools/u-boot-tools/common/.hash.o.cmd
@@ -0,0 +1,180 @@
+cmd_tools/common/hash.o := cc -Wp,-MD,tools/common/.hash.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/common/hash.o tools/common/hash.c
+
+source_tools/common/hash.o := tools/common/hash.c
+
+deps_tools/common/hash.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../common/hash.c \
+    $(wildcard include/config/needs/manual/reloc.h) \
+    $(wildcard include/config/sha1.h) \
+    $(wildcard include/config/sha/prog/hw/accel.h) \
+    $(wildcard include/config/sha256.h) \
+    $(wildcard include/config/sha/hw/accel.h) \
+    $(wildcard include/config/cmd/sha1sum.h) \
+    $(wildcard include/config/crc32/verify.h) \
+    $(wildcard include/config/cmd/hash.h) \
+    $(wildcard include/config/cmd/crc32.h) \
+    $(wildcard include/config/sha1sum/verify.h) \
+    $(wildcard include/config/hash/verify.h) \
+  tools/mkimage.h \
+  tools/os_support.h \
+  include/compiler.h \
+  /usr/include/x86_64-linux-gnu/sys/stat.h \
+  /usr/include/x86_64-linux-gnu/bits/statx.h \
+  /usr/include/unistd.h \
+  /usr/include/x86_64-linux-gnu/bits/posix_opt.h \
+  /usr/include/x86_64-linux-gnu/bits/environments.h \
+  /usr/include/x86_64-linux-gnu/bits/confname.h \
+  /usr/include/x86_64-linux-gnu/bits/getopt_posix.h \
+  /usr/include/x86_64-linux-gnu/bits/getopt_core.h \
+  include/u-boot/sha1.h \
+  tools/fdt_host.h \
+  tools/../include/linux/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt_env.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/fdt.h \
+  tools/../include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+  tools/imagetool.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdbool.h \
+  include/image.h \
+    $(wildcard include/config/fit/verbose.h) \
+    $(wildcard include/config/fit/enable/rsassa/pss/support.h) \
+    $(wildcard include/config/fit/enable/sha256/support.h) \
+    $(wildcard include/config/fit.h) \
+    $(wildcard include/config/spl/build.h) \
+    $(wildcard include/config/spl/crc32/support.h) \
+    $(wildcard include/config/spl/md5/support.h) \
+    $(wildcard include/config/spl/sha1/support.h) \
+    $(wildcard include/config/crc32.h) \
+    $(wildcard include/config/spl/sha256/support.h) \
+    $(wildcard include/config/sys/boot/get/cmdline.h) \
+    $(wildcard include/config/lmb.h) \
+    $(wildcard include/config/timestamp.h) \
+    $(wildcard include/config/cmd/date.h) \
+    $(wildcard include/config/image/format/legacy.h) \
+    $(wildcard include/config/sys/boot/get/kbd.h) \
+    $(wildcard include/config/fit/signature.h) \
+    $(wildcard include/config/fit/best/match.h) \
+    $(wildcard include/config/android/boot/image.h) \
+    $(wildcard include/config/spl/fit/image/post/process.h) \
+    $(wildcard include/config/fit/image/post/process.h) \
+  include/compiler.h \
+  /usr/include/x86_64-linux-gnu/asm/byteorder.h \
+  /usr/include/linux/byteorder/little_endian.h \
+  /usr/include/linux/types.h \
+  /usr/include/x86_64-linux-gnu/asm/types.h \
+  /usr/include/asm-generic/types.h \
+  /usr/include/asm-generic/int-ll64.h \
+  /usr/include/x86_64-linux-gnu/asm/bitsperlong.h \
+  /usr/include/asm-generic/bitsperlong.h \
+    $(wildcard include/config/64bit.h) \
+  /usr/include/linux/posix_types.h \
+  /usr/include/linux/stddef.h \
+  /usr/include/x86_64-linux-gnu/asm/posix_types.h \
+  /usr/include/x86_64-linux-gnu/asm/posix_types_64.h \
+  /usr/include/asm-generic/posix_types.h \
+  /usr/include/linux/swab.h \
+  /usr/include/x86_64-linux-gnu/asm/swab.h \
+  include/hash.h \
+  include/linux/libfdt.h \
+  include/fdt_support.h \
+  include/u-boot/crc.h \
+  include/u-boot/sha256.h \
+  include/u-boot/md5.h \
+
+tools/common/hash.o: $(deps_tools/common/hash.o)
+
+$(deps_tools/common/hash.o):
diff --git a/tools/u-boot-tools/common/.image-fit.o.cmd b/tools/u-boot-tools/common/.image-fit.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..56939d1e1394ffa9a79a54506ac7a3a0cbced8b7
--- /dev/null
+++ b/tools/u-boot-tools/common/.image-fit.o.cmd
@@ -0,0 +1,178 @@
+cmd_tools/common/image-fit.o := cc -Wp,-MD,tools/common/.image-fit.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/common/image-fit.o tools/common/image-fit.c
+
+source_tools/common/image-fit.o := tools/common/image-fit.c
+
+deps_tools/common/image-fit.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../common/image-fit.c \
+    $(wildcard include/config/spl/build.h) \
+    $(wildcard include/config/spl/fit/print.h) \
+    $(wildcard include/config/arm64/support/aarch32.h) \
+    $(wildcard include/config/sandbox.h) \
+    $(wildcard include/config/fit/image/post/process.h) \
+    $(wildcard include/config/of/libfdt/overlay.h) \
+  tools/mkimage.h \
+  tools/os_support.h \
+  include/compiler.h \
+  /usr/include/x86_64-linux-gnu/sys/stat.h \
+  /usr/include/x86_64-linux-gnu/bits/statx.h \
+  /usr/include/unistd.h \
+  /usr/include/x86_64-linux-gnu/bits/posix_opt.h \
+  /usr/include/x86_64-linux-gnu/bits/environments.h \
+  /usr/include/x86_64-linux-gnu/bits/confname.h \
+  /usr/include/x86_64-linux-gnu/bits/getopt_posix.h \
+  /usr/include/x86_64-linux-gnu/bits/getopt_core.h \
+  include/u-boot/sha1.h \
+  tools/fdt_host.h \
+  tools/../include/linux/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt_env.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/fdt.h \
+  tools/../include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+  tools/imagetool.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdbool.h \
+  include/image.h \
+    $(wildcard include/config/fit/verbose.h) \
+    $(wildcard include/config/fit/enable/rsassa/pss/support.h) \
+    $(wildcard include/config/fit/enable/sha256/support.h) \
+    $(wildcard include/config/sha1.h) \
+    $(wildcard include/config/sha256.h) \
+    $(wildcard include/config/fit.h) \
+    $(wildcard include/config/spl/crc32/support.h) \
+    $(wildcard include/config/spl/md5/support.h) \
+    $(wildcard include/config/spl/sha1/support.h) \
+    $(wildcard include/config/crc32.h) \
+    $(wildcard include/config/spl/sha256/support.h) \
+    $(wildcard include/config/sys/boot/get/cmdline.h) \
+    $(wildcard include/config/lmb.h) \
+    $(wildcard include/config/timestamp.h) \
+    $(wildcard include/config/cmd/date.h) \
+    $(wildcard include/config/image/format/legacy.h) \
+    $(wildcard include/config/sys/boot/get/kbd.h) \
+    $(wildcard include/config/fit/signature.h) \
+    $(wildcard include/config/fit/best/match.h) \
+    $(wildcard include/config/android/boot/image.h) \
+    $(wildcard include/config/spl/fit/image/post/process.h) \
+  include/compiler.h \
+  /usr/include/x86_64-linux-gnu/asm/byteorder.h \
+  /usr/include/linux/byteorder/little_endian.h \
+  /usr/include/linux/types.h \
+  /usr/include/x86_64-linux-gnu/asm/types.h \
+  /usr/include/asm-generic/types.h \
+  /usr/include/asm-generic/int-ll64.h \
+  /usr/include/x86_64-linux-gnu/asm/bitsperlong.h \
+  /usr/include/asm-generic/bitsperlong.h \
+    $(wildcard include/config/64bit.h) \
+  /usr/include/linux/posix_types.h \
+  /usr/include/linux/stddef.h \
+  /usr/include/x86_64-linux-gnu/asm/posix_types.h \
+  /usr/include/x86_64-linux-gnu/asm/posix_types_64.h \
+  /usr/include/asm-generic/posix_types.h \
+  /usr/include/linux/swab.h \
+  /usr/include/x86_64-linux-gnu/asm/swab.h \
+  include/hash.h \
+  include/linux/libfdt.h \
+  include/fdt_support.h \
+  include/bootstage.h \
+    $(wildcard include/config/bootstage.h) \
+    $(wildcard include/config/show/boot/progress.h) \
+  include/u-boot/crc.h \
+  include/u-boot/md5.h \
+  include/u-boot/sha256.h \
+
+tools/common/image-fit.o: $(deps_tools/common/image-fit.o)
+
+$(deps_tools/common/image-fit.o):
diff --git a/tools/u-boot-tools/common/.image.o.cmd b/tools/u-boot-tools/common/.image.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..688b4b4194ac476f039558d9ef8f0dd5b939e39a
--- /dev/null
+++ b/tools/u-boot-tools/common/.image.o.cmd
@@ -0,0 +1,189 @@
+cmd_tools/common/image.o := cc -Wp,-MD,tools/common/.image.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/common/image.o tools/common/image.c
+
+source_tools/common/image.o := tools/common/image.c
+
+deps_tools/common/image.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../common/image.c \
+    $(wildcard include/config/show/boot/progress.h) \
+    $(wildcard include/config/cmd/bdi.h) \
+    $(wildcard include/config/image/format/legacy.h) \
+    $(wildcard include/config/sys/bargsize.h) \
+    $(wildcard include/config/lynxkdi.h) \
+    $(wildcard include/config/cmd/elf.h) \
+    $(wildcard include/config/integrity.h) \
+    $(wildcard include/config/bootm/openrtos.h) \
+    $(wildcard include/config/sys/load/addr.h) \
+    $(wildcard include/config/sys/sdram/base.h) \
+    $(wildcard include/config/arm.h) \
+    $(wildcard include/config/nr/dram/banks.h) \
+    $(wildcard include/config/sys/bootmapsz.h) \
+    $(wildcard include/config/hw/watchdog.h) \
+    $(wildcard include/config/watchdog.h) \
+    $(wildcard include/config/needs/manual/reloc.h) \
+    $(wildcard include/config/fit.h) \
+    $(wildcard include/config/android/boot/image.h) \
+    $(wildcard include/config/support/raw/initrd.h) \
+    $(wildcard include/config/sys/boot/ramdisk/high.h) \
+    $(wildcard include/config/mp.h) \
+    $(wildcard include/config/fpga.h) \
+    $(wildcard include/config/sys/boot/get/cmdline.h) \
+    $(wildcard include/config/sys/boot/get/kbd.h) \
+    $(wildcard include/config/lmb.h) \
+  tools/mkimage.h \
+  tools/os_support.h \
+  include/compiler.h \
+  /usr/include/x86_64-linux-gnu/sys/stat.h \
+  /usr/include/x86_64-linux-gnu/bits/statx.h \
+  /usr/include/unistd.h \
+  /usr/include/x86_64-linux-gnu/bits/posix_opt.h \
+  /usr/include/x86_64-linux-gnu/bits/environments.h \
+  /usr/include/x86_64-linux-gnu/bits/confname.h \
+  /usr/include/x86_64-linux-gnu/bits/getopt_posix.h \
+  /usr/include/x86_64-linux-gnu/bits/getopt_core.h \
+  include/u-boot/sha1.h \
+  tools/fdt_host.h \
+  tools/../include/linux/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt_env.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/fdt.h \
+  tools/../include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+  tools/imagetool.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdbool.h \
+  include/u-boot/md5.h \
+  include/image.h \
+    $(wildcard include/config/fit/verbose.h) \
+    $(wildcard include/config/fit/enable/rsassa/pss/support.h) \
+    $(wildcard include/config/fit/enable/sha256/support.h) \
+    $(wildcard include/config/sha1.h) \
+    $(wildcard include/config/sha256.h) \
+    $(wildcard include/config/spl/build.h) \
+    $(wildcard include/config/spl/crc32/support.h) \
+    $(wildcard include/config/spl/md5/support.h) \
+    $(wildcard include/config/spl/sha1/support.h) \
+    $(wildcard include/config/crc32.h) \
+    $(wildcard include/config/spl/sha256/support.h) \
+    $(wildcard include/config/timestamp.h) \
+    $(wildcard include/config/cmd/date.h) \
+    $(wildcard include/config/fit/signature.h) \
+    $(wildcard include/config/fit/best/match.h) \
+    $(wildcard include/config/spl/fit/image/post/process.h) \
+    $(wildcard include/config/fit/image/post/process.h) \
+  include/compiler.h \
+  /usr/include/x86_64-linux-gnu/asm/byteorder.h \
+  /usr/include/linux/byteorder/little_endian.h \
+  /usr/include/linux/types.h \
+  /usr/include/x86_64-linux-gnu/asm/types.h \
+  /usr/include/asm-generic/types.h \
+  /usr/include/asm-generic/int-ll64.h \
+  /usr/include/x86_64-linux-gnu/asm/bitsperlong.h \
+  /usr/include/asm-generic/bitsperlong.h \
+    $(wildcard include/config/64bit.h) \
+  /usr/include/linux/posix_types.h \
+  /usr/include/linux/stddef.h \
+  /usr/include/x86_64-linux-gnu/asm/posix_types.h \
+  /usr/include/x86_64-linux-gnu/asm/posix_types_64.h \
+  /usr/include/asm-generic/posix_types.h \
+  /usr/include/linux/swab.h \
+  /usr/include/x86_64-linux-gnu/asm/swab.h \
+  include/hash.h \
+  include/linux/libfdt.h \
+  include/fdt_support.h \
+  include/u-boot/crc.h \
+
+tools/common/image.o: $(deps_tools/common/image.o)
+
+$(deps_tools/common/image.o):
diff --git a/tools/u-boot-tools/common/bootm.c b/tools/u-boot-tools/common/bootm.c
new file mode 100644
index 0000000000000000000000000000000000000000..2f2a1cad4b9cf97bc38d36197583a80b61a66e10
--- /dev/null
+++ b/tools/u-boot-tools/common/bootm.c
@@ -0,0 +1 @@
+#include <../common/bootm.c>
diff --git a/tools/u-boot-tools/common/bootm.o b/tools/u-boot-tools/common/bootm.o
new file mode 100644
index 0000000000000000000000000000000000000000..1d1edae03ad80230fe4c163fba0b8eed975a3139
Binary files /dev/null and b/tools/u-boot-tools/common/bootm.o differ
diff --git a/tools/u-boot-tools/common/hash.c b/tools/u-boot-tools/common/hash.c
new file mode 100644
index 0000000000000000000000000000000000000000..de5b81f1f83463287c138e4a53c3c7f77a666b77
--- /dev/null
+++ b/tools/u-boot-tools/common/hash.c
@@ -0,0 +1 @@
+#include <../common/hash.c>
diff --git a/tools/u-boot-tools/common/hash.o b/tools/u-boot-tools/common/hash.o
new file mode 100644
index 0000000000000000000000000000000000000000..f8803dbd390cc58beda6278ea0616239a92df7af
Binary files /dev/null and b/tools/u-boot-tools/common/hash.o differ
diff --git a/tools/u-boot-tools/common/image-fit.c b/tools/u-boot-tools/common/image-fit.c
new file mode 100644
index 0000000000000000000000000000000000000000..ae738f3c7d7330712d65d1fc9ae8dfdf0a88eb31
--- /dev/null
+++ b/tools/u-boot-tools/common/image-fit.c
@@ -0,0 +1 @@
+#include <../common/image-fit.c>
diff --git a/tools/u-boot-tools/common/image-fit.o b/tools/u-boot-tools/common/image-fit.o
new file mode 100644
index 0000000000000000000000000000000000000000..4d42b812f29ba7ad4506241832a5761f0ceb8626
Binary files /dev/null and b/tools/u-boot-tools/common/image-fit.o differ
diff --git a/tools/u-boot-tools/common/image.c b/tools/u-boot-tools/common/image.c
new file mode 100644
index 0000000000000000000000000000000000000000..3aef1601f28fc327e8837e0d74fb76ab2b52e947
--- /dev/null
+++ b/tools/u-boot-tools/common/image.c
@@ -0,0 +1 @@
+#include <../common/image.c>
diff --git a/tools/u-boot-tools/common/image.o b/tools/u-boot-tools/common/image.o
new file mode 100644
index 0000000000000000000000000000000000000000..9c4fa0741241645611a4d27786a355db863db74c
Binary files /dev/null and b/tools/u-boot-tools/common/image.o differ
diff --git a/tools/u-boot-tools/concurrencytest/.gitignore b/tools/u-boot-tools/concurrencytest/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..0d20b6487c61e7d1bde93acf4a14b7a89083a16d
--- /dev/null
+++ b/tools/u-boot-tools/concurrencytest/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/tools/u-boot-tools/concurrencytest/README.md b/tools/u-boot-tools/concurrencytest/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8e65776f178d8abb1b8fe5488986b42f128f517f
--- /dev/null
+++ b/tools/u-boot-tools/concurrencytest/README.md
@@ -0,0 +1,74 @@
+concurrencytest
+===============
+
+![testing goats](https://raw.github.com/cgoldberg/concurrencytest/master/testing-goats.png "testing goats")
+
+Python testtools extension for running unittest suites concurrently.
+
+----
+
+Install from PyPI:
+```
+pip install concurrencytest
+```
+
+----
+
+Requires:
+
+ * [testtools](https://pypi.python.org/pypi/testtools) : `pip install testtools`
+ * [python-subunit](https://pypi.python.org/pypi/python-subunit) : `pip install python-subunit`
+
+----
+
+Example:
+
+```python
+import time
+import unittest
+
+from concurrencytest import ConcurrentTestSuite, fork_for_tests
+
+
+class SampleTestCase(unittest.TestCase):
+    """Dummy tests that sleep for demo."""
+
+    def test_me_1(self):
+        time.sleep(0.5)
+
+    def test_me_2(self):
+        time.sleep(0.5)
+
+    def test_me_3(self):
+        time.sleep(0.5)
+
+    def test_me_4(self):
+        time.sleep(0.5)
+
+
+# Load tests from SampleTestCase defined above
+suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
+runner = unittest.TextTestRunner()
+
+# Run tests sequentially
+runner.run(suite)
+
+# Run same tests across 4 processes
+suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
+concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(4))
+runner.run(concurrent_suite)
+```
+Output:
+
+```
+....
+----------------------------------------------------------------------
+Ran 4 tests in 2.003s
+
+OK
+....
+----------------------------------------------------------------------
+Ran 4 tests in 0.504s
+
+OK
+```
diff --git a/tools/u-boot-tools/concurrencytest/concurrencytest.py b/tools/u-boot-tools/concurrencytest/concurrencytest.py
new file mode 100644
index 0000000000000000000000000000000000000000..418d7eed21d1b4583df5c41b8dc03d245aa16806
--- /dev/null
+++ b/tools/u-boot-tools/concurrencytest/concurrencytest.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Modified by: Corey Goldberg, 2013
+#
+# Original code from:
+#   Bazaar (bzrlib.tests.__init__.py, v2.6, copied Jun 01 2013)
+#   Copyright (C) 2005-2011 Canonical Ltd
+
+"""Python testtools extension for running unittest suites concurrently.
+
+The `testtools` project provides a ConcurrentTestSuite class, but does
+not provide a `make_tests` implementation needed to use it.
+
+This allows you to parallelize a test run across a configurable number
+of worker processes. While this can speed up CPU-bound test runs, it is
+mainly useful for IO-bound tests that spend most of their time waiting for
+data to arrive from someplace else and can benefit from concurrency.
+
+Unix only.
+"""
+
+import os
+import sys
+import traceback
+import unittest
+from itertools import cycle
+from multiprocessing import cpu_count
+
+from subunit import ProtocolTestCase, TestProtocolClient
+from subunit.test_results import AutoTimingTestResultDecorator
+
+from testtools import ConcurrentTestSuite, iterate_tests
+
+
+__all__ = [
+    'ConcurrentTestSuite',
+    'fork_for_tests',
+    'partition_tests',
+]
+
+
+CPU_COUNT = cpu_count()
+
+
+def fork_for_tests(concurrency_num=CPU_COUNT):
+    """Implementation of `make_tests` used to construct `ConcurrentTestSuite`.
+
+    :param concurrency_num: number of processes to use.
+    """
+    def do_fork(suite):
+        """Take suite and start up multiple runners by forking (Unix only).
+
+        :param suite: TestSuite object.
+
+        :return: An iterable of TestCase-like objects which can each have
+        run(result) called on them to feed tests to result.
+        """
+        result = []
+        test_blocks = partition_tests(suite, concurrency_num)
+        # Clear the tests from the original suite so it doesn't keep them alive
+        suite._tests[:] = []
+        for process_tests in test_blocks:
+            process_suite = unittest.TestSuite(process_tests)
+            # Also clear each split list so the new suite holds the only reference
+            process_tests[:] = []
+            c2pread, c2pwrite = os.pipe()
+            pid = os.fork()
+            if pid == 0:
+                try:
+                    stream = os.fdopen(c2pwrite, 'wb', 1)
+                    os.close(c2pread)
+                    # Leave stderr and stdout open so we can see test noise
+                    # Close stdin so that the child goes away if it decides to
+                    # read from stdin (otherwise it's a roulette to see what
+                    # child actually gets keystrokes for pdb etc).
+                    sys.stdin.close()
+                    subunit_result = AutoTimingTestResultDecorator(
+                        TestProtocolClient(stream)
+                    )
+                    process_suite.run(subunit_result)
+                except:
+                    # Try and report traceback on stream, but exit with error
+                    # even if stream couldn't be created or something else
+                    # goes wrong.  The traceback is formatted to a string and
+                    # written in one go to avoid interleaving lines from
+                    # multiple failing children.
+                    try:
+                        stream.write(traceback.format_exc())
+                    finally:
+                        os._exit(1)
+                os._exit(0)
+            else:
+                os.close(c2pwrite)
+                stream = os.fdopen(c2pread, 'rb', 1)
+                test = ProtocolTestCase(stream)
+                result.append(test)
+        return result
+    return do_fork
+
+
+def partition_tests(suite, count):
+    """Partition suite into count lists of tests."""
+    # This just assigns tests in a round-robin fashion.  On one hand this
+    # splits up blocks of related tests that might run faster if they shared
+    # resources, but on the other it avoids assigning blocks of slow tests to
+    # just one partition.  So the slowest partition shouldn't be much slower
+    # than the fastest.
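+    # Illustrative example (not in the original source): with tests
+    # [t1, t2, t3, t4, t5] and count=2 the round-robin walk below yields
+    # [[t1, t3, t5], [t2, t4]].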
+    partitions = [list() for _ in range(count)]
+    tests = iterate_tests(suite)
+    for partition, test in zip(cycle(partitions), tests):
+        partition.append(test)
+    return partitions
+
+
+if __name__ == '__main__':
+    import time
+
+    class SampleTestCase(unittest.TestCase):
+        """Dummy tests that sleep for demo."""
+
+        def test_me_1(self):
+            time.sleep(0.5)
+
+        def test_me_2(self):
+            time.sleep(0.5)
+
+        def test_me_3(self):
+            time.sleep(0.5)
+
+        def test_me_4(self):
+            time.sleep(0.5)
+
+    # Load tests from SampleTestCase defined above
+    suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
+    runner = unittest.TextTestRunner()
+
+    # Run tests sequentially
+    runner.run(suite)
+
+    # Run same tests across 4 processes
+    suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
+    concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(4))
+    runner.run(concurrent_suite)
diff --git a/tools/u-boot-tools/default_image.c b/tools/u-boot-tools/default_image.c
new file mode 100644
index 0000000000000000000000000000000000000000..4b7d1ed4a1a5247d72a1f73c65466c2daac486e9
--- /dev/null
+++ b/tools/u-boot-tools/default_image.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2008 Semihalf
+ *
+ * (C) Copyright 2000-2004
+ * DENX Software Engineering
+ * Wolfgang Denk, wd@denx.de
+ *
+ * Updated-by: Prafulla Wadaskar <prafulla@marvell.com>
+ *		default_image specific code abstracted from mkimage.c
+ *		some functions added to address abstraction
+ *
+ * All rights reserved.
+ */
+
+#include "imagetool.h"
+#include "mkimage.h"
+
+#include <image.h>
+#include <tee/optee.h>
+#include <u-boot/crc.h>
+
+static image_header_t header;
+
+static int image_check_image_types(uint8_t type)
+{
+	if (((type > IH_TYPE_INVALID) && (type < IH_TYPE_FLATDT)) ||
+	    (type == IH_TYPE_KERNEL_NOLOAD) || (type == IH_TYPE_FIRMWARE_IVT))
+		return EXIT_SUCCESS;
+	else
+		return EXIT_FAILURE;
+}
+
+static int image_check_params(struct image_tool_params *params)
+{
+	return	((params->dflag && (params->fflag || params->lflag)) ||
+		(params->fflag && (params->dflag || params->lflag)) ||
+		(params->lflag && (params->dflag || params->fflag)));
+}
+
+static int image_verify_header(unsigned char *ptr, int image_size,
+			struct image_tool_params *params)
+{
+	uint32_t len;
+	const unsigned char *data;
+	uint32_t checksum;
+	image_header_t header;
+	image_header_t *hdr = &header;
+
+	/*
+	 * create copy of header so that we can blank out the
+	 * checksum field for checking - this can't be done
+	 * on the PROT_READ mapped data.
+	 */
+	memcpy(hdr, ptr, sizeof(image_header_t));
+
+	if (be32_to_cpu(hdr->ih_magic) != IH_MAGIC) {
+		debug("%s: Bad Magic Number: \"%s\" is no valid image\n",
+		      params->cmdname, params->imagefile);
+		return -FDT_ERR_BADMAGIC;
+	}
+
+	data = (const unsigned char *)hdr;
+	len  = sizeof(image_header_t);
+
+	checksum = be32_to_cpu(hdr->ih_hcrc);
+	hdr->ih_hcrc = cpu_to_be32(0);	/* clear for re-calculation */
+
+	if (crc32(0, data, len) != checksum) {
+		debug("%s: ERROR: \"%s\" has bad header checksum!\n",
+		      params->cmdname, params->imagefile);
+		return -FDT_ERR_BADSTATE;
+	}
+
+	data = (const unsigned char *)ptr + sizeof(image_header_t);
+	len  = image_size - sizeof(image_header_t) ;
+
+	checksum = be32_to_cpu(hdr->ih_dcrc);
+	if (crc32(0, data, len) != checksum) {
+		debug("%s: ERROR: \"%s\" has corrupted data!\n",
+		      params->cmdname, params->imagefile);
+		return -FDT_ERR_BADSTRUCTURE;
+	}
+	return 0;
+}
+
+static void image_set_header(void *ptr, struct stat *sbuf, int ifd,
+				struct image_tool_params *params)
+{
+	uint32_t checksum;
+	time_t time;
+	uint32_t imagesize;
+	uint32_t ep;
+	uint32_t addr;
+
+	image_header_t * hdr = (image_header_t *)ptr;
+
+	checksum = crc32(0,
+			(const unsigned char *)(ptr +
+				sizeof(image_header_t)),
+			sbuf->st_size - sizeof(image_header_t));
+
+	time = imagetool_get_source_date(params->cmdname, sbuf->st_mtime);
+	ep = params->ep;
+	addr = params->addr;
+
+	if (params->type == IH_TYPE_FIRMWARE_IVT)
+		/* Add size of CSF minus IVT */
+		imagesize = sbuf->st_size - sizeof(image_header_t) + 0x1FE0;
+	else
+		imagesize = sbuf->st_size - sizeof(image_header_t);
+
+	if (params->os == IH_OS_TEE) {
+		addr = optee_image_get_load_addr(hdr);
+		ep = optee_image_get_entry_point(hdr);
+	}
+
+	/* Build new header */
+	image_set_magic(hdr, IH_MAGIC);
+	image_set_time(hdr, time);
+	image_set_size(hdr, imagesize);
+	image_set_load(hdr, addr);
+	image_set_ep(hdr, ep);
+	image_set_dcrc(hdr, checksum);
+	image_set_os(hdr, params->os);
+	image_set_arch(hdr, params->arch);
+	image_set_type(hdr, params->type);
+	image_set_comp(hdr, params->comp);
+
+	image_set_name(hdr, params->imagename);
+
+	checksum = crc32(0, (const unsigned char *)hdr,
+				sizeof(image_header_t));
+
+	image_set_hcrc(hdr, checksum);
+}
+
+static int image_extract_subimage(void *ptr, struct image_tool_params *params)
+{
+	const image_header_t *hdr = (const image_header_t *)ptr;
+	ulong file_data;
+	ulong file_len;
+
+	if (image_check_type(hdr, IH_TYPE_MULTI)) {
+		ulong idx = params->pflag;
+		ulong count;
+
+		/* get the number of data files present in the image */
+		count = image_multi_count(hdr);
+
+		/* retrieve the "data file" at the idx position */
+		image_multi_getimg(hdr, idx, &file_data, &file_len);
+
+		if ((file_len == 0) || (idx >= count)) {
+			fprintf(stderr, "%s: No such data file %ld in \"%s\"\n",
+				params->cmdname, idx, params->imagefile);
+			return -1;
+		}
+	} else {
+		file_data = image_get_data(hdr);
+		file_len = image_get_size(hdr);
+	}
+
+	/* save the "data file" into the file system */
+	return imagetool_save_subimage(params->outfile, file_data, file_len);
+}
+
+/*
+ * Default image type parameters definition
+ */
+U_BOOT_IMAGE_TYPE(
+	defimage,
+	"Default Image support",
+	sizeof(image_header_t),
+	(void *)&header,
+	image_check_params,
+	image_verify_header,
+	image_print_contents,
+	image_set_header,
+	image_extract_subimage,
+	image_check_image_types,
+	NULL,
+	NULL
+);
diff --git a/tools/u-boot-tools/default_image.o b/tools/u-boot-tools/default_image.o
new file mode 100644
index 0000000000000000000000000000000000000000..d9aced3057fce265c044cc9f6d9094a708931a8a
Binary files /dev/null and b/tools/u-boot-tools/default_image.o differ
diff --git a/tools/u-boot-tools/dtoc/.gitignore b/tools/u-boot-tools/dtoc/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..0d20b6487c61e7d1bde93acf4a14b7a89083a16d
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/tools/u-boot-tools/dtoc/dtb_platdata.py b/tools/u-boot-tools/dtoc/dtb_platdata.py
new file mode 100644
index 0000000000000000000000000000000000000000..6cb125944660ca090e224b35b94a4bbf9c4981bf
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtb_platdata.py
@@ -0,0 +1,576 @@
+#!/usr/bin/python
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2017 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+"""Device tree to platform data class
+
+This supports converting device tree data to C structures definitions and
+static data.
+"""
+
+import collections
+import copy
+import sys
+
+import fdt
+import fdt_util
+
+# When we see these properties we ignore them - i.e. do not create a structure member
+PROP_IGNORE_LIST = [
+    '#address-cells',
+    '#gpio-cells',
+    '#size-cells',
+    'compatible',
+    'linux,phandle',
+    "status",
+    'phandle',
+    'u-boot,dm-pre-reloc',
+    'u-boot,dm-tpl',
+    'u-boot,dm-spl',
+]
+
+# C type declarations for the types we support
+TYPE_NAMES = {
+    fdt.TYPE_INT: 'fdt32_t',
+    fdt.TYPE_BYTE: 'unsigned char',
+    fdt.TYPE_STRING: 'const char *',
+    fdt.TYPE_BOOL: 'bool',
+    fdt.TYPE_INT64: 'fdt64_t',
+}
+
+STRUCT_PREFIX = 'dtd_'
+VAL_PREFIX = 'dtv_'
+
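+# Illustrative output shape (an assumption based on the prefixes above, not a
+# verbatim sample): a node whose first compatible string is "vendor,chip"
+# yields a 'struct dtd_vendor_chip' declaration, and each matching node gets
+# a 'dtv_<node name>' static initialiser holding its property values.
+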
+# This holds information about a property which includes phandles.
+#
+# max_args: Maximum number of arguments that any phandle uses (int).
+# args: Number of args for each phandle in the property. The total number of
+#     phandles is len(args). This is a list of integers.
+PhandleInfo = collections.namedtuple('PhandleInfo', ['max_args', 'args'])
+
+
+def conv_name_to_c(name):
+    """Convert a device-tree name to a C identifier
+
+    This uses multiple replace() calls instead of re.sub() since it is faster
+    (400ms for 1m calls versus 1000ms for the 're' version).
+
+    Args:
+        name:   Name to convert
+    Return:
+        String containing the C version of this name
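+
+    Example (illustrative, not part of the original docstring):
+        conv_name_to_c('serial@f00,nr1.5') -> 'serial_at_f00_nr1_5'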
+    """
+    new = name.replace('@', '_at_')
+    new = new.replace('-', '_')
+    new = new.replace(',', '_')
+    new = new.replace('.', '_')
+    return new
+
+def tab_to(num_tabs, line):
+    """Append tabs to a line of text to reach a tab stop.
+
+    Args:
+        num_tabs: Tab stop to obtain (0 = column 0, 1 = column 8, etc.)
+        line: Line of text to append to
+
+    Returns:
+        line with the correct number of tabs appended. If the line already
+        extends past that tab stop then a single space is appended.
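+
+    Example (illustrative, not part of the original docstring):
+        tab_to(2, 'int x;') appends two tabs so the next field starts at
+        column 16.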
+    """
+    if len(line) >= num_tabs * 8:
+        return line + ' '
+    return line + '\t' * (num_tabs - len(line) // 8)
+
+def get_value(ftype, value):
+    """Get a value as a C expression
+
+    For integers this returns a byte-swapped (little-endian) hex string
+    For bytes this returns a hex string, e.g. 0x12
+    For strings this returns a literal string enclosed in quotes
+    For booleans this returns 'true'
+
+    Args:
+        ftype: Data type (fdt_util)
+        value: Data value, as a string of bytes
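+
+    Example (illustrative, not part of the original docstring): a cell of
+    big-endian bytes 00 00 00 20 with ftype == fdt.TYPE_INT is rendered as
+    the C expression '0x20'.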
+    """
+    if ftype == fdt.TYPE_INT:
+        return '%#x' % fdt_util.fdt32_to_cpu(value)
+    elif ftype == fdt.TYPE_BYTE:
+        return '%#x' % ord(value[0])
+    elif ftype == fdt.TYPE_STRING:
+        return '"%s"' % value
+    elif ftype == fdt.TYPE_BOOL:
+        return 'true'
+    elif ftype == fdt.TYPE_INT64:
+        return '%#x' % value
+
+def get_compat_name(node):
+    """Get a node's first compatible string as a C identifier
+
+    Args:
+        node: Node object to check
+    Return:
+        Tuple:
+            C identifier for the first compatible string
+            List of C identifiers for all the other compatible strings
+                (possibly empty)
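+
+    Example (illustrative, not part of the original docstring): a node with
+        compatible = "vendor,chip-v2", "vendor,chip";
+    returns ('vendor_chip_v2', ['vendor_chip']).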
+    """
+    compat = node.props['compatible'].value
+    aliases = []
+    if isinstance(compat, list):
+        compat, aliases = compat[0], compat[1:]
+    return conv_name_to_c(compat), [conv_name_to_c(a) for a in aliases]
+
+
+class DtbPlatdata(object):
+    """Provide a means to convert device tree binary data to platform data
+
+    The output of this process is C structures which can be used in space-
+    constrained environments where the ~3KB code overhead of device tree
+    code is not affordable.
+
+    Properties:
+        _fdt: Fdt object, referencing the device tree
+        _dtb_fname: Filename of the input device tree binary file
+        _valid_nodes: A list of Node objects with compatible strings
+        _include_disabled: true to include nodes marked status = "disabled"
+        _outfile: The current output file (sys.stdout or a real file)
+        _lines: Stashed list of output lines for outputting in the future
+    """
+    def __init__(self, dtb_fname, include_disabled):
+        self._fdt = None
+        self._dtb_fname = dtb_fname
+        self._valid_nodes = None
+        self._include_disabled = include_disabled
+        self._outfile = None
+        self._lines = []
+        self._aliases = {}
+
+    def setup_output(self, fname):
+        """Set up the output destination
+
+        Once this is done, future calls to self.out() will output to this
+        file.
+
+        Args:
+            fname: Filename to send output to, or '-' for stdout
+        """
+        if fname == '-':
+            self._outfile = sys.stdout
+        else:
+            self._outfile = open(fname, 'w')
+
+    def out(self, line):
+        """Output a string to the output file
+
+        Args:
+            line: String to output
+        """
+        self._outfile.write(line)
+
+    def buf(self, line):
+        """Buffer up a string to send later
+
+        Args:
+            line: String to add to our 'buffer' list
+        """
+        self._lines.append(line)
+
+    def get_buf(self):
+        """Get the contents of the output buffer, and clear it
+
+        Returns:
+            The output buffer, which is then cleared for future use
+        """
+        lines = self._lines
+        self._lines = []
+        return lines
+
+    def out_header(self):
+        """Output a message indicating that this is an auto-generated file"""
+        self.out('''/*
+ * DO NOT MODIFY
+ *
+ * This file was generated by dtoc from a .dtb (device tree binary) file.
+ */
+
+''')
+
+    def get_phandle_argc(self, prop, node_name):
+        """Check if a node contains phandles
+
+        We have no reliable way of detecting whether a node uses a phandle
+        or not. As an interim measure, use a list of known property names.
+
+        Args:
+            prop: Prop object to check
+            node_name: Node name, used only in error messages
+        Return:
+            PhandleInfo with the maximum number of argument cells and a list
+                of argument counts, if this is a phandle; else None
+        """
+        if prop.name in ['clocks']:
+            if not isinstance(prop.value, list):
+                prop.value = [prop.value]
+            val = prop.value
+            i = 0
+
+            max_args = 0
+            args = []
+            while i < len(val):
+                phandle = fdt_util.fdt32_to_cpu(val[i])
+                # If we get to the end of the list, stop. This can happen
+                # since some nodes have more phandles in the list than others,
+                # but we allocate enough space for the largest list. So those
+                # nodes with shorter lists end up with zeroes at the end.
+                if not phandle:
+                    break
+                target = self._fdt.phandle_to_node.get(phandle)
+                if not target:
+                    raise ValueError("Cannot parse '%s' in node '%s'" %
+                                     (prop.name, node_name))
+                prop_name = '#clock-cells'
+                cells = target.props.get(prop_name)
+                if not cells:
+                    raise ValueError("Node '%s' has no '%s' property" %
+                            (target.name, prop_name))
+                num_args = fdt_util.fdt32_to_cpu(cells.value)
+                max_args = max(max_args, num_args)
+                args.append(num_args)
+                i += 1 + num_args
+            return PhandleInfo(max_args, args)
+        return None
+
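+    # Note on get_phandle_argc(): as a worked example, the test file
+    # dtoc_test_phandle.dts in this directory has
+    #
+    #     clocks = <&phandle &phandle_1 11 &phandle_2 12 13 &phandle>;
+    #
+    # where the targets declare #clock-cells = <0>, <1> and <2> respectively.
+    # The list is decoded as (phandle, argument cells) groups, giving
+    # PhandleInfo(2, [0, 1, 2, 0]).
+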
+    def scan_dtb(self):
+        """Scan the device tree to obtain a tree of nodes and properties
+
+        Once this is done, self._fdt.GetRoot() can be called to obtain the
+        device tree root node, and progress from there.
+        """
+        self._fdt = fdt.FdtScan(self._dtb_fname)
+
+    def scan_node(self, root):
+        """Scan a node and subnodes to build a tree of node and phandle info
+
+        This adds each node to self._valid_nodes.
+
+        Args:
+            root: Root node for scan
+        """
+        for node in root.subnodes:
+            if 'compatible' in node.props:
+                status = node.props.get('status')
+                if (self._include_disabled or not status or
+                        status.value != 'disabled'):
+                    self._valid_nodes.append(node)
+
+            # recurse to handle any subnodes
+            self.scan_node(node)
+
+    def scan_tree(self):
+        """Scan the device tree for useful information
+
+        This fills in the following properties:
+            _valid_nodes: A list of nodes we wish to include in the
+                platform data
+        """
+        self._valid_nodes = []
+        return self.scan_node(self._fdt.GetRoot())
+
+    @staticmethod
+    def get_num_cells(node):
+        """Get the number of cells in addresses and sizes for this node
+
+        Args:
+            node: Node to check
+
+        Returns:
+            Tuple:
+                Number of address cells for this node
+                Number of size cells for this node
+        """
+        parent = node.parent
+        na, ns = 2, 2
+        if parent:
+            na_prop = parent.props.get('#address-cells')
+            ns_prop = parent.props.get('#size-cells')
+            if na_prop:
+                na = fdt_util.fdt32_to_cpu(na_prop.value)
+            if ns_prop:
+                ns = fdt_util.fdt32_to_cpu(ns_prop.value)
+        return na, ns
+
+    def scan_reg_sizes(self):
+        """Scan for 64-bit 'reg' properties and update the values
+
+        This finds 'reg' properties with 64-bit data and converts the value to
+        an array of 64-bit values. This allows it to be output in a way that the
+        C code can read.
+        """
+        for node in self._valid_nodes:
+            reg = node.props.get('reg')
+            if not reg:
+                continue
+            na, ns = self.get_num_cells(node)
+            total = na + ns
+
+            if reg.type != fdt.TYPE_INT:
+                raise ValueError("Node '%s' reg property is not an int" %
+                                 node.name)
+            if len(reg.value) % total:
+                raise ValueError("Node '%s' reg property has %d cells "
+                        'which is not a multiple of na + ns = %d + %d' %
+                        (node.name, len(reg.value), na, ns))
+            reg.na = na
+            reg.ns = ns
+            if na != 1 or ns != 1:
+                reg.type = fdt.TYPE_INT64
+                i = 0
+                new_value = []
+                val = reg.value
+                if not isinstance(val, list):
+                    val = [val]
+                while i < len(val):
+                    addr = fdt_util.fdt_cells_to_cpu(val[i:], reg.na)
+                    i += na
+                    size = fdt_util.fdt_cells_to_cpu(val[i:], reg.ns)
+                    i += ns
+                    new_value += [addr, size]
+                reg.value = new_value
+
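+    # Sketch of what scan_reg_sizes() does: with #address-cells = <1> and
+    # #size-cells = <2> (as in dtoc_test_addr32_64.dts), a property
+    #
+    #     reg = <0x1234 0x5678 0x0>;
+    #
+    # is regrouped into 64-bit (address, size) pairs, so reg.value becomes
+    # [0x1234, 0x567800000000] and reg.type becomes fdt.TYPE_INT64.
+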
+    def scan_structs(self):
+        """Scan the device tree building up the C structures we will use.
+
+        Build a dict keyed by C struct name containing a dict of Prop
+        objects for each struct field (keyed by property name). Where the
+        same struct appears multiple times, try to use the 'widest'
+        property, i.e. the one with a type which can express all others.
+
+        Once the widest property is determined, all other properties are
+        updated to match that width.
+        """
+        structs = {}
+        for node in self._valid_nodes:
+            node_name, _ = get_compat_name(node)
+            fields = {}
+
+            # Get a list of all the valid properties in this node.
+            for name, prop in node.props.items():
+                if name not in PROP_IGNORE_LIST and name[0] != '#':
+                    fields[name] = copy.deepcopy(prop)
+
+            # If we've seen this node_name before, update the existing struct.
+            if node_name in structs:
+                struct = structs[node_name]
+                for name, prop in fields.items():
+                    oldprop = struct.get(name)
+                    if oldprop:
+                        oldprop.Widen(prop)
+                    else:
+                        struct[name] = prop
+
+            # Otherwise store this as a new struct.
+            else:
+                structs[node_name] = fields
+
+        upto = 0
+        for node in self._valid_nodes:
+            node_name, _ = get_compat_name(node)
+            struct = structs[node_name]
+            for name, prop in node.props.items():
+                if name not in PROP_IGNORE_LIST and name[0] != '#':
+                    prop.Widen(struct[name])
+            upto += 1
+
+            struct_name, aliases = get_compat_name(node)
+            for alias in aliases:
+                self._aliases[alias] = struct_name
+
+        return structs
+
+    def scan_phandles(self):
+        """Figure out what phandles each node uses
+
+        We need to be careful when outputting nodes that use phandles since
+        they must come after the declaration of the phandles in the C file.
+        Otherwise we get a compiler error since the phandle struct is not yet
+        declared.
+
+        This function adds to each node a list of phandle nodes that the node
+        depends on. This allows us to output things in the right order.
+        """
+        for node in self._valid_nodes:
+            node.phandles = set()
+            for pname, prop in node.props.items():
+                if pname in PROP_IGNORE_LIST or pname[0] == '#':
+                    continue
+                info = self.get_phandle_argc(prop, node.name)
+                if info:
+                    # Process the list as pairs of (phandle, id)
+                    pos = 0
+                    for args in info.args:
+                        phandle_cell = prop.value[pos]
+                        phandle = fdt_util.fdt32_to_cpu(phandle_cell)
+                        target_node = self._fdt.phandle_to_node[phandle]
+                        node.phandles.add(target_node)
+                        pos += 1 + args
+
+
+    def generate_structs(self, structs):
+        """Generate struct defintions for the platform data
+
+        This writes out the body of a header file consisting of structure
+        definitions for node in self._valid_nodes. See the documentation in
+        README.of-plat for more information.
+        """
+        self.out_header()
+        self.out('#include <stdbool.h>\n')
+        self.out('#include <linux/libfdt.h>\n')
+
+        # Output the struct definition
+        for name in sorted(structs):
+            self.out('struct %s%s {\n' % (STRUCT_PREFIX, name))
+            for pname in sorted(structs[name]):
+                prop = structs[name][pname]
+                info = self.get_phandle_argc(prop, name)
+                if info:
+                    # For phandles, include a reference to the target
+                    struct_name = 'struct phandle_%d_arg' % info.max_args
+                    self.out('\t%s%s[%d]' % (tab_to(2, struct_name),
+                                             conv_name_to_c(prop.name),
+                                             len(info.args)))
+                else:
+                    ptype = TYPE_NAMES[prop.type]
+                    self.out('\t%s%s' % (tab_to(2, ptype),
+                                         conv_name_to_c(prop.name)))
+                    if isinstance(prop.value, list):
+                        self.out('[%d]' % len(prop.value))
+                self.out(';\n')
+            self.out('};\n')
+
+        for alias, struct_name in self._aliases.iteritems():
+            self.out('#define %s%s %s%s\n'% (STRUCT_PREFIX, alias,
+                                             STRUCT_PREFIX, struct_name))
+
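+    # Example of the header output produced by generate_structs() for a node
+    # with compatible "sandbox,spl-test" and an 'intval' property (a sketch:
+    # STRUCT_PREFIX, TYPE_NAMES and conv_name_to_c() are defined earlier in
+    # this file and are assumed here to give 'dtd_', 'fdt32_t' and
+    # 'sandbox_spl_test' respectively):
+    #
+    #     struct dtd_sandbox_spl_test {
+    #             fdt32_t intval;
+    #     };
+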
+    def output_node(self, node):
+        """Output the C code for a node
+
+        Args:
+            node: node to output
+        """
+        struct_name, _ = get_compat_name(node)
+        var_name = conv_name_to_c(node.name)
+        self.buf('static struct %s%s %s%s = {\n' %
+                 (STRUCT_PREFIX, struct_name, VAL_PREFIX, var_name))
+        for pname, prop in node.props.items():
+            if pname in PROP_IGNORE_LIST or pname[0] == '#':
+                continue
+            member_name = conv_name_to_c(prop.name)
+            self.buf('\t%s= ' % tab_to(3, '.' + member_name))
+
+            # Special handling for lists
+            if isinstance(prop.value, list):
+                self.buf('{')
+                vals = []
+                # For phandles, output a reference to the platform data
+                # of the target node.
+                info = self.get_phandle_argc(prop, node.name)
+                if info:
+                    # Process the list as pairs of (phandle, id)
+                    pos = 0
+                    for args in info.args:
+                        phandle_cell = prop.value[pos]
+                        phandle = fdt_util.fdt32_to_cpu(phandle_cell)
+                        target_node = self._fdt.phandle_to_node[phandle]
+                        name = conv_name_to_c(target_node.name)
+                        arg_values = []
+                        for i in range(args):
+                            arg_values.append(str(fdt_util.fdt32_to_cpu(prop.value[pos + 1 + i])))
+                        pos += 1 + args
+                        vals.append('\t{&%s%s, {%s}}' % (VAL_PREFIX, name,
+                                                     ', '.join(arg_values)))
+                    for val in vals:
+                        self.buf('\n\t\t%s,' % val)
+                else:
+                    for val in prop.value:
+                        vals.append(get_value(prop.type, val))
+
+                    # Put 8 values per line to avoid very long lines.
+                    for i in xrange(0, len(vals), 8):
+                        if i:
+                            self.buf(',\n\t\t')
+                        self.buf(', '.join(vals[i:i + 8]))
+                self.buf('}')
+            else:
+                self.buf(get_value(prop.type, prop.value))
+            self.buf(',\n')
+        self.buf('};\n')
+
+        # Add a device declaration
+        self.buf('U_BOOT_DEVICE(%s) = {\n' % var_name)
+        self.buf('\t.name\t\t= "%s",\n' % struct_name)
+        self.buf('\t.platdata\t= &%s%s,\n' % (VAL_PREFIX, var_name))
+        self.buf('\t.platdata_size\t= sizeof(%s%s),\n' % (VAL_PREFIX, var_name))
+        self.buf('};\n')
+        self.buf('\n')
+
+        self.out(''.join(self.get_buf()))
+
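+    # Sketch of the C output produced by output_node() for the same node
+    # (assuming VAL_PREFIX, defined earlier in this file, is 'dtv_'):
+    #
+    #     static struct dtd_sandbox_spl_test dtv_spl_test = {
+    #             .intval         = 0x1,
+    #     };
+    #
+    #     U_BOOT_DEVICE(spl_test) = {
+    #             .name           = "sandbox_spl_test",
+    #             .platdata       = &dtv_spl_test,
+    #             .platdata_size  = sizeof(dtv_spl_test),
+    #     };
+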
+    def generate_tables(self):
+        """Generate device defintions for the platform data
+
+        This writes out C platform data initialisation data and
+        U_BOOT_DEVICE() declarations for each valid node. Where a node has
+        multiple compatible strings, a #define is used to make them equivalent.
+
+        See the documentation in doc/driver-model/of-plat.txt for more
+        information.
+        """
+        self.out_header()
+        self.out('#include <common.h>\n')
+        self.out('#include <dm.h>\n')
+        self.out('#include <dt-structs.h>\n')
+        self.out('\n')
+        nodes_to_output = list(self._valid_nodes)
+
+        # Keep outputting nodes until there are none left
+        while nodes_to_output:
+            node = nodes_to_output[0]
+            # Output all the node's dependencies first
+            for req_node in node.phandles:
+                if req_node in nodes_to_output:
+                    self.output_node(req_node)
+                    nodes_to_output.remove(req_node)
+            self.output_node(node)
+            nodes_to_output.remove(node)
+
+
+def run_steps(args, dtb_file, include_disabled, output):
+    """Run all the steps of the dtoc tool
+
+    Args:
+        args: List of non-option arguments provided to the program
+        dtb_file: Filename of dtb file to process
+        include_disabled: True to include disabled nodes
+        output: Name of output file
+    """
+    if not args:
+        raise ValueError('Please specify a command: struct, platdata')
+
+    plat = DtbPlatdata(dtb_file, include_disabled)
+    plat.scan_dtb()
+    plat.scan_tree()
+    plat.scan_reg_sizes()
+    plat.setup_output(output)
+    structs = plat.scan_structs()
+    plat.scan_phandles()
+
+    for cmd in args[0].split(','):
+        if cmd == 'struct':
+            plat.generate_structs(structs)
+        elif cmd == 'platdata':
+            plat.generate_tables()
+        else:
+            raise ValueError("Unknown command '%s': (use: struct, platdata)" %
+                             cmd)
diff --git a/tools/u-boot-tools/dtoc/dtoc b/tools/u-boot-tools/dtoc/dtoc
new file mode 120000
index 0000000000000000000000000000000000000000..896ca44e62f76ce1117198f7b20c30bcca8d3326
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc
@@ -0,0 +1 @@
+dtoc.py
\ No newline at end of file
diff --git a/tools/u-boot-tools/dtoc/dtoc.py b/tools/u-boot-tools/dtoc/dtoc.py
new file mode 100755
index 0000000000000000000000000000000000000000..2277af9bf78b09248340c0cd6ed8c2be0b022d2d
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python2
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+"""Device tree to C tool
+
+This tool converts a device tree binary file (.dtb) into two C files. The
+intent is to allow a C program to access data from the device tree without
+having to link against libfdt. By putting the data from the device tree into
+C structures, normal C code can be used. This helps to reduce the size of the
+compiled program.
+
+Dtoc produces two output files:
+
+   dt-structs.h  - contains struct definitions
+   dt-platdata.c - contains data from the device tree using the struct
+                      definitions, as well as U-Boot driver definitions.
+
+This tool is used in U-Boot to provide device tree data to SPL without
+increasing the code size of SPL. This supports the CONFIG_SPL_OF_PLATDATA
+option. For more information about the use of this option and tool please
+see doc/driver-model/of-plat.txt
+"""
+
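+# Typical invocations (illustrative; see the option parsing at the bottom of
+# this file; 'u-boot.dtb' is just an example input):
+#
+#     dtoc -d u-boot.dtb -o dt-structs.h struct
+#     dtoc -d u-boot.dtb -o dt-platdata.c platdata
+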
+from optparse import OptionParser
+import os
+import sys
+import unittest
+
+# Bring in the patman libraries
+our_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(our_path, '../patman'))
+
+# Bring in the libfdt module
+sys.path.insert(0, 'scripts/dtc/pylibfdt')
+sys.path.insert(0, os.path.join(our_path,
+                '../../build-sandbox_spl/scripts/dtc/pylibfdt'))
+
+import dtb_platdata
+import test_util
+
+def run_tests(args):
+    """Run all the test we have for dtoc
+
+    Args:
+        args: List of positional args provided to dtoc. This can hold a test
+            name to execute (as in 'dtoc -t test_empty_file', for example)
+    """
+    import test_dtoc
+
+    result = unittest.TestResult()
+    sys.argv = [sys.argv[0]]
+    test_name = args and args[0] or None
+    for module in (test_dtoc.TestDtoc,):
+        if test_name:
+            try:
+                suite = unittest.TestLoader().loadTestsFromName(test_name, module)
+            except AttributeError:
+                continue
+        else:
+            suite = unittest.TestLoader().loadTestsFromTestCase(module)
+        suite.run(result)
+
+    print result
+    for _, err in result.errors:
+        print err
+    for _, err in result.failures:
+        print err
+
+def RunTestCoverage():
+    """Run the tests and check that we get 100% coverage"""
+    sys.argv = [sys.argv[0]]
+    test_util.RunTestCoverage('tools/dtoc/dtoc.py', '/dtoc.py',
+            ['tools/patman/*.py', '*/fdt*', '*test*'], options.build_dir)
+
+
+if __name__ != '__main__':
+    sys.exit(1)
+
+parser = OptionParser()
+parser.add_option('-B', '--build-dir', type='string', default='b',
+        help='Directory containing the build output')
+parser.add_option('-d', '--dtb-file', action='store',
+                  help='Specify the .dtb input file')
+parser.add_option('--include-disabled', action='store_true',
+                  help='Include disabled nodes')
+parser.add_option('-o', '--output', action='store', default='-',
+                  help='Select output filename')
+parser.add_option('-P', '--processes', type=int,
+                  help='set number of processes to use for running tests')
+parser.add_option('-t', '--test', action='store_true', dest='test',
+                  default=False, help='run tests')
+parser.add_option('-T', '--test-coverage', action='store_true',
+                default=False, help='run tests and check for 100% coverage')
+(options, args) = parser.parse_args()
+
+# Run our meagre tests
+if options.test:
+    run_tests(args)
+
+elif options.test_coverage:
+    RunTestCoverage()
+
+else:
+    dtb_platdata.run_steps(args, options.dtb_file, options.include_disabled,
+                           options.output)
diff --git a/tools/u-boot-tools/dtoc/dtoc_test.dts b/tools/u-boot-tools/dtoc/dtoc_test.dts
new file mode 100644
index 0000000000000000000000000000000000000000..b2259483a6b784f76c7799b337e21c5f71ef5bb9
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2017 Google, Inc
+ */
+
+ /dts-v1/;
+
+/ {
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_add_prop.dts b/tools/u-boot-tools/dtoc/dtoc_test_add_prop.dts
new file mode 100644
index 0000000000000000000000000000000000000000..fa296e555278f5cafa4f84e59c56c9c16df13d79
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_add_prop.dts
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2018 Google, Inc
+ */
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	spl-test {
+		u-boot,dm-pre-reloc;
+		compatible = "sandbox,spl-test";
+		intval = <1>;
+	};
+
+	spl-test2 {
+		u-boot,dm-pre-reloc;
+		compatible = "sandbox,spl-test";
+		intarray = <5>;
+	};
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_addr32.dts b/tools/u-boot-tools/dtoc/dtoc_test_addr32.dts
new file mode 100644
index 0000000000000000000000000000000000000000..239045497c6e343f2a9a36f55cd3206738c6f401
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_addr32.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2017 Google, Inc
+ */
+
+ /dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	test1 {
+		u-boot,dm-pre-reloc;
+		compatible = "test1";
+		reg = <0x1234 0x5678>;
+	};
+
+	test2 {
+		u-boot,dm-pre-reloc;
+		compatible = "test2";
+		reg = <0x12345678 0x98765432 2 3>;
+	};
+
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_addr32_64.dts b/tools/u-boot-tools/dtoc/dtoc_test_addr32_64.dts
new file mode 100644
index 0000000000000000000000000000000000000000..7599d5b0a593a403f817d6879979d75fda65d060
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_addr32_64.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2017 Google, Inc
+ */
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <2>;
+
+	test1 {
+		u-boot,dm-pre-reloc;
+		compatible = "test1";
+		reg = <0x1234 0x5678 0x0>;
+	};
+
+	test2 {
+		u-boot,dm-pre-reloc;
+		compatible = "test2";
+		reg = <0x12345678 0x98765432 0x10987654>;
+	};
+
+	test3 {
+		u-boot,dm-pre-reloc;
+		compatible = "test3";
+		reg = <0x12345678 0x98765432 0x10987654 2 0 3>;
+	};
+
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_addr64.dts b/tools/u-boot-tools/dtoc/dtoc_test_addr64.dts
new file mode 100644
index 0000000000000000000000000000000000000000..263d2513869b06d3cef50e7b06fab53dacddb08b
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_addr64.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2017 Google, Inc
+ */
+
+ /dts-v1/;
+
+/ {
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	test1 {
+		u-boot,dm-pre-reloc;
+		compatible = "test1";
+		reg = /bits/ 64 <0x1234 0x5678>;
+	};
+
+	test2 {
+		u-boot,dm-pre-reloc;
+		compatible = "test2";
+		reg = /bits/ 64 <0x1234567890123456 0x9876543210987654>;
+	};
+
+	test3 {
+		u-boot,dm-pre-reloc;
+		compatible = "test3";
+		reg = /bits/ 64 <0x1234567890123456 0x9876543210987654 2 3>;
+	};
+
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_addr64_32.dts b/tools/u-boot-tools/dtoc/dtoc_test_addr64_32.dts
new file mode 100644
index 0000000000000000000000000000000000000000..85e4f5fdaeb40a854196b8e1d22e6ee648035d2a
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_addr64_32.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2017 Google, Inc
+ */
+
+/dts-v1/;
+
+/ {
+	#address-cells = <2>;
+	#size-cells = <1>;
+
+	test1 {
+		u-boot,dm-pre-reloc;
+		compatible = "test1";
+		reg = <0x1234 0x0 0x5678>;
+	};
+
+	test2 {
+		u-boot,dm-pre-reloc;
+		compatible = "test2";
+		reg = <0x12345678 0x90123456 0x98765432>;
+	};
+
+	test3 {
+		u-boot,dm-pre-reloc;
+		compatible = "test3";
+		reg = <0x12345678 0x90123456 0x98765432 0 2 3>;
+	};
+
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_aliases.dts b/tools/u-boot-tools/dtoc/dtoc_test_aliases.dts
new file mode 100644
index 0000000000000000000000000000000000000000..e545816f4e5593d0aefbfcd324c9f64617a12fdc
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_aliases.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2017 Google, Inc
+ */
+
+ /dts-v1/;
+
+/ {
+	spl-test {
+		u-boot,dm-pre-reloc;
+		compatible = "compat1", "compat2.1-fred", "compat3";
+		intval = <1>;
+	};
+
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_bad_reg.dts b/tools/u-boot-tools/dtoc/dtoc_test_bad_reg.dts
new file mode 100644
index 0000000000000000000000000000000000000000..1312acb619b68969cc9ef90d1805021c29371863
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_bad_reg.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2018 Google, Inc
+ */
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	spl-test {
+	compatible = "test";
+		reg = "fre";
+	};
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_bad_reg2.dts b/tools/u-boot-tools/dtoc/dtoc_test_bad_reg2.dts
new file mode 100644
index 0000000000000000000000000000000000000000..3e9efa43af1632e31b628198deaab43f54435c57
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_bad_reg2.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2018 Google, Inc
+ */
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	spl-test {
+	compatible = "test";
+		reg = <1 2 3>;
+	};
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_empty.dts b/tools/u-boot-tools/dtoc/dtoc_test_empty.dts
new file mode 100644
index 0000000000000000000000000000000000000000..b2259483a6b784f76c7799b337e21c5f71ef5bb9
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_empty.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2017 Google, Inc
+ */
+
+ /dts-v1/;
+
+/ {
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_phandle.dts b/tools/u-boot-tools/dtoc/dtoc_test_phandle.dts
new file mode 100644
index 0000000000000000000000000000000000000000..a71acffc698d3e590dead59a4f36c5e3c580d8c3
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_phandle.dts
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2017 Google, Inc
+ */
+
+ /dts-v1/;
+
+/ {
+	phandle: phandle-target {
+		u-boot,dm-pre-reloc;
+		compatible = "target";
+		intval = <0>;
+		#clock-cells = <0>;
+	};
+
+	phandle_1: phandle2-target {
+		u-boot,dm-pre-reloc;
+		compatible = "target";
+		intval = <1>;
+		#clock-cells = <1>;
+	};
+	phandle_2: phandle3-target {
+		u-boot,dm-pre-reloc;
+		compatible = "target";
+		intval = <2>;
+		#clock-cells = <2>;
+	};
+
+	phandle-source {
+		u-boot,dm-pre-reloc;
+		compatible = "source";
+		clocks = <&phandle &phandle_1 11 &phandle_2 12 13 &phandle>;
+	};
+
+	phandle-source2 {
+		u-boot,dm-pre-reloc;
+		compatible = "source";
+		clocks = <&phandle>;
+	};
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_phandle_bad.dts b/tools/u-boot-tools/dtoc/dtoc_test_phandle_bad.dts
new file mode 100644
index 0000000000000000000000000000000000000000..a3ddc59585183fab4706aba40c45015209e3158f
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_phandle_bad.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2018 Google, Inc
+ */
+
+/dts-v1/;
+
+/ {
+	phandle-source {
+		u-boot,dm-pre-reloc;
+		compatible = "source";
+		clocks = <20>;    /* Invalid phandle */
+	};
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_phandle_bad2.dts b/tools/u-boot-tools/dtoc/dtoc_test_phandle_bad2.dts
new file mode 100644
index 0000000000000000000000000000000000000000..fe25f565fbbd88d3e94ac45b5b00da6659691edb
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_phandle_bad2.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2018 Google, Inc
+ */
+
+/dts-v1/;
+
+/ {
+	phandle: phandle-target {
+		u-boot,dm-pre-reloc;
+		compatible = "target";
+		intval = <0>;
+	};
+
+	phandle-source2 {
+		u-boot,dm-pre-reloc;
+		compatible = "source";
+		clocks = <&phandle>;
+	};
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_phandle_reorder.dts b/tools/u-boot-tools/dtoc/dtoc_test_phandle_reorder.dts
new file mode 100644
index 0000000000000000000000000000000000000000..aa71d56f27cce7ebb4bde1ef1003a7545e1a5d51
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_phandle_reorder.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2018 Google, Inc
+ */
+
+/dts-v1/;
+
+/ {
+
+	phandle-source2 {
+		u-boot,dm-pre-reloc;
+		compatible = "source";
+		clocks = <&phandle>;
+	};
+
+	phandle: phandle-target {
+		u-boot,dm-pre-reloc;
+		compatible = "target";
+		#clock-cells = <0>;
+	};
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_phandle_single.dts b/tools/u-boot-tools/dtoc/dtoc_test_phandle_single.dts
new file mode 100644
index 0000000000000000000000000000000000000000..aacd0b15fa1f003b0bf2a15848c3c005cd63f53d
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_phandle_single.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2018 Google, Inc
+ */
+
+/dts-v1/;
+
+/ {
+	phandle: phandle-target {
+		u-boot,dm-pre-reloc;
+		compatible = "target";
+		intval = <0>;
+		#clock-cells = <0>;
+	};
+
+	phandle-source2 {
+		u-boot,dm-pre-reloc;
+		compatible = "source";
+		clocks = <&phandle>;
+	};
+};
diff --git a/tools/u-boot-tools/dtoc/dtoc_test_simple.dts b/tools/u-boot-tools/dtoc/dtoc_test_simple.dts
new file mode 100644
index 0000000000000000000000000000000000000000..165680bd4bf0cc1422b8bf7b38a65ef1c9414d04
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/dtoc_test_simple.dts
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test device tree file for dtoc
+ *
+ * Copyright 2017 Google, Inc
+ */
+
+ /dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	spl-test {
+		u-boot,dm-pre-reloc;
+		compatible = "sandbox,spl-test";
+		boolval;
+		intval = <1>;
+		intarray = <2 3 4>;
+		byteval = [05];
+		bytearray = [06];
+		longbytearray = [09 0a 0b 0c 0d 0e 0f 10 11];
+		stringval = "message";
+		stringarray = "multi-word", "message";
+		notstring = [20 21 22 10 00];
+	};
+
+	spl-test2 {
+		u-boot,dm-pre-reloc;
+		compatible = "sandbox,spl-test";
+		intval = <3>;
+		intarray = <5>;
+		byteval = [08];
+		bytearray = [01 23 34];
+		longbytearray = [09 0a 0b 0c];
+		stringval = "message2";
+		stringarray = "another", "multi-word", "message";
+	};
+
+	spl-test3 {
+		u-boot,dm-pre-reloc;
+		compatible = "sandbox,spl-test";
+		stringarray = "one";
+	};
+
+	spl-test4 {
+		u-boot,dm-pre-reloc;
+		compatible = "sandbox,spl-test.2";
+	};
+
+	i2c@0 {
+		compatible = "sandbox,i2c-test";
+		u-boot,dm-pre-reloc;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pmic@9 {
+			compatible = "sandbox,pmic-test";
+			u-boot,dm-pre-reloc;
+			reg = <9>;
+			low-power;
+		};
+	};
+};
diff --git a/tools/u-boot-tools/dtoc/fdt.py b/tools/u-boot-tools/dtoc/fdt.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ad72f89ec7bffea7ad0dd4effcf2433191075a1
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/fdt.py
@@ -0,0 +1,657 @@
+#!/usr/bin/python
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+import struct
+import sys
+
+import fdt_util
+import libfdt
+from libfdt import QUIET_NOTFOUND
+
+# This deals with a device tree, presenting it as an assortment of Node and
+# Prop objects, representing nodes and properties, respectively. This file
+# contains the base classes and defines the high-level API. You can use
+# FdtScan() as a convenience function to create and scan an Fdt.
+
+# This implementation uses a libfdt Python library to access the device tree,
+# so it is fairly efficient.
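+#
+# A minimal usage sketch (the path and property name are only examples):
+#
+#     dtb = FdtScan('test.dtb')
+#     node = dtb.GetNode('/spl-test')
+#     prop = node.props.get('intval')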
+
+# A list of types we support
+(TYPE_BYTE, TYPE_INT, TYPE_STRING, TYPE_BOOL, TYPE_INT64) = range(5)
+
+def CheckErr(errnum, msg):
+    if errnum:
+        raise ValueError('Error %d: %s: %s' %
+            (errnum, libfdt.fdt_strerror(errnum), msg))
+
+class Prop:
+    """A device tree property
+
+    Properties:
+        name: Property name (as per the device tree)
+        value: Property value as a string of bytes, or a list of strings of
+            bytes
+        type: Value type
+    """
+    def __init__(self, node, offset, name, bytes):
+        self._node = node
+        self._offset = offset
+        self.name = name
+        self.value = None
+        self.bytes = str(bytes)
+        self.dirty = False
+        if not bytes:
+            self.type = TYPE_BOOL
+            self.value = True
+            return
+        self.type, self.value = self.BytesToValue(bytes)
+
+    def RefreshOffset(self, poffset):
+        self._offset = poffset
+
+    def Widen(self, newprop):
+        """Figure out which property type is more general
+
+        Given a current property and a new property, this function returns the
+        one that is less specific as to type. The less specific property will
+        be able to represent the data in the more specific property. This is
+        used for things like:
+
+            node1 {
+                compatible = "fred";
+                value = <1>;
+            };
+            node2 {
+                compatible = "fred";
+                value = <1 2>;
+            };
+
+        Here we want to use an int array for 'value'. The first property
+        suggests that a single int is enough, but the second one shows that
+        it is not. Calling this function with these two properties would
+        update the current property to be like the second, since it is less
+        specific.
+        """
+        if newprop.type < self.type:
+            self.type = newprop.type
+
+        if type(newprop.value) == list and type(self.value) != list:
+            self.value = [self.value]
+
+        if type(self.value) == list and len(newprop.value) > len(self.value):
+            val = self.GetEmpty(self.type)
+            while len(self.value) < len(newprop.value):
+                self.value.append(val)
+
+    def BytesToValue(self, bytes):
+        """Converts a string of bytes into a type and value
+
+        Args:
+            bytes: A string containing the property value
+
+        Return:
+            A tuple:
+                Type of data
+                Data, either a single element or a list of elements. Each element
+                is one of:
+                    TYPE_STRING: string value from the property
+                    TYPE_INT: a byte-swapped integer stored as a 4-byte string
+                    TYPE_BYTE: a byte stored as a single-byte string
+        """
+        bytes = str(bytes)
+        size = len(bytes)
+        strings = bytes.split('\0')
+        is_string = True
+        count = len(strings) - 1
+        if count > 0 and not strings[-1]:
+            for string in strings[:-1]:
+                if not string:
+                    is_string = False
+                    break
+                for ch in string:
+                    if ch < ' ' or ch > '~':
+                        is_string = False
+                        break
+        else:
+            is_string = False
+        if is_string:
+            if count == 1:
+                return TYPE_STRING, strings[0]
+            else:
+                return TYPE_STRING, strings[:-1]
+        if size % 4:
+            if size == 1:
+                return TYPE_BYTE, bytes[0]
+            else:
+                return TYPE_BYTE, list(bytes)
+        val = []
+        for i in range(0, size, 4):
+            val.append(bytes[i:i + 4])
+        if size == 4:
+            return TYPE_INT, val[0]
+        else:
+            return TYPE_INT, val
+
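+    # Examples of the classification done by BytesToValue() (illustrative):
+    #
+    #     'message\0'           -> (TYPE_STRING, 'message')
+    #     'multi\0word\0'       -> (TYPE_STRING, ['multi', 'word'])
+    #     '\x05'                -> (TYPE_BYTE, '\x05')
+    #     '\x00\x00\x00\x01'    -> (TYPE_INT, '\x00\x00\x00\x01')
+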
+    @classmethod
+    def GetEmpty(self, type):
+        """Get an empty / zero value of the given type
+
+        Returns:
+            A single value of the given type
+        """
+        if type == TYPE_BYTE:
+            return chr(0)
+        elif type == TYPE_INT:
+            return struct.pack('>I', 0)
+        elif type == TYPE_STRING:
+            return ''
+        else:
+            return True
+
+    def GetOffset(self):
+        """Get the offset of a property
+
+        Returns:
+            The offset of the property (struct fdt_property) within the file
+        """
+        self._node._fdt.CheckCache()
+        return self._node._fdt.GetStructOffset(self._offset)
+
+    def SetInt(self, val):
+        """Set the integer value of the property
+
+        The device tree is marked dirty so that the value will be written to
+        the block on the next sync.
+
+        Args:
+            val: Integer value (32-bit, single cell)
+        """
+        self.bytes = struct.pack('>I', val)
+        self.value = self.bytes
+        self.type = TYPE_INT
+        self.dirty = True
+
+    def SetData(self, bytes):
+        """Set the value of a property as bytes
+
+        Args:
+            bytes: New property value to set
+        """
+        self.bytes = str(bytes)
+        self.type, self.value = self.BytesToValue(bytes)
+        self.dirty = True
+
+    def Sync(self, auto_resize=False):
+        """Sync property changes back to the device tree
+
+        This updates the device tree blob with any changes to this property
+        since the last sync.
+
+        Args:
+            auto_resize: Resize the device tree automatically if it does not
+                have enough space for the update
+
+        Raises:
+            FdtException if auto_resize is False and there is not enough space
+        """
+        if self._offset is None or self.dirty:
+            node = self._node
+            fdt_obj = node._fdt._fdt_obj
+            if auto_resize:
+                while fdt_obj.setprop(node.Offset(), self.name, self.bytes,
+                                    (libfdt.NOSPACE,)) == -libfdt.NOSPACE:
+                    fdt_obj.resize(fdt_obj.totalsize() + 1024)
+                    fdt_obj.setprop(node.Offset(), self.name, self.bytes)
+            else:
+                fdt_obj.setprop(node.Offset(), self.name, self.bytes)
+
+
+class Node:
+    """A device tree node
+
+    Properties:
+        offset: Integer offset in the device tree
+        name: Device tree node name
+        path: Full path to node, along with the node name itself
+        _fdt: Device tree object
+        subnodes: A list of subnodes for this node, each a Node object
+        props: A dict of properties for this node, each a Prop object.
+            Keyed by property name
+    """
+    def __init__(self, fdt, parent, offset, name, path):
+        self._fdt = fdt
+        self.parent = parent
+        self._offset = offset
+        self.name = name
+        self.path = path
+        self.subnodes = []
+        self.props = {}
+
+    def GetFdt(self):
+        """Get the Fdt object for this node
+
+        Returns:
+            Fdt object
+        """
+        return self._fdt
+
+    def FindNode(self, name):
+        """Find a node given its name
+
+        Args:
+            name: Node name to look for
+        Returns:
+            Node object if found, else None
+        """
+        for subnode in self.subnodes:
+            if subnode.name == name:
+                return subnode
+        return None
+
+    def Offset(self):
+        """Returns the offset of a node, after checking the cache
+
+        This should be used instead of self._offset directly, to ensure that
+        the cache does not contain invalid offsets.
+        """
+        self._fdt.CheckCache()
+        return self._offset
+
+    def Scan(self):
+        """Scan a node's properties and subnodes
+
+        This fills in the props and subnodes properties, recursively
+        searching into subnodes so that the entire tree is built.
+        """
+        fdt_obj = self._fdt._fdt_obj
+        self.props = self._fdt.GetProps(self)
+        phandle = fdt_obj.get_phandle(self.Offset())
+        if phandle:
+            self._fdt.phandle_to_node[phandle] = self
+
+        offset = fdt_obj.first_subnode(self.Offset(), QUIET_NOTFOUND)
+        while offset >= 0:
+            sep = '' if self.path[-1] == '/' else '/'
+            name = fdt_obj.get_name(offset)
+            path = self.path + sep + name
+            node = Node(self._fdt, self, offset, name, path)
+            self.subnodes.append(node)
+
+            node.Scan()
+            offset = fdt_obj.next_subnode(offset, QUIET_NOTFOUND)
+
+    def Refresh(self, my_offset):
+        """Fix up the _offset for each node, recursively
+
+        Note: This does not take account of property offsets - these will not
+        be updated.
+        """
+        fdt_obj = self._fdt._fdt_obj
+        if self._offset != my_offset:
+            self._offset = my_offset
+        offset = fdt_obj.first_subnode(self._offset, QUIET_NOTFOUND)
+        for subnode in self.subnodes:
+            if subnode.name != fdt_obj.get_name(offset):
+                raise ValueError('Internal error, node name mismatch %s != %s' %
+                                 (subnode.name, fdt_obj.get_name(offset)))
+            subnode.Refresh(offset)
+            offset = fdt_obj.next_subnode(offset, QUIET_NOTFOUND)
+        if offset != -libfdt.FDT_ERR_NOTFOUND:
+            raise ValueError('Internal error, offset == %d' % offset)
+
+        poffset = fdt_obj.first_property_offset(self._offset, QUIET_NOTFOUND)
+        while poffset >= 0:
+            p = fdt_obj.get_property_by_offset(poffset)
+            prop = self.props.get(p.name)
+            if not prop:
+                raise ValueError("Internal error, property '%s' missing, "
+                                 'offset %d' % (p.name, poffset))
+            prop.RefreshOffset(poffset)
+            poffset = fdt_obj.next_property_offset(poffset, QUIET_NOTFOUND)
+
+    def DeleteProp(self, prop_name):
+        """Delete a property of a node
+
+        The property is deleted and the offset cache is invalidated.
+
+        Args:
+            prop_name: Name of the property to delete
+        Raises:
+            ValueError if the property does not exist
+        """
+        CheckErr(self._fdt._fdt_obj.delprop(self.Offset(), prop_name),
+                 "Node '%s': delete property: '%s'" % (self.path, prop_name))
+        del self.props[prop_name]
+        self._fdt.Invalidate()
+
+    def AddZeroProp(self, prop_name):
+        """Add a new property to the device tree with an integer value of 0.
+
+        Args:
+            prop_name: Name of property
+        """
+        self.props[prop_name] = Prop(self, None, prop_name, '\0' * 4)
+
+    def AddEmptyProp(self, prop_name, len):
+        """Add a property with a fixed data size, for filling in later
+
+        The device tree is marked dirty so that the value will be written to
+        the blob on the next sync.
+
+        Args:
+            prop_name: Name of property
+            len: Length of data in property
+        """
+        value = chr(0) * len
+        self.props[prop_name] = Prop(self, None, prop_name, value)
+
+    def SetInt(self, prop_name, val):
+        """Update an integer property int the device tree.
+
+        This is not allowed to change the size of the FDT.
+
+        The device tree is marked dirty so that the value will be written to
+        the blob on the next sync.
+
+        Args:
+            prop_name: Name of property
+            val: Value to set
+        """
+        self.props[prop_name].SetInt(val)
+
+    def SetData(self, prop_name, val):
+        """Set the data value of a property
+
+        The device tree is marked dirty so that the value will be written to
+        the blob on the next sync.
+
+        Args:
+            prop_name: Name of property to set
+            val: Data value to set
+        """
+        self.props[prop_name].SetData(val)
+
+    def SetString(self, prop_name, val):
+        """Set the string value of a property
+
+        The device tree is marked dirty so that the value will be written to
+        the blob on the next sync.
+
+        Args:
+            prop_name: Name of property to set
+            val: String value to set (will be \0-terminated in DT)
+        """
+        self.props[prop_name].SetData(val + chr(0))
+
+    def AddString(self, prop_name, val):
+        """Add a new string property to a node
+
+        The device tree is marked dirty so that the value will be written to
+        the blob on the next sync.
+
+        Args:
+            prop_name: Name of property to add
+            val: String value of property
+        """
+        self.props[prop_name] = Prop(self, None, prop_name, val + chr(0))
+
+    def AddSubnode(self, name):
+        """Add a new subnode to the node
+
+        Args:
+            name: name of node to add
+
+        Returns:
+            New subnode that was created
+        """
+        path = self.path + '/' + name
+        subnode = Node(self._fdt, self, None, name, path)
+        self.subnodes.append(subnode)
+        return subnode
+
+    def Sync(self, auto_resize=False):
+        """Sync node changes back to the device tree
+
+        This updates the device tree blob with any changes to this node and its
+        subnodes since the last sync.
+
+        Args:
+            auto_resize: Resize the device tree automatically if it does not
+                have enough space for the update
+
+        Raises:
+            FdtException if auto_resize is False and there is not enough space
+        """
+        if self._offset is None:
+            # The subnode doesn't exist yet, so add it
+            fdt_obj = self._fdt._fdt_obj
+            if auto_resize:
+                while True:
+                    offset = fdt_obj.add_subnode(self.parent._offset, self.name,
+                                                (libfdt.NOSPACE,))
+                    if offset != -libfdt.NOSPACE:
+                        break
+                    fdt_obj.resize(fdt_obj.totalsize() + 1024)
+            else:
+                offset = fdt_obj.add_subnode(self.parent._offset, self.name)
+            self._offset = offset
+
+        # Sync subnodes in reverse so that we don't disturb node offsets for
+        # nodes that are earlier in the DT. This avoids an O(n^2) rescan of
+        # node offsets.
+        for node in reversed(self.subnodes):
+            node.Sync(auto_resize)
+
+        # Sync properties now, whose offsets should not have been disturbed.
+        # We do this after subnodes, since this disturbs the offsets of these
+        # properties.
+        prop_list = sorted(self.props.values(), key=lambda prop: prop._offset,
+                           reverse=True)
+        for prop in prop_list:
+            prop.Sync(auto_resize)
+
+
+class Fdt:
+    """Provides simple access to a flat device tree blob using libfdts.
+
+    Properties:
+      fname: Filename of fdt
+      _root: Root of device tree (a Node object)
+    """
+    def __init__(self, fname):
+        self._fname = fname
+        self._cached_offsets = False
+        self.phandle_to_node = {}
+        if self._fname:
+            self._fname = fdt_util.EnsureCompiled(self._fname)
+
+            with open(self._fname, 'rb') as fd:
+                self._fdt_obj = libfdt.Fdt(fd.read())
+
+    @staticmethod
+    def FromData(data):
+        """Create a new Fdt object from the given data
+
+        Args:
+            data: Device-tree data blob
+
+        Returns:
+            Fdt object containing the data
+        """
+        fdt = Fdt(None)
+        fdt._fdt_obj = libfdt.Fdt(bytearray(data))
+        return fdt
+
+    def LookupPhandle(self, phandle):
+        """Look up a phandle
+
+        Args:
+            phandle: Phandle to look up (int)
+
+        Returns:
+            Node object the phandle points to
+        """
+        return self.phandle_to_node.get(phandle)
+
+    def Scan(self, root='/'):
+        """Scan a device tree, building up a tree of Node objects
+
+        This fills in the self._root property
+
+        Args:
+            root: Ignored
+
+        TODO(sjg@chromium.org): Implement the 'root' parameter
+        """
+        self._cached_offsets = True
+        self._root = self.Node(self, None, 0, '/', '/')
+        self._root.Scan()
+
+    def GetRoot(self):
+        """Get the root Node of the device tree
+
+        Returns:
+            The root Node object
+        """
+        return self._root
+
+    def GetNode(self, path):
+        """Look up a node from its path
+
+        Args:
+            path: Path to look up, e.g. '/microcode/update@0'
+        Returns:
+            Node object, or None if not found
+        """
+        node = self._root
+        parts = path.split('/')
+        if len(parts) < 2:
+            return None
+        for part in parts[1:]:
+            node = node.FindNode(part)
+            if not node:
+                return None
+        return node
+
+    def Flush(self):
+        """Flush device tree changes back to the file
+
+        If the device tree has changed in memory, write it back to the file.
+        """
+        with open(self._fname, 'wb') as fd:
+            fd.write(self._fdt_obj.as_bytearray())
+
+    def Sync(self, auto_resize=False):
+        """Make sure any DT changes are written to the blob
+
+        Args:
+            auto_resize: Resize the device tree automatically if it does not
+                have enough space for the update
+
+        Raises:
+            FdtException if auto_resize is False and there is not enough space
+        """
+        self._root.Sync(auto_resize)
+        self.Invalidate()
+
+    def Pack(self):
+        """Pack the device tree down to its minimum size
+
+        When nodes and properties shrink or are deleted, wasted space can
+        build up in the device tree binary.
+        """
+        CheckErr(self._fdt_obj.pack(), 'pack')
+        self.Invalidate()
+
+    def GetContents(self):
+        """Get the contents of the FDT
+
+        Returns:
+            The FDT contents as a string of bytes
+        """
+        return self._fdt_obj.as_bytearray()
+
+    def GetFdtObj(self):
+        """Get the contents of the FDT
+
+        Returns:
+            The FDT contents as a libfdt.Fdt object
+        """
+        return self._fdt_obj
+
+    def GetProps(self, node):
+        """Get all properties from a node.
+
+        Args:
+            node: Full path to node name to look in.
+
+        Returns:
+            A dictionary containing all the properties, indexed by property name.
+            The entries are Prop objects.
+
+        Raises:
+            ValueError: if the node does not exist.
+        """
+        props_dict = {}
+        poffset = self._fdt_obj.first_property_offset(node._offset,
+                                                      QUIET_NOTFOUND)
+        while poffset >= 0:
+            p = self._fdt_obj.get_property_by_offset(poffset)
+            prop = Prop(node, poffset, p.name, p)
+            props_dict[prop.name] = prop
+
+            poffset = self._fdt_obj.next_property_offset(poffset,
+                                                         QUIET_NOTFOUND)
+        return props_dict
+
+    def Invalidate(self):
+        """Mark our offset cache as invalid"""
+        self._cached_offsets = False
+
+    def CheckCache(self):
+        """Refresh the offset cache if needed"""
+        if self._cached_offsets:
+            return
+        self.Refresh()
+        self._cached_offsets = True
+
+    def Refresh(self):
+        """Refresh the offset cache"""
+        self._root.Refresh(0)
+
+    def GetStructOffset(self, offset):
+        """Get the file offset of a given struct offset
+
+        Args:
+            offset: Offset within the 'struct' region of the device tree
+        Returns:
+            Position of @offset within the device tree binary
+        """
+        return self._fdt_obj.off_dt_struct() + offset
+
+    @classmethod
+    def Node(self, fdt, parent, offset, name, path):
+        """Create a new node
+
+        This is used by Fdt.Scan() to create a new node using the correct
+        class.
+
+        Args:
+            fdt: Fdt object
+            parent: Parent node, or None if this is the root node
+            offset: Offset of node
+            name: Node name
+            path: Full path to node
+        """
+        node = Node(fdt, parent, offset, name, path)
+        return node
+
+def FdtScan(fname):
+    """Returns a new Fdt object"""
+    dtb = Fdt(fname)
+    dtb.Scan()
+    return dtb
diff --git a/tools/u-boot-tools/dtoc/fdt_util.py b/tools/u-boot-tools/dtoc/fdt_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fbfc8877bd31777d2a8b68a45b05c59c0b0fd1d
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/fdt_util.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+# Utility functions for reading from a device tree. Once the upstream pylibfdt
+# implementation advances far enough, we should be able to drop these.
+
+import os
+import struct
+import sys
+import tempfile
+
+import command
+import tools
+
+VERSION3 = sys.version_info > (3, 0)
+
+def get_plain_bytes(val):
+    """Handle Python 3 strings"""
+    if isinstance(val, bytes):
+        val = val.decode('utf-8')
+    return val.encode('raw_unicode_escape')
+
+def fdt32_to_cpu(val):
+    """Convert a device tree cell to an integer
+
+    Args:
+        val: Value to convert (4-character string representing the cell value)
+
+    Return:
+        A native-endian integer value
+    """
+    if VERSION3:
+        # This code is not reached in Python 2
+        val = get_plain_bytes(val)  # pragma: no cover
+    return struct.unpack('>I', val)[0]
+
+def fdt_cells_to_cpu(val, cells):
+    """Convert one or two cells to a long integer
+
+    Args:
+        val: Value to convert (array of one or more 4-character strings)
+        cells: Number of cells to combine (1 or 2)
+
+    Return:
+        A native-endian long value
+    """
+    if not cells:
+        return 0
+    out = long(fdt32_to_cpu(val[0]))
+    if cells == 2:
+        out = out << 32 | fdt32_to_cpu(val[1])
+    return out
+
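+# Example (illustrative): a 64-bit value split across two cells, such as
+# <0x12345678 0x9abcdef0>, arrives here as two big-endian 4-byte strings and
+# is reassembled as:
+#
+#     fdt_cells_to_cpu(['\x12\x34\x56\x78', '\x9a\xbc\xde\xf0'], 2)
+#         -> 0x123456789abcdef0
+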
+def EnsureCompiled(fname, capture_stderr=False):
+    """Compile an fdt .dts source file into a .dtb binary blob if needed.
+
+    Args:
+        fname: Filename (if .dts it will be compiled). If not, it will be
+            left alone
+        capture_stderr: True to capture stderr from the dtc invocation
+
+    Returns:
+        Filename of resulting .dtb file
+    """
+    _, ext = os.path.splitext(fname)
+    if ext != '.dts':
+        return fname
+
+    dts_input = tools.GetOutputFilename('source.dts')
+    dtb_output = tools.GetOutputFilename('source.dtb')
+
+    search_paths = [os.path.join(os.getcwd(), 'include')]
+    root, _ = os.path.splitext(fname)
+    args = ['-E', '-P', '-x', 'assembler-with-cpp', '-D__ASSEMBLY__']
+    args += ['-Ulinux']
+    for path in search_paths:
+        args.extend(['-I', path])
+    args += ['-o', dts_input, fname]
+    command.Run('cc', *args)
+
+    # If we don't have a directory, put it in the tools tempdir
+    search_list = []
+    for path in search_paths:
+        search_list.extend(['-i', path])
+    args = ['-I', 'dts', '-o', dtb_output, '-O', 'dtb',
+            '-W', 'no-unit_address_vs_reg']
+    args.extend(search_list)
+    args.append(dts_input)
+    dtc = os.environ.get('DTC') or 'dtc'
+    command.Run(dtc, *args, capture_stderr=capture_stderr)
+    return dtb_output
+
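+# The EnsureCompiled() pipeline above is roughly equivalent to running these
+# shell commands (illustrative; the exact flags and paths are built in the
+# function, and 'board.dts' is just an example input):
+#
+#     cc -E -P -x assembler-with-cpp -D__ASSEMBLY__ -Ulinux -I include \
+#         -o source.dts board.dts
+#     dtc -I dts -O dtb -W no-unit_address_vs_reg -i include \
+#         -o source.dtb source.dts
+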
+def GetInt(node, propname, default=None):
+    """Get an integer from a property
+
+    Args:
+        node: Node object to read from
+        propname: property name to read
+        default: Default value to use if the node/property do not exist
+
+    Returns:
+        Integer value read, or default if none
+    """
+    prop = node.props.get(propname)
+    if not prop:
+        return default
+    if isinstance(prop.value, list):
+        raise ValueError("Node '%s' property '%s' has list value: expecting "
+                         "a single integer" % (node.name, propname))
+    value = fdt32_to_cpu(prop.value)
+    return value
+
+def GetString(node, propname, default=None):
+    """Get a string from a property
+
+    Args:
+        node: Node object to read from
+        propname: property name to read
+        default: Default value to use if the node/property do not exist
+
+    Returns:
+        String value read, or default if none
+    """
+    prop = node.props.get(propname)
+    if not prop:
+        return default
+    value = prop.value
+    if isinstance(value, list):
+        raise ValueError("Node '%s' property '%s' has list value: expecting "
+                         "a single string" % (node.name, propname))
+    return value
+
+def GetBool(node, propname, default=False):
+    """Get an boolean from a property
+
+    Args:
+        node: Node object to read from
+        propname: property name to read
+        default: Default value to use if the node/property do not exist
+
+    Returns:
+        Boolean value read, or default if none (if you set this to True the
+            function will always return True)
+    """
+    if propname in node.props:
+        return True
+    return default
+
+def GetByte(node, propname, default=None):
+    """Get an byte from a property
+
+    Args:
+        node: Node object to read from
+        propname: property name to read
+        default: Default value to use if the node/property do not exist
+
+    Returns:
+        Byte value read, or default if none
+    """
+    prop = node.props.get(propname)
+    if not prop:
+        return default
+    value = prop.value
+    if isinstance(value, list):
+        raise ValueError("Node '%s' property '%s' has list value: expecting "
+                         "a single byte" % (node.name, propname))
+    if len(value) != 1:
+        raise ValueError("Node '%s' property '%s' has length %d, expecting %d" %
+                         (node.name, propname, len(value), 1))
+    return ord(value[0])
+
+def GetPhandleList(node, propname):
+    """Get a list of phandles from a property
+
+    Args:
+        node: Node object to read from
+        propname: property name to read
+
+    Returns:
+        List of phandles read, each an integer
+    """
+    prop = node.props.get(propname)
+    if not prop:
+        return None
+    value = prop.value
+    if not isinstance(value, list):
+        value = [value]
+    return [fdt32_to_cpu(v) for v in value]
+
+def GetDatatype(node, propname, datatype):
+    """Get a value of a given type from a property
+
+    Args:
+        node: Node object to read from
+        propname: property name to read
+        datatype: Type to read (str or int)
+
+    Returns:
+        value read, or None if none
+
+    Raises:
+        ValueError if datatype is not str or int
+    """
+    if datatype == str:
+        return GetString(node, propname)
+    elif datatype == int:
+        return GetInt(node, propname)
+    raise ValueError("fdt_util internal error: Unknown data type '%s'" %
+                     datatype)
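+
+# Usage sketch for the property helpers above (illustrative; the node and
+# property names are taken from the dtoc test device trees and are not
+# requirements of this module):
+#
+#   dtb = fdt.FdtScan('dtoc_test_simple.dts')    # companion fdt module
+#   node = dtb.GetNode('/spl-test')
+#   intval = GetInt(node, 'intval', default=0)
+#   sval = GetString(node, 'stringval', default='')
+#   flag = GetBool(node, 'boolval')              # True only if the property exists
+#   clocks = GetPhandleList(node, 'clocks')      # list of ints, or None if missing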
diff --git a/tools/u-boot-tools/dtoc/test_dtoc.py b/tools/u-boot-tools/dtoc/test_dtoc.py
new file mode 100644
index 0000000000000000000000000000000000000000..11bead126070d275ed70ca9c0f0e717c67e9491d
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/test_dtoc.py
@@ -0,0 +1,710 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2012 The Chromium OS Authors.
+#
+
+"""Tests for the dtb_platdata module
+
+This includes unit tests for some functions and functional tests for the dtoc
+tool.
+"""
+
+import collections
+import os
+import struct
+import unittest
+
+import dtb_platdata
+from dtb_platdata import conv_name_to_c
+from dtb_platdata import get_compat_name
+from dtb_platdata import get_value
+from dtb_platdata import tab_to
+import fdt
+import fdt_util
+import test_util
+import tools
+
+our_path = os.path.dirname(os.path.realpath(__file__))
+
+
+HEADER = '''/*
+ * DO NOT MODIFY
+ *
+ * This file was generated by dtoc from a .dtb (device tree binary) file.
+ */
+
+#include <stdbool.h>
+#include <linux/libfdt.h>'''
+
+C_HEADER = '''/*
+ * DO NOT MODIFY
+ *
+ * This file was generated by dtoc from a .dtb (device tree binary) file.
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <dt-structs.h>
+'''
+
+
+
+def get_dtb_file(dts_fname, capture_stderr=False):
+    """Compile a .dts file to a .dtb
+
+    Args:
+        dts_fname: Filename of .dts file in the current directory
+        capture_stderr: True to capture and discard stderr output
+
+    Returns:
+        Filename of compiled file in output directory
+    """
+    return fdt_util.EnsureCompiled(os.path.join(our_path, dts_fname),
+                                   capture_stderr=capture_stderr)
+
+
+class TestDtoc(unittest.TestCase):
+    """Tests for dtoc"""
+    @classmethod
+    def setUpClass(cls):
+        tools.PrepareOutputDir(None)
+
+    @classmethod
+    def tearDownClass(cls):
+        tools._RemoveOutputDir()
+
+    def _WritePythonString(self, fname, data):
+        """Write a string with tabs expanded as done in this Python file
+
+        Args:
+            fname: Filename to write to
+            data: Raw string to convert
+        """
+        data = data.replace('\t', '\\t')
+        with open(fname, 'w') as fd:
+            fd.write(data)
+
+    def _CheckStrings(self, expected, actual):
+        """Check that a string matches its expected value
+
+        If the strings do not match, they are written to the /tmp directory in
+        the same Python format as is used here in the test. This allows for
+        easy comparison and update of the tests.
+
+        Args:
+            expected: Expected string
+            actual: Actual string
+        """
+        if expected != actual:
+            self._WritePythonString('/tmp/binman.expected', expected)
+            self._WritePythonString('/tmp/binman.actual', actual)
+            print 'Failures written to /tmp/binman.{expected,actual}'
+        self.assertEquals(expected, actual)
+
+    def test_name(self):
+        """Test conversion of device tree names to C identifiers"""
+        self.assertEqual('serial_at_0x12', conv_name_to_c('serial@0x12'))
+        self.assertEqual('vendor_clock_frequency',
+                         conv_name_to_c('vendor,clock-frequency'))
+        self.assertEqual('rockchip_rk3399_sdhci_5_1',
+                         conv_name_to_c('rockchip,rk3399-sdhci-5.1'))
+
+    def test_tab_to(self):
+        """Test operation of tab_to() function"""
+        self.assertEqual('fred ', tab_to(0, 'fred'))
+        self.assertEqual('fred\t', tab_to(1, 'fred'))
+        self.assertEqual('fred was here ', tab_to(1, 'fred was here'))
+        self.assertEqual('fred was here\t\t', tab_to(3, 'fred was here'))
+        self.assertEqual('exactly8 ', tab_to(1, 'exactly8'))
+        self.assertEqual('exactly8\t', tab_to(2, 'exactly8'))
+
+    def test_get_value(self):
+        """Test operation of get_value() function"""
+        self.assertEqual('0x45',
+                         get_value(fdt.TYPE_INT, struct.pack('>I', 0x45)))
+        self.assertEqual('0x45',
+                         get_value(fdt.TYPE_BYTE, struct.pack('<I', 0x45)))
+        self.assertEqual('0x0',
+                         get_value(fdt.TYPE_BYTE, struct.pack('>I', 0x45)))
+        self.assertEqual('"test"', get_value(fdt.TYPE_STRING, 'test'))
+        self.assertEqual('true', get_value(fdt.TYPE_BOOL, None))
+
+    def test_get_compat_name(self):
+        """Test operation of get_compat_name() function"""
+        Prop = collections.namedtuple('Prop', ['value'])
+        Node = collections.namedtuple('Node', ['props'])
+
+        prop = Prop(['rockchip,rk3399-sdhci-5.1', 'arasan,sdhci-5.1'])
+        node = Node({'compatible': prop})
+        self.assertEqual(('rockchip_rk3399_sdhci_5_1', ['arasan_sdhci_5_1']),
+                         get_compat_name(node))
+
+        prop = Prop(['rockchip,rk3399-sdhci-5.1'])
+        node = Node({'compatible': prop})
+        self.assertEqual(('rockchip_rk3399_sdhci_5_1', []),
+                         get_compat_name(node))
+
+        prop = Prop(['rockchip,rk3399-sdhci-5.1', 'arasan,sdhci-5.1', 'third'])
+        node = Node({'compatible': prop})
+        self.assertEqual(('rockchip_rk3399_sdhci_5_1',
+                          ['arasan_sdhci_5_1', 'third']),
+                         get_compat_name(node))
+
+    def test_empty_file(self):
+        """Test output from a device tree file with no nodes"""
+        dtb_file = get_dtb_file('dtoc_test_empty.dts')
+        output = tools.GetOutputFilename('output')
+        dtb_platdata.run_steps(['struct'], dtb_file, False, output)
+        with open(output) as infile:
+            lines = infile.read().splitlines()
+        self.assertEqual(HEADER.splitlines(), lines)
+
+        dtb_platdata.run_steps(['platdata'], dtb_file, False, output)
+        with open(output) as infile:
+            lines = infile.read().splitlines()
+        self.assertEqual(C_HEADER.splitlines() + [''], lines)
+
+    def test_simple(self):
+        """Test output from some simple nodes with various types of data"""
+        dtb_file = get_dtb_file('dtoc_test_simple.dts')
+        output = tools.GetOutputFilename('output')
+        dtb_platdata.run_steps(['struct'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(HEADER + '''
+struct dtd_sandbox_i2c_test {
+};
+struct dtd_sandbox_pmic_test {
+\tbool\t\tlow_power;
+\tfdt64_t\t\treg[2];
+};
+struct dtd_sandbox_spl_test {
+\tbool\t\tboolval;
+\tunsigned char\tbytearray[3];
+\tunsigned char\tbyteval;
+\tfdt32_t\t\tintarray[4];
+\tfdt32_t\t\tintval;
+\tunsigned char\tlongbytearray[9];
+\tunsigned char\tnotstring[5];
+\tconst char *\tstringarray[3];
+\tconst char *\tstringval;
+};
+struct dtd_sandbox_spl_test_2 {
+};
+''', data)
+
+        dtb_platdata.run_steps(['platdata'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(C_HEADER + '''
+static struct dtd_sandbox_spl_test dtv_spl_test = {
+\t.bytearray\t\t= {0x6, 0x0, 0x0},
+\t.byteval\t\t= 0x5,
+\t.intval\t\t\t= 0x1,
+\t.notstring\t\t= {0x20, 0x21, 0x22, 0x10, 0x0},
+\t.longbytearray\t\t= {0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10,
+\t\t0x11},
+\t.stringval\t\t= "message",
+\t.boolval\t\t= true,
+\t.intarray\t\t= {0x2, 0x3, 0x4, 0x0},
+\t.stringarray\t\t= {"multi-word", "message", ""},
+};
+U_BOOT_DEVICE(spl_test) = {
+\t.name\t\t= "sandbox_spl_test",
+\t.platdata\t= &dtv_spl_test,
+\t.platdata_size\t= sizeof(dtv_spl_test),
+};
+
+static struct dtd_sandbox_spl_test dtv_spl_test2 = {
+\t.bytearray\t\t= {0x1, 0x23, 0x34},
+\t.byteval\t\t= 0x8,
+\t.intval\t\t\t= 0x3,
+\t.longbytearray\t\t= {0x9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+\t\t0x0},
+\t.stringval\t\t= "message2",
+\t.intarray\t\t= {0x5, 0x0, 0x0, 0x0},
+\t.stringarray\t\t= {"another", "multi-word", "message"},
+};
+U_BOOT_DEVICE(spl_test2) = {
+\t.name\t\t= "sandbox_spl_test",
+\t.platdata\t= &dtv_spl_test2,
+\t.platdata_size\t= sizeof(dtv_spl_test2),
+};
+
+static struct dtd_sandbox_spl_test dtv_spl_test3 = {
+\t.stringarray\t\t= {"one", "", ""},
+};
+U_BOOT_DEVICE(spl_test3) = {
+\t.name\t\t= "sandbox_spl_test",
+\t.platdata\t= &dtv_spl_test3,
+\t.platdata_size\t= sizeof(dtv_spl_test3),
+};
+
+static struct dtd_sandbox_spl_test_2 dtv_spl_test4 = {
+};
+U_BOOT_DEVICE(spl_test4) = {
+\t.name\t\t= "sandbox_spl_test_2",
+\t.platdata\t= &dtv_spl_test4,
+\t.platdata_size\t= sizeof(dtv_spl_test4),
+};
+
+static struct dtd_sandbox_i2c_test dtv_i2c_at_0 = {
+};
+U_BOOT_DEVICE(i2c_at_0) = {
+\t.name\t\t= "sandbox_i2c_test",
+\t.platdata\t= &dtv_i2c_at_0,
+\t.platdata_size\t= sizeof(dtv_i2c_at_0),
+};
+
+static struct dtd_sandbox_pmic_test dtv_pmic_at_9 = {
+\t.low_power\t\t= true,
+\t.reg\t\t\t= {0x9, 0x0},
+};
+U_BOOT_DEVICE(pmic_at_9) = {
+\t.name\t\t= "sandbox_pmic_test",
+\t.platdata\t= &dtv_pmic_at_9,
+\t.platdata_size\t= sizeof(dtv_pmic_at_9),
+};
+
+''', data)
+
+    def test_phandle(self):
+        """Test output from a node containing a phandle reference"""
+        dtb_file = get_dtb_file('dtoc_test_phandle.dts')
+        output = tools.GetOutputFilename('output')
+        dtb_platdata.run_steps(['struct'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(HEADER + '''
+struct dtd_source {
+\tstruct phandle_2_arg clocks[4];
+};
+struct dtd_target {
+\tfdt32_t\t\tintval;
+};
+''', data)
+
+        dtb_platdata.run_steps(['platdata'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(C_HEADER + '''
+static struct dtd_target dtv_phandle_target = {
+\t.intval\t\t\t= 0x0,
+};
+U_BOOT_DEVICE(phandle_target) = {
+\t.name\t\t= "target",
+\t.platdata\t= &dtv_phandle_target,
+\t.platdata_size\t= sizeof(dtv_phandle_target),
+};
+
+static struct dtd_target dtv_phandle2_target = {
+\t.intval\t\t\t= 0x1,
+};
+U_BOOT_DEVICE(phandle2_target) = {
+\t.name\t\t= "target",
+\t.platdata\t= &dtv_phandle2_target,
+\t.platdata_size\t= sizeof(dtv_phandle2_target),
+};
+
+static struct dtd_target dtv_phandle3_target = {
+\t.intval\t\t\t= 0x2,
+};
+U_BOOT_DEVICE(phandle3_target) = {
+\t.name\t\t= "target",
+\t.platdata\t= &dtv_phandle3_target,
+\t.platdata_size\t= sizeof(dtv_phandle3_target),
+};
+
+static struct dtd_source dtv_phandle_source = {
+\t.clocks\t\t\t= {
+\t\t\t{&dtv_phandle_target, {}},
+\t\t\t{&dtv_phandle2_target, {11}},
+\t\t\t{&dtv_phandle3_target, {12, 13}},
+\t\t\t{&dtv_phandle_target, {}},},
+};
+U_BOOT_DEVICE(phandle_source) = {
+\t.name\t\t= "source",
+\t.platdata\t= &dtv_phandle_source,
+\t.platdata_size\t= sizeof(dtv_phandle_source),
+};
+
+static struct dtd_source dtv_phandle_source2 = {
+\t.clocks\t\t\t= {
+\t\t\t{&dtv_phandle_target, {}},},
+};
+U_BOOT_DEVICE(phandle_source2) = {
+\t.name\t\t= "source",
+\t.platdata\t= &dtv_phandle_source2,
+\t.platdata_size\t= sizeof(dtv_phandle_source2),
+};
+
+''', data)
+
+    def test_phandle_single(self):
+        """Test output from a node containing a phandle reference"""
+        dtb_file = get_dtb_file('dtoc_test_phandle_single.dts')
+        output = tools.GetOutputFilename('output')
+        dtb_platdata.run_steps(['struct'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(HEADER + '''
+struct dtd_source {
+\tstruct phandle_0_arg clocks[1];
+};
+struct dtd_target {
+\tfdt32_t\t\tintval;
+};
+''', data)
+
+    def test_phandle_reorder(self):
+        """Test that phandle targets are generated before their references"""
+        dtb_file = get_dtb_file('dtoc_test_phandle_reorder.dts')
+        output = tools.GetOutputFilename('output')
+        dtb_platdata.run_steps(['platdata'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(C_HEADER + '''
+static struct dtd_target dtv_phandle_target = {
+};
+U_BOOT_DEVICE(phandle_target) = {
+\t.name\t\t= "target",
+\t.platdata\t= &dtv_phandle_target,
+\t.platdata_size\t= sizeof(dtv_phandle_target),
+};
+
+static struct dtd_source dtv_phandle_source2 = {
+\t.clocks\t\t\t= {
+\t\t\t{&dtv_phandle_target, {}},},
+};
+U_BOOT_DEVICE(phandle_source2) = {
+\t.name\t\t= "source",
+\t.platdata\t= &dtv_phandle_source2,
+\t.platdata_size\t= sizeof(dtv_phandle_source2),
+};
+
+''', data)
+
+    def test_phandle_bad(self):
+        """Test a node containing an invalid phandle fails"""
+        dtb_file = get_dtb_file('dtoc_test_phandle_bad.dts',
+                                capture_stderr=True)
+        output = tools.GetOutputFilename('output')
+        with self.assertRaises(ValueError) as e:
+            dtb_platdata.run_steps(['struct'], dtb_file, False, output)
+        self.assertIn("Cannot parse 'clocks' in node 'phandle-source'",
+                      str(e.exception))
+
+    def test_phandle_bad2(self):
+        """Test a phandle target missing its #*-cells property"""
+        dtb_file = get_dtb_file('dtoc_test_phandle_bad2.dts',
+                                capture_stderr=True)
+        output = tools.GetOutputFilename('output')
+        with self.assertRaises(ValueError) as e:
+            dtb_platdata.run_steps(['struct'], dtb_file, False, output)
+        self.assertIn("Node 'phandle-target' has no '#clock-cells' property",
+                      str(e.exception))
+
+    def test_aliases(self):
+        """Test output from a node with multiple compatible strings"""
+        dtb_file = get_dtb_file('dtoc_test_aliases.dts')
+        output = tools.GetOutputFilename('output')
+        dtb_platdata.run_steps(['struct'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(HEADER + '''
+struct dtd_compat1 {
+\tfdt32_t\t\tintval;
+};
+#define dtd_compat2_1_fred dtd_compat1
+#define dtd_compat3 dtd_compat1
+''', data)
+
+        dtb_platdata.run_steps(['platdata'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(C_HEADER + '''
+static struct dtd_compat1 dtv_spl_test = {
+\t.intval\t\t\t= 0x1,
+};
+U_BOOT_DEVICE(spl_test) = {
+\t.name\t\t= "compat1",
+\t.platdata\t= &dtv_spl_test,
+\t.platdata_size\t= sizeof(dtv_spl_test),
+};
+
+''', data)
+
+    def test_addresses64(self):
+        """Test output from a node with a 'reg' property with na=2, ns=2"""
+        dtb_file = get_dtb_file('dtoc_test_addr64.dts')
+        output = tools.GetOutputFilename('output')
+        dtb_platdata.run_steps(['struct'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(HEADER + '''
+struct dtd_test1 {
+\tfdt64_t\t\treg[2];
+};
+struct dtd_test2 {
+\tfdt64_t\t\treg[2];
+};
+struct dtd_test3 {
+\tfdt64_t\t\treg[4];
+};
+''', data)
+
+        dtb_platdata.run_steps(['platdata'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(C_HEADER + '''
+static struct dtd_test1 dtv_test1 = {
+\t.reg\t\t\t= {0x1234, 0x5678},
+};
+U_BOOT_DEVICE(test1) = {
+\t.name\t\t= "test1",
+\t.platdata\t= &dtv_test1,
+\t.platdata_size\t= sizeof(dtv_test1),
+};
+
+static struct dtd_test2 dtv_test2 = {
+\t.reg\t\t\t= {0x1234567890123456, 0x9876543210987654},
+};
+U_BOOT_DEVICE(test2) = {
+\t.name\t\t= "test2",
+\t.platdata\t= &dtv_test2,
+\t.platdata_size\t= sizeof(dtv_test2),
+};
+
+static struct dtd_test3 dtv_test3 = {
+\t.reg\t\t\t= {0x1234567890123456, 0x9876543210987654, 0x2, 0x3},
+};
+U_BOOT_DEVICE(test3) = {
+\t.name\t\t= "test3",
+\t.platdata\t= &dtv_test3,
+\t.platdata_size\t= sizeof(dtv_test3),
+};
+
+''', data)
+
+    def test_addresses32(self):
+        """Test output from a node with a 'reg' property with na=1, ns=1"""
+        dtb_file = get_dtb_file('dtoc_test_addr32.dts')
+        output = tools.GetOutputFilename('output')
+        dtb_platdata.run_steps(['struct'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(HEADER + '''
+struct dtd_test1 {
+\tfdt32_t\t\treg[2];
+};
+struct dtd_test2 {
+\tfdt32_t\t\treg[4];
+};
+''', data)
+
+        dtb_platdata.run_steps(['platdata'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(C_HEADER + '''
+static struct dtd_test1 dtv_test1 = {
+\t.reg\t\t\t= {0x1234, 0x5678},
+};
+U_BOOT_DEVICE(test1) = {
+\t.name\t\t= "test1",
+\t.platdata\t= &dtv_test1,
+\t.platdata_size\t= sizeof(dtv_test1),
+};
+
+static struct dtd_test2 dtv_test2 = {
+\t.reg\t\t\t= {0x12345678, 0x98765432, 0x2, 0x3},
+};
+U_BOOT_DEVICE(test2) = {
+\t.name\t\t= "test2",
+\t.platdata\t= &dtv_test2,
+\t.platdata_size\t= sizeof(dtv_test2),
+};
+
+''', data)
+
+    def test_addresses64_32(self):
+        """Test output from a node with a 'reg' property with na=2, ns=1"""
+        dtb_file = get_dtb_file('dtoc_test_addr64_32.dts')
+        output = tools.GetOutputFilename('output')
+        dtb_platdata.run_steps(['struct'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(HEADER + '''
+struct dtd_test1 {
+\tfdt64_t\t\treg[2];
+};
+struct dtd_test2 {
+\tfdt64_t\t\treg[2];
+};
+struct dtd_test3 {
+\tfdt64_t\t\treg[4];
+};
+''', data)
+
+        dtb_platdata.run_steps(['platdata'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(C_HEADER + '''
+static struct dtd_test1 dtv_test1 = {
+\t.reg\t\t\t= {0x123400000000, 0x5678},
+};
+U_BOOT_DEVICE(test1) = {
+\t.name\t\t= "test1",
+\t.platdata\t= &dtv_test1,
+\t.platdata_size\t= sizeof(dtv_test1),
+};
+
+static struct dtd_test2 dtv_test2 = {
+\t.reg\t\t\t= {0x1234567890123456, 0x98765432},
+};
+U_BOOT_DEVICE(test2) = {
+\t.name\t\t= "test2",
+\t.platdata\t= &dtv_test2,
+\t.platdata_size\t= sizeof(dtv_test2),
+};
+
+static struct dtd_test3 dtv_test3 = {
+\t.reg\t\t\t= {0x1234567890123456, 0x98765432, 0x2, 0x3},
+};
+U_BOOT_DEVICE(test3) = {
+\t.name\t\t= "test3",
+\t.platdata\t= &dtv_test3,
+\t.platdata_size\t= sizeof(dtv_test3),
+};
+
+''', data)
+
+    def test_addresses32_64(self):
+        """Test output from a node with a 'reg' property with na=1, ns=2"""
+        dtb_file = get_dtb_file('dtoc_test_addr32_64.dts')
+        output = tools.GetOutputFilename('output')
+        dtb_platdata.run_steps(['struct'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(HEADER + '''
+struct dtd_test1 {
+\tfdt64_t\t\treg[2];
+};
+struct dtd_test2 {
+\tfdt64_t\t\treg[2];
+};
+struct dtd_test3 {
+\tfdt64_t\t\treg[4];
+};
+''', data)
+
+        dtb_platdata.run_steps(['platdata'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(C_HEADER + '''
+static struct dtd_test1 dtv_test1 = {
+\t.reg\t\t\t= {0x1234, 0x567800000000},
+};
+U_BOOT_DEVICE(test1) = {
+\t.name\t\t= "test1",
+\t.platdata\t= &dtv_test1,
+\t.platdata_size\t= sizeof(dtv_test1),
+};
+
+static struct dtd_test2 dtv_test2 = {
+\t.reg\t\t\t= {0x12345678, 0x9876543210987654},
+};
+U_BOOT_DEVICE(test2) = {
+\t.name\t\t= "test2",
+\t.platdata\t= &dtv_test2,
+\t.platdata_size\t= sizeof(dtv_test2),
+};
+
+static struct dtd_test3 dtv_test3 = {
+\t.reg\t\t\t= {0x12345678, 0x9876543210987654, 0x2, 0x3},
+};
+U_BOOT_DEVICE(test3) = {
+\t.name\t\t= "test3",
+\t.platdata\t= &dtv_test3,
+\t.platdata_size\t= sizeof(dtv_test3),
+};
+
+''', data)
+
+    def test_bad_reg(self):
+        """Test that a reg property with an invalid type generates an error"""
+        # Capture stderr since dtc will emit warnings for this file
+        dtb_file = get_dtb_file('dtoc_test_bad_reg.dts', capture_stderr=True)
+        output = tools.GetOutputFilename('output')
+        with self.assertRaises(ValueError) as e:
+            dtb_platdata.run_steps(['struct'], dtb_file, False, output)
+        self.assertIn("Node 'spl-test' reg property is not an int",
+                      str(e.exception))
+
+    def test_bad_reg2(self):
+        """Test that a reg property with an invalid cell count is detected"""
+        # Capture stderr since dtc will emit warnings for this file
+        dtb_file = get_dtb_file('dtoc_test_bad_reg2.dts', capture_stderr=True)
+        output = tools.GetOutputFilename('output')
+        with self.assertRaises(ValueError) as e:
+            dtb_platdata.run_steps(['struct'], dtb_file, False, output)
+        self.assertIn("Node 'spl-test' reg property has 3 cells which is not a multiple of na + ns = 1 + 1)",
+                      str(e.exception))
+
+    def test_add_prop(self):
+        """Test that a subequent node can add a new property to a struct"""
+        dtb_file = get_dtb_file('dtoc_test_add_prop.dts')
+        output = tools.GetOutputFilename('output')
+        dtb_platdata.run_steps(['struct'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(HEADER + '''
+struct dtd_sandbox_spl_test {
+\tfdt32_t\t\tintarray;
+\tfdt32_t\t\tintval;
+};
+''', data)
+
+        dtb_platdata.run_steps(['platdata'], dtb_file, False, output)
+        with open(output) as infile:
+            data = infile.read()
+        self._CheckStrings(C_HEADER + '''
+static struct dtd_sandbox_spl_test dtv_spl_test = {
+\t.intval\t\t\t= 0x1,
+};
+U_BOOT_DEVICE(spl_test) = {
+\t.name\t\t= "sandbox_spl_test",
+\t.platdata\t= &dtv_spl_test,
+\t.platdata_size\t= sizeof(dtv_spl_test),
+};
+
+static struct dtd_sandbox_spl_test dtv_spl_test2 = {
+\t.intarray\t\t= 0x5,
+};
+U_BOOT_DEVICE(spl_test2) = {
+\t.name\t\t= "sandbox_spl_test",
+\t.platdata\t= &dtv_spl_test2,
+\t.platdata_size\t= sizeof(dtv_spl_test2),
+};
+
+''', data)
+
+    def testStdout(self):
+        """Test output to stdout"""
+        dtb_file = get_dtb_file('dtoc_test_simple.dts')
+        with test_util.capture_sys_output() as (stdout, stderr):
+            dtb_platdata.run_steps(['struct'], dtb_file, False, '-')
+
+    def testNoCommand(self):
+        """Test running dtoc without a command"""
+        with self.assertRaises(ValueError) as e:
+            dtb_platdata.run_steps([], '', False, '')
+        self.assertIn("Please specify a command: struct, platdata",
+                      str(e.exception))
+
+    def testBadCommand(self):
+        """Test running dtoc with an invalid command"""
+        dtb_file = get_dtb_file('dtoc_test_simple.dts')
+        output = tools.GetOutputFilename('output')
+        with self.assertRaises(ValueError) as e:
+            dtb_platdata.run_steps(['invalid-cmd'], dtb_file, False, output)
+        self.assertIn("Unknown command 'invalid-cmd': (use: struct, platdata)",
+                      str(e.exception))
diff --git a/tools/u-boot-tools/dtoc/test_fdt b/tools/u-boot-tools/dtoc/test_fdt
new file mode 120000
index 0000000000000000000000000000000000000000..7c3b23031f0ec723165dfeb931ebdee75e8fc7a1
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/test_fdt
@@ -0,0 +1 @@
+test_fdt.py
\ No newline at end of file
diff --git a/tools/u-boot-tools/dtoc/test_fdt.py b/tools/u-boot-tools/dtoc/test_fdt.py
new file mode 100755
index 0000000000000000000000000000000000000000..8d70dd2a294a59f28804d99a04eed9ac2f1ddab8
--- /dev/null
+++ b/tools/u-boot-tools/dtoc/test_fdt.py
@@ -0,0 +1,562 @@
+#!/usr/bin/python
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+from optparse import OptionParser
+import glob
+import os
+import sys
+import unittest
+
+# Bring in the patman libraries
+our_path = os.path.dirname(os.path.realpath(__file__))
+for dirname in ['../patman', '..']:
+    sys.path.insert(0, os.path.join(our_path, dirname))
+
+import command
+import fdt
+from fdt import TYPE_BYTE, TYPE_INT, TYPE_STRING, TYPE_BOOL
+import fdt_util
+from fdt_util import fdt32_to_cpu
+import libfdt
+import test_util
+import tools
+
+def _GetPropertyValue(dtb, node, prop_name):
+    """Low-level function to get the property value based on its offset
+
+    This looks directly in the device tree at the property's offset to find
+    its value. It is useful as a check that the property is in the correct
+    place.
+
+    Args:
+        dtb: Fdt object containing the node
+        node: Node to look in
+        prop_name: Property name to find
+
+    Returns:
+        Tuple:
+            Prop object found
+            Value of property as a string (found using property offset)
+    """
+    prop = node.props[prop_name]
+
+    # Add 12, which is sizeof(struct fdt_property), to get to start of data
+    offset = prop.GetOffset() + 12
+    data = dtb.GetContents()[offset:offset + len(prop.value)]
+    return prop, [chr(x) for x in data]
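+
+# Layout note (a sketch, not used by the tests): each property in the blob is
+# stored as a 12-byte struct fdt_property header -- tag, len and nameoff, all
+# big-endian 32-bit -- followed by the raw value, hence the '+ 12' above:
+#
+#   import struct
+#   tag, length, nameoff = struct.unpack(
+#       '>III', dtb.GetContents()[prop.GetOffset():prop.GetOffset() + 12])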
+
+
+class TestFdt(unittest.TestCase):
+    """Tests for the Fdt module
+
+    This includes unit tests for some functions and functional tests for the fdt
+    module.
+    """
+    @classmethod
+    def setUpClass(cls):
+        tools.PrepareOutputDir(None)
+
+    @classmethod
+    def tearDownClass(cls):
+        tools.FinaliseOutputDir()
+
+    def setUp(self):
+        self.dtb = fdt.FdtScan('tools/dtoc/dtoc_test_simple.dts')
+
+    def testFdt(self):
+        """Test that we can open an Fdt"""
+        self.dtb.Scan()
+        root = self.dtb.GetRoot()
+        self.assertTrue(isinstance(root, fdt.Node))
+
+    def testGetNode(self):
+        """Test the GetNode() method"""
+        node = self.dtb.GetNode('/spl-test')
+        self.assertTrue(isinstance(node, fdt.Node))
+        node = self.dtb.GetNode('/i2c@0/pmic@9')
+        self.assertTrue(isinstance(node, fdt.Node))
+        self.assertEqual('pmic@9', node.name)
+        self.assertIsNone(self.dtb.GetNode('/i2c@0/pmic@9/missing'))
+
+    def testFlush(self):
+        """Check that we can flush the device tree out to its file"""
+        fname = self.dtb._fname
+        with open(fname) as fd:
+            data = fd.read()
+        os.remove(fname)
+        with self.assertRaises(IOError):
+            open(fname)
+        self.dtb.Flush()
+        with open(fname) as fd:
+            data = fd.read()
+
+    def testPack(self):
+        """Test that packing a device tree works"""
+        self.dtb.Pack()
+
+    def testGetFdt(self):
+        """Tetst that we can access the raw device-tree data"""
+        self.assertTrue(isinstance(self.dtb.GetContents(), bytearray))
+
+    def testGetProps(self):
+        """Tests obtaining a list of properties"""
+        node = self.dtb.GetNode('/spl-test')
+        props = self.dtb.GetProps(node)
+        self.assertEqual(['boolval', 'bytearray', 'byteval', 'compatible',
+                          'intarray', 'intval', 'longbytearray', 'notstring',
+                          'stringarray', 'stringval', 'u-boot,dm-pre-reloc'],
+                         sorted(props.keys()))
+
+    def testCheckError(self):
+        """Tests the ChecKError() function"""
+        with self.assertRaises(ValueError) as e:
+            fdt.CheckErr(-libfdt.NOTFOUND, 'hello')
+        self.assertIn('FDT_ERR_NOTFOUND: hello', str(e.exception))
+
+    def testGetNodeFdt(self):
+        """Test that a node can return the Fdt object it belongs to"""
+        node = self.dtb.GetNode('/spl-test')
+        self.assertEqual(self.dtb, node.GetFdt())
+
+class TestNode(unittest.TestCase):
+    """Test operation of the Node class"""
+
+    @classmethod
+    def setUpClass(cls):
+        tools.PrepareOutputDir(None)
+
+    @classmethod
+    def tearDownClass(cls):
+        tools.FinaliseOutputDir()
+
+    def setUp(self):
+        self.dtb = fdt.FdtScan('tools/dtoc/dtoc_test_simple.dts')
+        self.node = self.dtb.GetNode('/spl-test')
+
+    def testOffset(self):
+        """Tests that we can obtain the offset of a node"""
+        self.assertTrue(self.node.Offset() > 0)
+
+    def testDelete(self):
+        """Tests that we can delete a property"""
+        node2 = self.dtb.GetNode('/spl-test2')
+        offset1 = node2.Offset()
+        self.node.DeleteProp('intval')
+        offset2 = node2.Offset()
+        self.assertTrue(offset2 < offset1)
+        self.node.DeleteProp('intarray')
+        offset3 = node2.Offset()
+        self.assertTrue(offset3 < offset2)
+        with self.assertRaises(libfdt.FdtException):
+            self.node.DeleteProp('missing')
+
+    def testDeleteGetOffset(self):
+        """Test that property offset update when properties are deleted"""
+        self.node.DeleteProp('intval')
+        prop, value = _GetPropertyValue(self.dtb, self.node, 'longbytearray')
+        self.assertEqual(prop.value, value)
+
+    def testFindNode(self):
+        """Tests that we can find a node using the FindNode() functoin"""
+        node = self.dtb.GetRoot().FindNode('i2c@0')
+        self.assertEqual('i2c@0', node.name)
+        subnode = node.FindNode('pmic@9')
+        self.assertEqual('pmic@9', subnode.name)
+        self.assertEqual(None, node.FindNode('missing'))
+
+    def testRefreshMissingNode(self):
+        """Test refreshing offsets when an extra node is present in dtb"""
+        # Delete it from our tables, not the device tree
+        del self.dtb._root.subnodes[-1]
+        with self.assertRaises(ValueError) as e:
+            self.dtb.Refresh()
+        self.assertIn('Internal error, offset', str(e.exception))
+
+    def testRefreshExtraNode(self):
+        """Test refreshing offsets when an expected node is missing"""
+        # Delete it from the device tree, not our tables
+        self.dtb.GetFdtObj().del_node(self.node.Offset())
+        with self.assertRaises(ValueError) as e:
+            self.dtb.Refresh()
+        self.assertIn('Internal error, node name mismatch '
+                      'spl-test != spl-test2', str(e.exception))
+
+    def testRefreshMissingProp(self):
+        """Test refreshing offsets when an extra property is present in dtb"""
+        # Delete it from our tables, not the device tree
+        del self.node.props['notstring']
+        with self.assertRaises(ValueError) as e:
+            self.dtb.Refresh()
+        self.assertIn("Internal error, property 'notstring' missing, offset ",
+                      str(e.exception))
+
+    def testLookupPhandle(self):
+        """Test looking up a single phandle"""
+        dtb = fdt.FdtScan('tools/dtoc/dtoc_test_phandle.dts')
+        node = dtb.GetNode('/phandle-source2')
+        prop = node.props['clocks']
+        target = dtb.GetNode('/phandle-target')
+        self.assertEqual(target, dtb.LookupPhandle(fdt32_to_cpu(prop.value)))
+
+
+class TestProp(unittest.TestCase):
+    """Test operation of the Prop class"""
+
+    @classmethod
+    def setUpClass(cls):
+        tools.PrepareOutputDir(None)
+
+    @classmethod
+    def tearDownClass(cls):
+        tools.FinaliseOutputDir()
+
+    def setUp(self):
+        self.dtb = fdt.FdtScan('tools/dtoc/dtoc_test_simple.dts')
+        self.node = self.dtb.GetNode('/spl-test')
+        self.fdt = self.dtb.GetFdtObj()
+
+    def testMissingNode(self):
+        self.assertEqual(None, self.dtb.GetNode('missing'))
+
+    def testPhandle(self):
+        dtb = fdt.FdtScan('tools/dtoc/dtoc_test_phandle.dts')
+        node = dtb.GetNode('/phandle-source2')
+        prop = node.props['clocks']
+        self.assertTrue(fdt32_to_cpu(prop.value) > 0)
+
+    def _ConvertProp(self, prop_name):
+        """Helper function to look up a property in self.node and return it
+
+        Args:
+            prop_name: Property name to find
+
+        Returns:
+            fdt.Prop object for this property
+        """
+        p = self.fdt.getprop(self.node.Offset(), prop_name)
+        return fdt.Prop(self.node, -1, prop_name, p)
+
+    def testMakeProp(self):
+        """Test we can convert all the the types that are supported"""
+        prop = self._ConvertProp('boolval')
+        self.assertEqual(fdt.TYPE_BOOL, prop.type)
+        self.assertEqual(True, prop.value)
+
+        prop = self._ConvertProp('intval')
+        self.assertEqual(fdt.TYPE_INT, prop.type)
+        self.assertEqual(1, fdt32_to_cpu(prop.value))
+
+        prop = self._ConvertProp('intarray')
+        self.assertEqual(fdt.TYPE_INT, prop.type)
+        val = [fdt32_to_cpu(val) for val in prop.value]
+        self.assertEqual([2, 3, 4], val)
+
+        prop = self._ConvertProp('byteval')
+        self.assertEqual(fdt.TYPE_BYTE, prop.type)
+        self.assertEqual(5, ord(prop.value))
+
+        prop = self._ConvertProp('longbytearray')
+        self.assertEqual(fdt.TYPE_BYTE, prop.type)
+        val = [ord(val) for val in prop.value]
+        self.assertEqual([9, 10, 11, 12, 13, 14, 15, 16, 17], val)
+
+        prop = self._ConvertProp('stringval')
+        self.assertEqual(fdt.TYPE_STRING, prop.type)
+        self.assertEqual('message', prop.value)
+
+        prop = self._ConvertProp('stringarray')
+        self.assertEqual(fdt.TYPE_STRING, prop.type)
+        self.assertEqual(['multi-word', 'message'], prop.value)
+
+        prop = self._ConvertProp('notstring')
+        self.assertEqual(fdt.TYPE_BYTE, prop.type)
+        val = [ord(val) for val in prop.value]
+        self.assertEqual([0x20, 0x21, 0x22, 0x10, 0], val)
+
+    def testGetEmpty(self):
+        """Tests the GetEmpty() function for the various supported types"""
+        self.assertEqual(True, fdt.Prop.GetEmpty(fdt.TYPE_BOOL))
+        self.assertEqual(chr(0), fdt.Prop.GetEmpty(fdt.TYPE_BYTE))
+        self.assertEqual(chr(0) * 4, fdt.Prop.GetEmpty(fdt.TYPE_INT))
+        self.assertEqual('', fdt.Prop.GetEmpty(fdt.TYPE_STRING))
+
+    def testGetOffset(self):
+        """Test we can get the offset of a property"""
+        prop, value = _GetPropertyValue(self.dtb, self.node, 'longbytearray')
+        self.assertEqual(prop.value, value)
+
+    def testWiden(self):
+        """Test widening of values"""
+        node2 = self.dtb.GetNode('/spl-test2')
+        prop = self.node.props['intval']
+
+        # No action
+        prop2 = node2.props['intval']
+        prop.Widen(prop2)
+        self.assertEqual(fdt.TYPE_INT, prop.type)
+        self.assertEqual(1, fdt32_to_cpu(prop.value))
+
+        # Convert single value to array
+        prop2 = self.node.props['intarray']
+        prop.Widen(prop2)
+        self.assertEqual(fdt.TYPE_INT, prop.type)
+        self.assertTrue(isinstance(prop.value, list))
+
+        # A 4-byte array looks like a single integer. When widened by a longer
+        # byte array, it should turn into an array.
+        prop = self.node.props['longbytearray']
+        prop2 = node2.props['longbytearray']
+        self.assertFalse(isinstance(prop2.value, list))
+        self.assertEqual(4, len(prop2.value))
+        prop2.Widen(prop)
+        self.assertTrue(isinstance(prop2.value, list))
+        self.assertEqual(9, len(prop2.value))
+
+        # Similarly for a string array
+        prop = self.node.props['stringval']
+        prop2 = node2.props['stringarray']
+        self.assertFalse(isinstance(prop.value, list))
+        self.assertEqual(7, len(prop.value))
+        prop.Widen(prop2)
+        self.assertTrue(isinstance(prop.value, list))
+        self.assertEqual(3, len(prop.value))
+
+        # Enlarging an existing array
+        prop = self.node.props['stringarray']
+        prop2 = node2.props['stringarray']
+        self.assertTrue(isinstance(prop.value, list))
+        self.assertEqual(2, len(prop.value))
+        prop.Widen(prop2)
+        self.assertTrue(isinstance(prop.value, list))
+        self.assertEqual(3, len(prop.value))
+
+    def testAdd(self):
+        """Test adding properties"""
+        self.fdt.pack()
+        # This function should automatically expand the device tree
+        self.node.AddZeroProp('one')
+        self.node.AddZeroProp('two')
+        self.node.AddZeroProp('three')
+        self.dtb.Sync(auto_resize=True)
+
+        # Updating existing properties should be OK, since the device-tree size
+        # does not change
+        self.fdt.pack()
+        self.node.SetInt('one', 1)
+        self.node.SetInt('two', 2)
+        self.node.SetInt('three', 3)
+        self.dtb.Sync(auto_resize=False)
+
+        # This should fail since it would need to increase the device-tree size
+        self.node.AddZeroProp('four')
+        with self.assertRaises(libfdt.FdtException) as e:
+            self.dtb.Sync(auto_resize=False)
+        self.assertIn('FDT_ERR_NOSPACE', str(e.exception))
+        self.dtb.Sync(auto_resize=True)
+
+    def testAddNode(self):
+        self.fdt.pack()
+        self.node.AddSubnode('subnode')
+        with self.assertRaises(libfdt.FdtException) as e:
+            self.dtb.Sync(auto_resize=False)
+        self.assertIn('FDT_ERR_NOSPACE', str(e.exception))
+
+        self.dtb.Sync(auto_resize=True)
+        offset = self.fdt.path_offset('/spl-test/subnode')
+        self.assertTrue(offset > 0)
+
+    def testAddMore(self):
+        """Test various other methods for adding and setting properties"""
+        self.node.AddZeroProp('one')
+        self.dtb.Sync(auto_resize=True)
+        data = self.fdt.getprop(self.node.Offset(), 'one')
+        self.assertEqual(0, fdt32_to_cpu(data))
+
+        self.node.SetInt('one', 1)
+        self.dtb.Sync(auto_resize=False)
+        data = self.fdt.getprop(self.node.Offset(), 'one')
+        self.assertEqual(1, fdt32_to_cpu(data))
+
+        val = '123' + chr(0) + '456'
+        self.node.AddString('string', val)
+        self.dtb.Sync(auto_resize=True)
+        data = self.fdt.getprop(self.node.Offset(), 'string')
+        self.assertEqual(val + '\0', data)
+
+        self.fdt.pack()
+        self.node.SetString('string', val + 'x')
+        with self.assertRaises(libfdt.FdtException) as e:
+            self.dtb.Sync(auto_resize=False)
+        self.assertIn('FDT_ERR_NOSPACE', str(e.exception))
+        self.node.SetString('string', val[:-1])
+
+        prop = self.node.props['string']
+        prop.SetData(val)
+        self.dtb.Sync(auto_resize=False)
+        data = self.fdt.getprop(self.node.Offset(), 'string')
+        self.assertEqual(val, data)
+
+        self.node.AddEmptyProp('empty', 5)
+        self.dtb.Sync(auto_resize=True)
+        prop = self.node.props['empty']
+        prop.SetData(val)
+        self.dtb.Sync(auto_resize=False)
+        data = self.fdt.getprop(self.node.Offset(), 'empty')
+        self.assertEqual(val, data)
+
+        self.node.SetData('empty', '123')
+        self.assertEqual('123', prop.bytes)
+
+    def testFromData(self):
+        dtb2 = fdt.Fdt.FromData(self.dtb.GetContents())
+        self.assertEqual(dtb2.GetContents(), self.dtb.GetContents())
+
+        self.node.AddEmptyProp('empty', 5)
+        self.dtb.Sync(auto_resize=True)
+        self.assertTrue(dtb2.GetContents() != self.dtb.GetContents())
+
+
+class TestFdtUtil(unittest.TestCase):
+    """Tests for the fdt_util module
+
+    This module will likely be mostly replaced at some point, once upstream
+    libfdt has better Python support. For now, this provides tests for current
+    functionality.
+    """
+    @classmethod
+    def setUpClass(cls):
+        tools.PrepareOutputDir(None)
+
+    @classmethod
+    def tearDownClass(cls):
+        tools.FinaliseOutputDir()
+
+    def setUp(self):
+        self.dtb = fdt.FdtScan('tools/dtoc/dtoc_test_simple.dts')
+        self.node = self.dtb.GetNode('/spl-test')
+
+    def testGetInt(self):
+        self.assertEqual(1, fdt_util.GetInt(self.node, 'intval'))
+        self.assertEqual(3, fdt_util.GetInt(self.node, 'missing', 3))
+
+        with self.assertRaises(ValueError) as e:
+            self.assertEqual(3, fdt_util.GetInt(self.node, 'intarray'))
+        self.assertIn("property 'intarray' has list value: expecting a single "
+                      'integer', str(e.exception))
+
+    def testGetString(self):
+        self.assertEqual('message', fdt_util.GetString(self.node, 'stringval'))
+        self.assertEqual('test', fdt_util.GetString(self.node, 'missing',
+                                                    'test'))
+
+        with self.assertRaises(ValueError) as e:
+            self.assertEqual(3, fdt_util.GetString(self.node, 'stringarray'))
+        self.assertIn("property 'stringarray' has list value: expecting a "
+                      'single string', str(e.exception))
+
+    def testGetBool(self):
+        self.assertEqual(True, fdt_util.GetBool(self.node, 'boolval'))
+        self.assertEqual(False, fdt_util.GetBool(self.node, 'missing'))
+        self.assertEqual(True, fdt_util.GetBool(self.node, 'missing', True))
+        self.assertEqual(False, fdt_util.GetBool(self.node, 'missing', False))
+
+    def testGetByte(self):
+        self.assertEqual(5, fdt_util.GetByte(self.node, 'byteval'))
+        self.assertEqual(3, fdt_util.GetByte(self.node, 'missing', 3))
+
+        with self.assertRaises(ValueError) as e:
+            fdt_util.GetByte(self.node, 'longbytearray')
+        self.assertIn("property 'longbytearray' has list value: expecting a "
+                      'single byte', str(e.exception))
+
+        with self.assertRaises(ValueError) as e:
+            fdt_util.GetByte(self.node, 'intval')
+        self.assertIn("property 'intval' has length 4, expecting 1",
+                      str(e.exception))
+
+    def testGetPhandleList(self):
+        dtb = fdt.FdtScan('tools/dtoc/dtoc_test_phandle.dts')
+        node = dtb.GetNode('/phandle-source2')
+        self.assertEqual([1], fdt_util.GetPhandleList(node, 'clocks'))
+        node = dtb.GetNode('/phandle-source')
+        self.assertEqual([1, 2, 11, 3, 12, 13, 1],
+                         fdt_util.GetPhandleList(node, 'clocks'))
+        self.assertEqual(None, fdt_util.GetPhandleList(node, 'missing'))
+
+    def testGetDataType(self):
+        self.assertEqual(1, fdt_util.GetDatatype(self.node, 'intval', int))
+        self.assertEqual('message', fdt_util.GetDatatype(self.node, 'stringval',
+                                                         str))
+        with self.assertRaises(ValueError) as e:
+            self.assertEqual(3, fdt_util.GetDatatype(self.node, 'boolval',
+                                                     bool))
+    def testFdtCellsToCpu(self):
+        val = self.node.props['intarray'].value
+        self.assertEqual(0, fdt_util.fdt_cells_to_cpu(val, 0))
+        self.assertEqual(2, fdt_util.fdt_cells_to_cpu(val, 1))
+
+        dtb2 = fdt.FdtScan('tools/dtoc/dtoc_test_addr64.dts')
+        node2 = dtb2.GetNode('/test1')
+        val = node2.props['reg'].value
+        self.assertEqual(0x1234, fdt_util.fdt_cells_to_cpu(val, 2))
+
+    def testEnsureCompiled(self):
+        """Test a degenerate case of this function"""
+        dtb = fdt_util.EnsureCompiled('tools/dtoc/dtoc_test_simple.dts')
+        self.assertEqual(dtb, fdt_util.EnsureCompiled(dtb))
+
+    def testGetPlainBytes(self):
+        self.assertEqual('fred', fdt_util.get_plain_bytes('fred'))
+
+
+def RunTestCoverage():
+    """Run the tests and check that we get 100% coverage"""
+    test_util.RunTestCoverage('tools/dtoc/test_fdt.py', None,
+            ['tools/patman/*.py', '*test_fdt.py'], options.build_dir)
+
+
+def RunTests(args):
+    """Run all the test we have for the fdt model
+
+    Args:
+        args: List of positional args provided to fdt. This can hold a test
+            name to execute (as in 'fdt -t testFdt', for example)
+    """
+    result = unittest.TestResult()
+    sys.argv = [sys.argv[0]]
+    test_name = args and args[0] or None
+    for module in (TestFdt, TestNode, TestProp, TestFdtUtil):
+        if test_name:
+            try:
+                suite = unittest.TestLoader().loadTestsFromName(test_name, module)
+            except AttributeError:
+                continue
+        else:
+            suite = unittest.TestLoader().loadTestsFromTestCase(module)
+        suite.run(result)
+
+    print result
+    for _, err in result.errors:
+        print err
+    for _, err in result.failures:
+        print err
+
+if __name__ != '__main__':
+    sys.exit(1)
+
+parser = OptionParser()
+parser.add_option('-B', '--build-dir', type='string', default='b',
+        help='Directory containing the build output')
+parser.add_option('-P', '--processes', type=int,
+                  help='set number of processes to use for running tests')
+parser.add_option('-t', '--test', action='store_true', dest='test',
+                  default=False, help='run tests')
+parser.add_option('-T', '--test-coverage', action='store_true',
+                default=False, help='run tests and check for 100% coverage')
+(options, args) = parser.parse_args()
+
+# Run our meagre tests
+if options.test:
+    RunTests(args)
+elif options.test_coverage:
+    RunTestCoverage()
diff --git a/tools/u-boot-tools/dumpimage b/tools/u-boot-tools/dumpimage
new file mode 100755
index 0000000000000000000000000000000000000000..ab612e30efe68ade449ca7b809ec133889ed006a
Binary files /dev/null and b/tools/u-boot-tools/dumpimage differ
diff --git a/tools/u-boot-tools/dumpimage.c b/tools/u-boot-tools/dumpimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..7115df04c120ee4a49b6626ffde62465b518f3b5
--- /dev/null
+++ b/tools/u-boot-tools/dumpimage.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Based on mkimage.c.
+ *
+ * Written by Guilherme Maciel Ferreira <guilherme.maciel.ferreira@gmail.com>
+ */
+
+#include "dumpimage.h"
+#include <image.h>
+#include <version.h>
+
+static void usage(void);
+
+/* parameters initialized by core will be used by the image type code */
+static struct image_tool_params params = {
+	.type = IH_TYPE_KERNEL,
+};
+
+/*
+ * dumpimage_extract_subimage() -
+ *
+ * Scan all registered image types and verify the image header against each
+ * supported type. If verification is successful, extract the desired file,
+ * indexed by pflag, from the image.
+ *
+ * Returns negative if the input image format does not match any of the
+ * supported image types.
+ */
+static int dumpimage_extract_subimage(struct image_type_params *tparams,
+		void *ptr, struct stat *sbuf)
+{
+	int retval = -1;
+
+	if (tparams->verify_header) {
+		retval = tparams->verify_header((unsigned char *)ptr,
+				sbuf->st_size, &params);
+		if (retval != 0)
+			return -1;
+		/*
+		 * Extract the file from the image
+		 * if verify is successful
+		 */
+		if (tparams->extract_subimage) {
+			retval = tparams->extract_subimage(ptr, &params);
+		} else {
+			fprintf(stderr,
+				"%s: extract_subimage undefined for %s\n",
+				params.cmdname, tparams->name);
+			return -2;
+		}
+	}
+
+	return retval;
+}
+
+int main(int argc, char **argv)
+{
+	int opt;
+	int ifd = -1;
+	struct stat sbuf;
+	char *ptr;
+	int retval = 0;
+	struct image_type_params *tparams = NULL;
+
+	params.cmdname = *argv;
+
+	while ((opt = getopt(argc, argv, "li:o:T:p:V")) != -1) {
+		switch (opt) {
+		case 'l':
+			params.lflag = 1;
+			break;
+		case 'i':
+			params.imagefile = optarg;
+			params.iflag = 1;
+			break;
+		case 'o':
+			params.outfile = optarg;
+			break;
+		case 'T':
+			params.type = genimg_get_type_id(optarg);
+			if (params.type < 0) {
+				usage();
+			}
+			break;
+		case 'p':
+			params.pflag = strtoul(optarg, &ptr, 10);
+			if (*ptr) {
+				fprintf(stderr,
+					"%s: invalid file position %s\n",
+					params.cmdname, *argv);
+				exit(EXIT_FAILURE);
+			}
+			break;
+		case 'V':
+			printf("dumpimage version %s\n", PLAIN_VERSION);
+			exit(EXIT_SUCCESS);
+		default:
+			usage();
+			break;
+		}
+	}
+
+	if (optind >= argc)
+		usage();
+
+	/* set tparams as per input type_id */
+	tparams = imagetool_get_type(params.type);
+	if (tparams == NULL) {
+		fprintf(stderr, "%s: unsupported type: %s\n",
+			params.cmdname, genimg_get_type_name(params.type));
+		exit(EXIT_FAILURE);
+	}
+
+	/*
+	 * check the passed arguments parameters meets the requirements
+	 * as per image type to be generated/listed
+	 */
+	if (tparams->check_params) {
+		if (tparams->check_params(&params))
+			usage();
+	}
+
+	if (params.iflag)
+		params.datafile = argv[optind];
+	else
+		params.imagefile = argv[optind];
+	if (!params.outfile)
+		params.outfile = params.datafile;
+
+	ifd = open(params.imagefile, O_RDONLY|O_BINARY);
+	if (ifd < 0) {
+		fprintf(stderr, "%s: Can't open \"%s\": %s\n",
+			params.cmdname, params.imagefile,
+			strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (params.lflag || params.iflag) {
+		if (fstat(ifd, &sbuf) < 0) {
+			fprintf(stderr, "%s: Can't stat \"%s\": %s\n",
+				params.cmdname, params.imagefile,
+				strerror(errno));
+			exit(EXIT_FAILURE);
+		}
+
+		if ((uint32_t)sbuf.st_size < tparams->header_size) {
+			fprintf(stderr,
+				"%s: Bad size: \"%s\" is not valid image\n",
+				params.cmdname, params.imagefile);
+			exit(EXIT_FAILURE);
+		}
+
+		ptr = mmap(0, sbuf.st_size, PROT_READ, MAP_SHARED, ifd, 0);
+		if (ptr == MAP_FAILED) {
+			fprintf(stderr, "%s: Can't read \"%s\": %s\n",
+				params.cmdname, params.imagefile,
+				strerror(errno));
+			exit(EXIT_FAILURE);
+		}
+
+		/*
+		 * Both calls below scan through the dumpimage registry for all
+		 * supported image types and verify the input image file
+		 * header for match
+		 */
+		if (params.iflag) {
+			/*
+			 * Extract the data files from within the matched
+			 * image type. Returns the error code if not matched
+			 */
+			retval = dumpimage_extract_subimage(tparams, ptr,
+					&sbuf);
+		} else {
+			/*
+			 * Print the image information for matched image type
+			 * Returns the error code if not matched
+			 */
+			retval = imagetool_verify_print_header(ptr, &sbuf,
+					tparams, &params);
+		}
+
+		(void)munmap((void *)ptr, sbuf.st_size);
+		(void)close(ifd);
+
+		return retval;
+	}
+
+	(void)close(ifd);
+
+	return EXIT_SUCCESS;
+}
+
+static void usage(void)
+{
+	fprintf(stderr, "Usage: %s -l image\n"
+		"          -l ==> list image header information\n",
+		params.cmdname);
+	fprintf(stderr,
+		"       %s -i image -T type [-p position] [-o outfile] data_file\n"
+		"          -i ==> extract from the 'image' a specific 'data_file'\n"
+		"          -T ==> set image type to 'type'\n"
+		"          -p ==> 'position' (starting at 0) of the 'data_file' inside the 'image'\n",
+		params.cmdname);
+	fprintf(stderr,
+		"       %s -V ==> print version information and exit\n",
+		params.cmdname);
+
+	exit(EXIT_FAILURE);
+}
diff --git a/tools/u-boot-tools/dumpimage.h b/tools/u-boot-tools/dumpimage.h
new file mode 100644
index 0000000000000000000000000000000000000000..e31d163292eebea3f62a3953a24355e4f3cd58f3
--- /dev/null
+++ b/tools/u-boot-tools/dumpimage.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Based on mkimage.c.
+ *
+ * Written by Guilherme Maciel Ferreira <guilherme.maciel.ferreira@gmail.com>
+ */
+
+#ifndef _DUMPIMAGE_H_
+#define _DUMPIMAGE_H_
+
+#include "os_support.h"
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <unistd.h>
+#include <u-boot/sha1.h>
+#include "fdt_host.h"
+#include "imagetool.h"
+
+#undef DUMPIMAGE_DEBUG
+
+#ifdef DUMPIMAGE_DEBUG
+#define debug(fmt, args...)	printf(fmt, ##args)
+#else
+#define debug(fmt, args...)
+#endif /* DUMPIMAGE_DEBUG */
+
+#endif /* _DUMPIMAGE_H_ */
diff --git a/tools/u-boot-tools/dumpimage.o b/tools/u-boot-tools/dumpimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..980bb609b57b8b21d049f1350bb4df3cac783fc8
Binary files /dev/null and b/tools/u-boot-tools/dumpimage.o differ
diff --git a/tools/u-boot-tools/easylogo/Makefile b/tools/u-boot-tools/easylogo/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..9278837f5894cab1955ebbd8a7f803091c1bb211
--- /dev/null
+++ b/tools/u-boot-tools/easylogo/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+hostprogs-y := easylogo
+
+always := $(hostprogs-y)
diff --git a/tools/u-boot-tools/easylogo/easylogo.c b/tools/u-boot-tools/easylogo/easylogo.c
new file mode 100644
index 0000000000000000000000000000000000000000..4ba86bf76076fa0886dbdc4782d15c319c34f3db
--- /dev/null
+++ b/tools/u-boot-tools/easylogo/easylogo.c
@@ -0,0 +1,610 @@
+/*
+** Easylogo TGA->header converter
+** ==============================
+** (C) 2000 by Paolo Scaffardi (arsenio@tin.it)
+** AIRVENT SAM s.p.a - RIMINI(ITALY)
+** (C) 2007-2008 Mike Frysinger <vapier@gentoo.org>
+**
+** This is still under construction!
+*/
+
+#include <errno.h>
+#include <getopt.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/stat.h>
+
+#pragma pack(1)
+
+/*#define ENABLE_ASCII_BANNERS */
+
+typedef struct {
+	unsigned char id;
+	unsigned char ColorMapType;
+	unsigned char ImageTypeCode;
+	unsigned short ColorMapOrigin;
+	unsigned short ColorMapLenght;
+	unsigned char ColorMapEntrySize;
+	unsigned short ImageXOrigin;
+	unsigned short ImageYOrigin;
+	unsigned short ImageWidth;
+	unsigned short ImageHeight;
+	unsigned char ImagePixelSize;
+	unsigned char ImageDescriptorByte;
+} tga_header_t;
+
+typedef struct {
+	unsigned char r, g, b;
+} rgb_t;
+
+typedef struct {
+	unsigned char b, g, r;
+} bgr_t;
+
+typedef struct {
+	unsigned char Cb, y1, Cr, y2;
+} yuyv_t;
+
+typedef struct {
+	void *data, *palette;
+	int width, height, pixels, bpp, pixel_size, size, palette_size, yuyv;
+} image_t;
+
+void *xmalloc (size_t size)
+{
+	void *ret = malloc (size);
+	if (!ret) {
+		fprintf (stderr, "\nerror: malloc(%zu) failed: %s",
+			size, strerror(errno));
+		exit (1);
+	}
+	return ret;
+}
+
+void StringUpperCase (char *str)
+{
+	int count = strlen (str);
+	char c;
+
+	while (count--) {
+		c = *str;
+		if ((c >= 'a') && (c <= 'z'))
+			*str = 'A' + (c - 'a');
+		str++;
+	}
+}
+
+void StringLowerCase (char *str)
+{
+	int count = strlen (str);
+	char c;
+
+	while (count--) {
+		c = *str;
+		if ((c >= 'A') && (c <= 'Z'))
+			*str = 'a' + (c - 'A');
+		str++;
+	}
+}
+void pixel_rgb_to_yuyv (rgb_t * rgb_pixel, yuyv_t * yuyv_pixel)
+{
+	unsigned int pR, pG, pB;
+
+	/* Transform (0-255) components to (0-100) */
+	pR = rgb_pixel->r * 100 / 255;
+	pG = rgb_pixel->g * 100 / 255;
+	pB = rgb_pixel->b * 100 / 255;
+
+	/* Calculate YUV values (0-255) from RGB between 0-100 */
+	yuyv_pixel->y1 = yuyv_pixel->y2 = 209 * (pR + pG + pB) / 300 + 16;
+	yuyv_pixel->Cb = pB - (pR / 4) - (pG * 3 / 4) + 128;
+	yuyv_pixel->Cr = pR - (pG * 3 / 4) - (pB / 4) + 128;
+
+	return;
+}
+
+void printlogo_rgb (rgb_t * data, int w, int h)
+{
+	int x, y;
+
+	for (y = 0; y < h; y++) {
+		for (x = 0; x < w; x++, data++)
+			if ((data->r <
+			     30) /*&&(data->g == 0)&&(data->b == 0) */ )
+				printf (" ");
+			else
+				printf ("X");
+		printf ("\n");
+	}
+}
+
+void printlogo_yuyv (unsigned short *data, int w, int h)
+{
+	int x, y;
+
+	for (y = 0; y < h; y++) {
+		for (x = 0; x < w; x++, data++)
+			if (*data == 0x1080)	/* byte-swapped value on little-endian i386 */
+				printf (" ");
+			else
+				printf ("X");
+		printf ("\n");
+	}
+}
+
+static inline unsigned short le16_to_cpu (unsigned short val)
+{
+	union {
+		unsigned char pval[2];
+		unsigned short val;
+	} swapped;
+
+	swapped.val = val;
+	return (swapped.pval[1] << 8) + swapped.pval[0];
+}
+
+int image_load_tga (image_t * image, char *filename)
+{
+	FILE *file;
+	tga_header_t header;
+	int i;
+	unsigned char app;
+	rgb_t *p;
+
+	if ((file = fopen (filename, "rb")) == NULL)
+		return -1;
+
+	fread (&header, sizeof (header), 1, file);
+
+	/* byte swap: tga is little endian, host is ??? */
+	header.ColorMapOrigin = le16_to_cpu (header.ColorMapOrigin);
+	header.ColorMapLenght = le16_to_cpu (header.ColorMapLenght);
+	header.ImageXOrigin = le16_to_cpu (header.ImageXOrigin);
+	header.ImageYOrigin = le16_to_cpu (header.ImageYOrigin);
+	header.ImageWidth = le16_to_cpu (header.ImageWidth);
+	header.ImageHeight = le16_to_cpu (header.ImageHeight);
+
+	image->width = header.ImageWidth;
+	image->height = header.ImageHeight;
+
+	switch (header.ImageTypeCode) {
+	case 2:		/* Uncompressed RGB */
+		image->yuyv = 0;
+		image->palette_size = 0;
+		image->palette = NULL;
+		break;
+
+	default:
+		printf ("Format not supported!\n");
+		return -1;
+	}
+
+	image->bpp = header.ImagePixelSize;
+	image->pixel_size = ((image->bpp - 1) / 8) + 1;
+	image->pixels = image->width * image->height;
+	image->size = image->pixels * image->pixel_size;
+	image->data = xmalloc (image->size);
+
+	if (image->bpp != 24) {
+		printf ("Bpp not supported: %d!\n", image->bpp);
+		return -1;
+	}
+
+	fread (image->data, image->size, 1, file);
+
+/* Swapping R and B values */
+
+	p = image->data;
+	for (i = 0; i < image->pixels; i++, p++) {
+		app = p->r;
+		p->r = p->b;
+		p->b = app;
+	}
+
+/* Flip the image vertically (TGA data is bottom-up unless descriptor bit 5 is set) */
+
+	if (!(header.ImageDescriptorByte & 0x20)) {
+		unsigned char *temp = xmalloc (image->size);
+		int linesize = image->pixel_size * image->width;
+		void *dest = image->data,
+			*source = temp + image->size - linesize;
+
+		printf ("S");
+		if (temp == NULL) {
+			printf ("Cannot alloc temp buffer!\n");
+			return -1;
+		}
+
+		memcpy (temp, image->data, image->size);
+		for (i = 0; i < image->height;
+		     i++, dest += linesize, source -= linesize)
+			memcpy (dest, source, linesize);
+
+		free (temp);
+	}
+#ifdef ENABLE_ASCII_BANNERS
+	printlogo_rgb (image->data, image->width, image->height);
+#endif
+
+	fclose (file);
+	return 0;
+}
+
+void image_free (image_t * image)
+{
+	free (image->data);
+	free (image->palette);
+}
+
+int image_rgb_to_yuyv (image_t * rgb_image, image_t * yuyv_image)
+{
+	rgb_t *rgb_ptr = (rgb_t *) rgb_image->data;
+	yuyv_t yuyv;
+	unsigned short *dest;
+	int count = 0;
+
+	yuyv_image->pixel_size = 2;
+	yuyv_image->bpp = 16;
+	yuyv_image->yuyv = 1;
+	yuyv_image->width = rgb_image->width;
+	yuyv_image->height = rgb_image->height;
+	yuyv_image->pixels = yuyv_image->width * yuyv_image->height;
+	yuyv_image->size = yuyv_image->pixels * yuyv_image->pixel_size;
+	dest = (unsigned short *) (yuyv_image->data =
+				   xmalloc (yuyv_image->size));
+	yuyv_image->palette = 0;
+	yuyv_image->palette_size = 0;
+
+	while ((count++) < rgb_image->pixels) {
+		pixel_rgb_to_yuyv (rgb_ptr++, &yuyv);
+
+		if ((count & 1) == 0)	/* Was == 0 */
+			memcpy (dest, ((void *) &yuyv) + 2, sizeof (short));
+		else
+			memcpy (dest, (void *) &yuyv, sizeof (short));
+
+		dest++;
+	}
+
+#ifdef ENABLE_ASCII_BANNERS
+	printlogo_yuyv (yuyv_image->data, yuyv_image->width,
+			yuyv_image->height);
+#endif
+	return 0;
+}
+
+int image_rgb888_to_rgb565(image_t *rgb888_image, image_t *rgb565_image)
+{
+	rgb_t *rgb_ptr = (rgb_t *) rgb888_image->data;
+	unsigned short *dest;
+	int count = 0;
+
+	rgb565_image->pixel_size = 2;
+	rgb565_image->bpp = 16;
+	rgb565_image->yuyv = 0;
+	rgb565_image->width = rgb888_image->width;
+	rgb565_image->height = rgb888_image->height;
+	rgb565_image->pixels = rgb565_image->width * rgb565_image->height;
+	rgb565_image->size = rgb565_image->pixels * rgb565_image->pixel_size;
+	dest = (unsigned short *) (rgb565_image->data =
+				   xmalloc(rgb565_image->size));
+	rgb565_image->palette = 0;
+	rgb565_image->palette_size = 0;
+
+	while ((count++) < rgb888_image->pixels) {
+
+		*dest++ = ((rgb_ptr->b & 0xF8) << 8) |
+			((rgb_ptr->g & 0xFC) << 3) |
+			(rgb_ptr->r >> 3);
+		rgb_ptr++;
+	}
+
+	return 0;
+}
+
+enum comp_t {
+	COMP_NONE,
+	COMP_GZIP,
+	COMP_LZMA,
+};
+static enum comp_t compression = COMP_NONE;
+static bool bss_storage = false;
+
+int image_save_header (image_t * image, char *filename, char *varname)
+{
+	FILE *file = fopen (filename, "w");
+	char app[256], str[256] = "", def_name[64];
+	int count = image->size, col = 0;
+	unsigned char *dataptr = image->data;
+
+	if (file == NULL)
+		return -1;
+
+	/*  Author information */
+	fprintf (file,
+		 "/*\n * Generated by EasyLogo, (C) 2000 by Paolo Scaffardi\n *\n");
+	fprintf (file,
+		 " * To use this, include it and call: easylogo_plot(screen,&%s, width,x,y)\n *\n",
+		 varname);
+	fprintf (file,
+		 " * Where:\t'screen'\tis the pointer to the frame buffer\n");
+	fprintf (file, " *\t\t'width'\tis the screen width\n");
+	fprintf (file, " *\t\t'x'\t\tis the horizontal position\n");
+	fprintf (file, " *\t\t'y'\t\tis the vertical position\n */\n\n");
+
+	/* image compress */
+	if (compression != COMP_NONE) {
+		const char *errstr = NULL;
+		unsigned char *compressed;
+		const char *comp_name;
+		struct stat st;
+		FILE *compfp;
+		size_t filename_len = strlen(filename);
+		char *compfilename = xmalloc(filename_len + 20);
+		char *compcmd = xmalloc(filename_len + 50);
+
+		sprintf(compfilename, "%s.bin", filename);
+		switch (compression) {
+		case COMP_GZIP:
+			strcpy(compcmd, "gzip");
+			comp_name = "GZIP";
+			break;
+		case COMP_LZMA:
+			strcpy(compcmd, "lzma");
+			comp_name = "LZMA";
+			break;
+		default:
+			errstr = "\nerror: unknown compression method";
+			goto done;
+		}
+		strcat(compcmd, " > ");
+		strcat(compcmd, compfilename);
+		compfp = popen(compcmd, "w");
+		if (!compfp) {
+			errstr = "\nerror: popen() failed";
+			goto done;
+		}
+		if (fwrite(image->data, image->size, 1, compfp) != 1) {
+			errstr = "\nerror: writing data to the compressor failed";
+			goto done;
+		}
+		if (pclose(compfp)) {
+			errstr = "\nerror: compressor process failed";
+			goto done;
+		}
+
+		compfp = fopen(compfilename, "r");
+		if (!compfp) {
+			errstr = "\nerror: open() on compressed data failed";
+			goto done;
+		}
+		if (stat(compfilename, &st)) {
+			errstr = "\nerror: stat() on compressed file failed";
+			goto done;
+		}
+		compressed = xmalloc(st.st_size);
+		if (fread(compressed, st.st_size, 1, compfp) != 1) {
+			errstr = "\nerror: reading compressed data failed";
+			goto done;
+		}
+		fclose(compfp);
+
+		unlink(compfilename);
+
+		dataptr = compressed;
+		count = st.st_size;
+		fprintf(file, "#define EASYLOGO_ENABLE_%s %i\n\n", comp_name, count);
+		if (bss_storage)
+			fprintf (file, "static unsigned char EASYLOGO_DECOMP_BUFFER[%i];\n\n", image->size);
+
+ done:
+		free(compfilename);
+		free(compcmd);
+
+		if (errstr) {
+			perror (errstr);
+			return -1;
+		}
+	}
+
+	/*	Headers */
+	fprintf (file, "#include <video_easylogo.h>\n\n");
+	/*	Macros */
+	strcpy (def_name, varname);
+	StringUpperCase (def_name);
+	fprintf (file, "#define	DEF_%s_WIDTH\t\t%d\n", def_name,
+		 image->width);
+	fprintf (file, "#define	DEF_%s_HEIGHT\t\t%d\n", def_name,
+		 image->height);
+	fprintf (file, "#define	DEF_%s_PIXELS\t\t%d\n", def_name,
+		 image->pixels);
+	fprintf (file, "#define	DEF_%s_BPP\t\t%d\n", def_name, image->bpp);
+	fprintf (file, "#define	DEF_%s_PIXEL_SIZE\t%d\n", def_name,
+		 image->pixel_size);
+	fprintf (file, "#define	DEF_%s_SIZE\t\t%d\n\n", def_name,
+		 image->size);
+	/*  Declaration */
+	fprintf (file, "unsigned char DEF_%s_DATA[] = {\n",
+		 def_name);
+
+	/*	Data */
+	while (count)
+		switch (col) {
+		case 0:
+			sprintf (str, " 0x%02x", *dataptr++);
+			col++;
+			count--;
+			break;
+
+		case 16:
+			fprintf (file, "%s", str);
+			if (count > 0)
+				fprintf (file, ",");
+			fprintf (file, "\n");
+
+			col = 0;
+			break;
+
+		default:
+			strcpy (app, str);
+			sprintf (str, "%s, 0x%02x", app, *dataptr++);
+			col++;
+			count--;
+			break;
+		}
+
+	if (col)
+		fprintf (file, "%s\n", str);
+
+	/*	End of declaration */
+	fprintf (file, "};\n\n");
+	/*	Variable */
+	fprintf (file, "fastimage_t %s = {\n", varname);
+	fprintf (file, "		DEF_%s_DATA,\n", def_name);
+	fprintf (file, "		DEF_%s_WIDTH,\n", def_name);
+	fprintf (file, "		DEF_%s_HEIGHT,\n", def_name);
+	fprintf (file, "		DEF_%s_BPP,\n", def_name);
+	fprintf (file, "		DEF_%s_PIXEL_SIZE,\n", def_name);
+	fprintf (file, "		DEF_%s_SIZE\n};\n", def_name);
+
+	fclose (file);
+
+	return 0;
+}
+
+#define DEF_FILELEN	256
+
+static void usage (int exit_status)
+{
+	puts (
+		"EasyLogo 1.0 (C) 2000 by Paolo Scaffardi\n"
+		"\n"
+		"Syntax:	easylogo [options] inputfile [outputvar [outputfile]]\n"
+		"\n"
+		"Options:\n"
+		"  -r     Output RGB888 instead of YUYV\n"
+		"  -s     Output RGB565 instead of YUYV\n"
+		"  -g     Compress with gzip\n"
+		"  -l     Compress with lzma\n"
+		"  -b     Preallocate space in bss for decompressing image\n"
+		"  -h     Help output\n"
+		"\n"
+		"Where: 'inputfile'   is the TGA image to load\n"
+		"       'outputvar'   is the variable name to create\n"
+		"       'outputfile'  is the output header file (default is 'inputfile.h')"
+	);
+	exit (exit_status);
+}
+
+int main (int argc, char *argv[])
+{
+	int c;
+	bool use_rgb888 = false;
+	bool use_rgb565 = false;
+	char inputfile[DEF_FILELEN],
+		outputfile[DEF_FILELEN], varname[DEF_FILELEN];
+
+	image_t rgb888_logo, rgb565_logo, yuyv_logo;
+
+	while ((c = getopt(argc, argv, "hrsglb")) > 0) {
+		switch (c) {
+		case 'h':
+			usage (0);
+			break;
+		case 'r':
+			use_rgb888 = true;
+			puts("Using 24-bit RGB888 Output Format");
+			break;
+		case 's':
+			use_rgb565 = true;
+			puts("Using 16-bit RGB565 Output Format");
+			break;
+		case 'g':
+			compression = COMP_GZIP;
+			puts("Compressing with gzip");
+			break;
+		case 'l':
+			compression = COMP_LZMA;
+			puts("Compressing with lzma");
+			break;
+		case 'b':
+			bss_storage = true;
+			puts("Preallocating bss space for decompressing image");
+			break;
+		default:
+			usage (1);
+			break;
+		}
+	}
+
+	c = argc - optind;
+	if (c > 4 || c < 1)
+		usage (1);
+
+	strcpy (inputfile, argv[optind]);
+
+	if (c > 1)
+		strcpy (varname, argv[optind + 1]);
+	else {
+		/* transform "input.tga" to just "input" */
+		char *dot;
+		strcpy (varname, inputfile);
+		dot = strchr (varname, '.');
+		if (dot)
+			*dot = '\0';
+	}
+
+	if (c > 2)
+		strcpy (outputfile, argv[optind + 2]);
+	else {
+		/* just append ".h" to input file name */
+		strcpy (outputfile, inputfile);
+		strcat (outputfile, ".h");
+	}
+
+	/* Make sure the output is sent as soon as we printf() */
+	setbuf(stdout, NULL);
+
+	printf ("Doing '%s' (%s) from '%s'...",
+		outputfile, varname, inputfile);
+
+	/* Import TGA logo */
+
+	printf ("L");
+	if (image_load_tga(&rgb888_logo, inputfile) < 0) {
+		printf ("cannot load input file!\n");
+		exit (1);
+	}
+
+	/* Convert, save, and free the image */
+
+	if (!use_rgb888 && !use_rgb565) {
+		printf ("C");
+		image_rgb_to_yuyv(&rgb888_logo, &yuyv_logo);
+
+		printf("S");
+		image_save_header(&yuyv_logo, outputfile, varname);
+		image_free(&yuyv_logo);
+	} else if (use_rgb565) {
+		printf("C");
+		image_rgb888_to_rgb565(&rgb888_logo, &rgb565_logo);
+
+		printf("S");
+		image_save_header(&rgb565_logo, outputfile, varname);
+		image_free(&rgb565_logo);
+	} else {
+		printf("S");
+		image_save_header(&rgb888_logo, outputfile, varname);
+	}
+
+	/* Free original image and copy */
+
+	image_free(&rgb888_logo);
+
+	printf ("\n");
+
+	return 0;
+}
diff --git a/tools/u-boot-tools/easylogo/linux_blackfin.tga b/tools/u-boot-tools/easylogo/linux_blackfin.tga
new file mode 100644
index 0000000000000000000000000000000000000000..e2bb17b80b03b9c76a82f45cb062dcb195ad615b
Binary files /dev/null and b/tools/u-boot-tools/easylogo/linux_blackfin.tga differ
diff --git a/tools/u-boot-tools/easylogo/linux_logo.tga b/tools/u-boot-tools/easylogo/linux_logo.tga
new file mode 100644
index 0000000000000000000000000000000000000000..ac53def05c2790b429cb4ea8957eaaf362fc881f
Binary files /dev/null and b/tools/u-boot-tools/easylogo/linux_logo.tga differ
diff --git a/tools/u-boot-tools/easylogo/runme.sh b/tools/u-boot-tools/easylogo/runme.sh
new file mode 100644
index 0000000000000000000000000000000000000000..625ebaae19f420116c2c36bac03bec494049c415
--- /dev/null
+++ b/tools/u-boot-tools/easylogo/runme.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+make
+./easylogo linux_logo.tga u_boot_logo video_logo.h
+mv video_logo.h ../../include
diff --git a/tools/u-boot-tools/env/.embedded.o.cmd b/tools/u-boot-tools/env/.embedded.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..a986ff253f64616f5ce42a31a34b603b3722e570
--- /dev/null
+++ b/tools/u-boot-tools/env/.embedded.o.cmd
@@ -0,0 +1,287 @@
+cmd_tools/env/embedded.o := cc -Wp,-MD,tools/env/.embedded.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/env/embedded.o tools/env/embedded.c
+
+source_tools/env/embedded.o := tools/env/embedded.c
+
+deps_tools/env/embedded.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../env/embedded.c \
+    $(wildcard include/config/build/envcrc.h) \
+    $(wildcard include/config/env/addr/redund.h) \
+    $(wildcard include/config/env/offset.h) \
+  include/config.h \
+    $(wildcard include/config/boarddir.h) \
+  include/config_defaults.h \
+    $(wildcard include/config/defaults/h/.h) \
+    $(wildcard include/config/bootm/linux.h) \
+    $(wildcard include/config/bootm/netbsd.h) \
+    $(wildcard include/config/bootm/plan9.h) \
+    $(wildcard include/config/bootm/rtems.h) \
+    $(wildcard include/config/bootm/vxworks.h) \
+    $(wildcard include/config/gzip.h) \
+    $(wildcard include/config/zlib.h) \
+  include/config_uncmd_spl.h \
+    $(wildcard include/config/uncmd/spl/h//.h) \
+    $(wildcard include/config/spl/build.h) \
+    $(wildcard include/config/spl/dm.h) \
+    $(wildcard include/config/dm/serial.h) \
+    $(wildcard include/config/dm/gpio.h) \
+    $(wildcard include/config/dm/i2c.h) \
+    $(wildcard include/config/dm/spi.h) \
+    $(wildcard include/config/dm/warn.h) \
+    $(wildcard include/config/dm/stdio.h) \
+  include/configs/zynq-common.h \
+    $(wildcard include/config/zynq/common/h.h) \
+    $(wildcard include/config/cpu/freq/hz.h) \
+    $(wildcard include/config/remake/elf.h) \
+    $(wildcard include/config/sys/l2cache/off.h) \
+    $(wildcard include/config/sys/l2/pl310.h) \
+    $(wildcard include/config/sys/pl310/base.h) \
+    $(wildcard include/config/sys/timerbase.h) \
+    $(wildcard include/config/sys/timer/counts/down.h) \
+    $(wildcard include/config/sys/timer/counter.h) \
+    $(wildcard include/config/sys/baudrate/table.h) \
+    $(wildcard include/config/arm/dcc.h) \
+    $(wildcard include/config/zynq/gem.h) \
+    $(wildcard include/config/sys/fault/echo/link/down.h) \
+    $(wildcard include/config/bootp/may/fail.h) \
+    $(wildcard include/config/zynq/qspi.h) \
+    $(wildcard include/config/sf/default/speed.h) \
+    $(wildcard include/config/mtd/nor/flash.h) \
+    $(wildcard include/config/sys/flash/base.h) \
+    $(wildcard include/config/sys/flash/size.h) \
+    $(wildcard include/config/sys/max/flash/banks.h) \
+    $(wildcard include/config/sys/max/flash/sect.h) \
+    $(wildcard include/config/sys/flash/erase/tout.h) \
+    $(wildcard include/config/sys/flash/write/tout.h) \
+    $(wildcard include/config/flash/show/progress.h) \
+    $(wildcard include/config/sys/flash/empty/info.h) \
+    $(wildcard include/config/nand/zynq.h) \
+    $(wildcard include/config/sys/max/nand/device.h) \
+    $(wildcard include/config/sys/nand/onfi/detection.h) \
+    $(wildcard include/config/usb/ehci/zynq.h) \
+    $(wildcard include/config/ehci/is/tdi.h) \
+    $(wildcard include/config/sys/dfu/data/buf/size.h) \
+    $(wildcard include/config/thor/reset/off.h) \
+    $(wildcard include/config/mmc/sdhci/zynq.h) \
+    $(wildcard include/config/sys/i2c/zynq.h) \
+    $(wildcard include/config/sys/i2c.h) \
+    $(wildcard include/config/env/is/in/eeprom.h) \
+    $(wildcard include/config/sys/i2c/eeprom/addr/len.h) \
+    $(wildcard include/config/sys/i2c/eeprom/addr.h) \
+    $(wildcard include/config/sys/eeprom/page/write/bits.h) \
+    $(wildcard include/config/sys/eeprom/page/write/delay/ms.h) \
+    $(wildcard include/config/sys/eeprom/size.h) \
+    $(wildcard include/config/sys/i2c/mux/addr.h) \
+    $(wildcard include/config/sys/i2c/mux/eeprom/sel.h) \
+    $(wildcard include/config/extra/env/settings.h) \
+    $(wildcard include/config/env/overwrite.h) \
+    $(wildcard include/config/bootdelay.h) \
+    $(wildcard include/config/preboot.h) \
+    $(wildcard include/config/sys/load/addr.h) \
+    $(wildcard include/config/cmd/mmc.h) \
+    $(wildcard include/config/cmd/usb.h) \
+    $(wildcard include/config/cmd/pxe.h) \
+    $(wildcard include/config/cmd/dhcp.h) \
+    $(wildcard include/config/clocks.h) \
+    $(wildcard include/config/sys/maxargs.h) \
+    $(wildcard include/config/sys/cbsize.h) \
+    $(wildcard include/config/sys/pbsize.h) \
+    $(wildcard include/config/sys/prompt.h) \
+    $(wildcard include/config/sys/memtest/start.h) \
+    $(wildcard include/config/sys/memtest/end.h) \
+    $(wildcard include/config/sys/init/ram/addr.h) \
+    $(wildcard include/config/sys/init/ram/size.h) \
+    $(wildcard include/config/sys/init/sp/addr.h) \
+    $(wildcard include/config/sys/bootm/len.h) \
+    $(wildcard include/config/sys/mmc/max/device.h) \
+    $(wildcard include/config/sys/ldscript.h) \
+    $(wildcard include/config/sys/mmcsd/fs/boot/partition.h) \
+    $(wildcard include/config/spl/fs/load/payload/name.h) \
+    $(wildcard include/config/sys/dcache/off.h) \
+    $(wildcard include/config/sys/spl/args/addr.h) \
+    $(wildcard include/config/spl/fs/load/args/name.h) \
+    $(wildcard include/config/spl/fs/load/kernel/name.h) \
+    $(wildcard include/config/sys/mmcsd/raw/mode/args/sector.h) \
+    $(wildcard include/config/sys/mmcsd/raw/mode/args/sectors.h) \
+    $(wildcard include/config/sys/mmcsd/raw/mode/kernel/sector.h) \
+    $(wildcard include/config/sys/spi/u/boot/offs.h) \
+    $(wildcard include/config/sys/spi/args/offs.h) \
+    $(wildcard include/config/sys/spi/args/size.h) \
+    $(wildcard include/config/sys/spi/kernel/offs.h) \
+    $(wildcard include/config/spl/text/base.h) \
+    $(wildcard include/config/spl/max/size.h) \
+    $(wildcard include/config/sys/spl/malloc/start.h) \
+    $(wildcard include/config/spl/stack/r/addr.h) \
+    $(wildcard include/config/sys/spl/malloc/size.h) \
+    $(wildcard include/config/spl/stack.h) \
+    $(wildcard include/config/spl/bss/start/addr.h) \
+    $(wildcard include/config/spl/bss/max/size.h) \
+    $(wildcard include/config/spl/load/fit/address.h) \
+    $(wildcard include/config/sys/uboot/start.h) \
+    $(wildcard include/config/sys/text/base.h) \
+  include/config_distro_bootcmd.h \
+    $(wildcard include/config/cmd/distro/bootcmd/h.h) \
+    $(wildcard include/config/sandbox.h) \
+    $(wildcard include/config/cmd/ubifs.h) \
+    $(wildcard include/config/efi/loader.h) \
+    $(wildcard include/config/arm64.h) \
+    $(wildcard include/config/arm.h) \
+    $(wildcard include/config/x86/run/32bit.h) \
+    $(wildcard include/config/x86/run/64bit.h) \
+    $(wildcard include/config/arch/rv32i.h) \
+    $(wildcard include/config/arch/rv64i.h) \
+    $(wildcard include/config/sata.h) \
+    $(wildcard include/config/scsi.h) \
+    $(wildcard include/config/ide.h) \
+    $(wildcard include/config/dm/pci.h) \
+    $(wildcard include/config/cmd/virtio.h) \
+    $(wildcard include/config/x86.h) \
+    $(wildcard include/config/cmd/dhcp/or/pxe.h) \
+    $(wildcard include/config/bootcommand.h) \
+  arch/arm/include/asm/config.h \
+    $(wildcard include/config/h/.h) \
+    $(wildcard include/config/lmb.h) \
+    $(wildcard include/config/sys/boot/ramdisk/high.h) \
+    $(wildcard include/config/arch/ls1021a.h) \
+    $(wildcard include/config/cpu/pxa27x.h) \
+    $(wildcard include/config/cpu/monahans.h) \
+    $(wildcard include/config/cpu/pxa25x.h) \
+    $(wildcard include/config/fsl/layerscape.h) \
+  include/config_fallbacks.h \
+    $(wildcard include/config/fallbacks/h.h) \
+    $(wildcard include/config/spl.h) \
+    $(wildcard include/config/spl/pad/to.h) \
+    $(wildcard include/config/cmd/kgdb.h) \
+  include/environment.h \
+    $(wildcard include/config/env/is/in/flash.h) \
+    $(wildcard include/config/env/addr.h) \
+    $(wildcard include/config/env/offset/redund.h) \
+    $(wildcard include/config/env/sect/size.h) \
+    $(wildcard include/config/env/size.h) \
+    $(wildcard include/config/env/size/redund.h) \
+    $(wildcard include/config/sys/monitor/base.h) \
+    $(wildcard include/config/sys/monitor/len.h) \
+    $(wildcard include/config/sys/redundand/environment.h) \
+    $(wildcard include/config/env/is/embedded.h) \
+    $(wildcard include/config/env/is/in/mmc.h) \
+    $(wildcard include/config/env/is/in/nand.h) \
+    $(wildcard include/config/env/offset/oob.h) \
+    $(wildcard include/config/env/is/in/ubi.h) \
+    $(wildcard include/config/env/ubi/part.h) \
+    $(wildcard include/config/env/ubi/volume.h) \
+    $(wildcard include/config/env/ubi/volume/redund.h) \
+    $(wildcard include/config/cmd/ubi.h) \
+    $(wildcard include/config/env/is/in/onenand.h) \
+    $(wildcard include/config/env/is/in/spi/flash.h) \
+    $(wildcard include/config/needs/manual/reloc.h) \
+    $(wildcard include/config/sys/mmc/env/part.h) \
+    $(wildcard include/config/cmd/saveenv.h) \
+  include/compiler.h \
+  include/env_attr.h \
+  include/env_callback.h \
+    $(wildcard include/config/env/callback/list/static.h) \
+    $(wildcard include/config/silent/console.h) \
+    $(wildcard include/config/splashimage/guard.h) \
+    $(wildcard include/config/regex.h) \
+    $(wildcard include/config/cmd/dns.h) \
+    $(wildcard include/config/cmd/net.h) \
+  include/env_flags.h \
+    $(wildcard include/config/env/flags/list/static.h) \
+    $(wildcard include/config/overwrite/ethaddr/once.h) \
+    $(wildcard include/config/cmd/env/flags.h) \
+  include/linker_lists.h \
+  include/linux/compiler.h \
+    $(wildcard include/config/sparse/rcu/pointer.h) \
+    $(wildcard include/config/trace/branch/profiling.h) \
+    $(wildcard include/config/profile/all/branches.h) \
+    $(wildcard include/config/kasan.h) \
+    $(wildcard include/config/enable/must/check.h) \
+    $(wildcard include/config/enable/warn/deprecated.h) \
+    $(wildcard include/config/kprobes.h) \
+  /usr/include/search.h \
+  include/linux/stringify.h \
+
+tools/env/embedded.o: $(deps_tools/env/embedded.o)
+
+$(deps_tools/env/embedded.o):
diff --git a/tools/u-boot-tools/env/.gitignore b/tools/u-boot-tools/env/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..8d28b2b70beb2bbc443591f9786a8ef754ae5c16
--- /dev/null
+++ b/tools/u-boot-tools/env/.gitignore
@@ -0,0 +1,3 @@
+embedded.c
+fw_printenv
+fw_printenv_unstripped
diff --git a/tools/u-boot-tools/env/Makefile b/tools/u-boot-tools/env/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..b627796e949ebbd8f6b421cfd949c34d49474292
--- /dev/null
+++ b/tools/u-boot-tools/env/Makefile
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# (C) Copyright 2002-2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+
+# fw_printenv is supposed to run on the target system, which means it should be
+# built with cross tools. Although it may look weird, we only replace "HOSTCC"
+# with "CC" here for the maximum code reuse of scripts/Makefile.host.
+override HOSTCC = $(CC)
+
+# Compile for a hosted environment on the target
+HOST_EXTRACFLAGS  = -I$(srctree)/tools \
+		$(patsubst -I%,-idirafter%, $(filter -I%, $(UBOOTINCLUDE))) \
+		-idirafter $(srctree)/tools/env \
+		-DUSE_HOSTCC \
+		-DTEXT_BASE=$(TEXT_BASE)
+
+ifeq ($(MTD_VERSION),old)
+HOST_EXTRACFLAGS += -DMTD_OLD
+endif
+
+always := fw_printenv
+hostprogs-y := fw_printenv
+
+lib-y += fw_env.o \
+	crc32.o ctype.o linux_string.o \
+	env_attr.o env_flags.o
+
+fw_printenv-objs := fw_env_main.o $(lib-y)
+
+quiet_cmd_crosstools_strip = STRIP   $^
+      cmd_crosstools_strip = $(STRIP) $^; touch $@
+
+$(obj)/.strip: $(obj)/fw_printenv
+	$(call cmd,crosstools_strip)
+
+always += .strip
diff --git a/tools/u-boot-tools/env/README b/tools/u-boot-tools/env/README
new file mode 100644
index 0000000000000000000000000000000000000000..709251383c6dc22eead9521b195fd389de6b45ed
--- /dev/null
+++ b/tools/u-boot-tools/env/README
@@ -0,0 +1,63 @@
+
+This is a demo implementation of a Linux command line tool to access
+the U-Boot environment variables.
+
+In order to cross-compile fw_printenv, run
+    make CROSS_COMPILE=<your cross-compiler prefix> envtools
+in the root directory of the U-Boot distribution. For example,
+    make CROSS_COMPILE=arm-linux- envtools
+
+You should then create a symlink named fw_setenv pointing to fw_printenv;
+both names run the same program, and its behavior depends on the basename.
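+For example, in the directory where the binary was installed, run
+    ln -s fw_printenv fw_setenv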
+
+To configure the utility at run time, uncomment the line
+#define CONFIG_FILE  "/etc/fw_env.config"
+in fw_env.h.
+
+To build against older versions of the MTD headers (before v2.6.8-rc1),
+pass the argument "MTD_VERSION=old" to make.
+
+See comments in the fw_env.config file for definitions for the
+particular board.
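+
+For example, a single-entry /etc/fw_env.config for a NOR partition might
+look like this (illustrative values; adjust the device name, offset and
+sizes to your flash layout):
+
+    # MTD device name	Device offset	Env. size	Flash sector size
+    /dev/mtd1		0x0000		0x4000		0x4000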
+
+Configuration can also be done via #defines in the fw_env.h file. The
+following lines are relevant:
+
+#define HAVE_REDUND	/* For systems with 2 env sectors */
+#define DEVICE1_NAME	"/dev/mtd1"
+#define DEVICE2_NAME	"/dev/mtd2"
+#define DEVICE1_OFFSET    0x0000
+#define ENV1_SIZE         0x4000
+#define DEVICE1_ESIZE     0x4000
+#define DEVICE1_ENVSECTORS     2
+#define DEVICE2_OFFSET    0x0000
+#define ENV2_SIZE         0x4000
+#define DEVICE2_ESIZE     0x4000
+#define DEVICE2_ENVSECTORS     2
+
+Undefine HAVE_REDUND if you want to use the utilities on a system
+that does not have redundant environment support enabled.
+If HAVE_REDUND is undefined, DEVICE2_NAME is ignored,
+as are ENV2_SIZE and DEVICE2_ESIZE.
+
+The DEVICEx_NAME constants define which MTD character devices are to
+be used to access the environment.
+
+The DEVICEx_OFFSET constants define the environment offset within the
+MTD character device.
+
+ENVx_SIZE defines the size in bytes taken by the environment, which
+may be less than the flash sector size if the environment takes less
+than one sector.
+
+DEVICEx_ESIZE defines the size of the first sector in the flash
+partition where the environment resides.
+
+DEVICEx_ENVSECTORS defines the number of sectors that may be used for
+this environment instance. On NAND this is used to limit the range
+within which bad blocks are skipped; on NOR it is not used.
+
+To prevent losing changes to the environment and to prevent confusing the MTD
+drivers, a lock file at /var/lock/fw_printenv.lock is used to serialize access
+to the environment.
diff --git a/tools/u-boot-tools/env/crc32.c b/tools/u-boot-tools/env/crc32.c
new file mode 100644
index 0000000000000000000000000000000000000000..34f8178e33ffe29e393ab3a3fe738c4a15938adf
--- /dev/null
+++ b/tools/u-boot-tools/env/crc32.c
@@ -0,0 +1 @@
+#include "../../lib/crc32.c"
diff --git a/tools/u-boot-tools/env/ctype.c b/tools/u-boot-tools/env/ctype.c
new file mode 100644
index 0000000000000000000000000000000000000000..21050e9373c8797c68f7c498eed709cfabe8ca94
--- /dev/null
+++ b/tools/u-boot-tools/env/ctype.c
@@ -0,0 +1 @@
+#include "../../lib/ctype.c"
diff --git a/tools/u-boot-tools/env/embedded.c b/tools/u-boot-tools/env/embedded.c
new file mode 100644
index 0000000000000000000000000000000000000000..68cb30f993873c7f7f2d11cf27928e6a8a6d52f7
--- /dev/null
+++ b/tools/u-boot-tools/env/embedded.c
@@ -0,0 +1 @@
+#include <../env/embedded.c>
diff --git a/tools/u-boot-tools/env/embedded.o b/tools/u-boot-tools/env/embedded.o
new file mode 100644
index 0000000000000000000000000000000000000000..f3b65f22917931145684e84cee483887e921df02
Binary files /dev/null and b/tools/u-boot-tools/env/embedded.o differ
diff --git a/tools/u-boot-tools/env/env_attr.c b/tools/u-boot-tools/env/env_attr.c
new file mode 100644
index 0000000000000000000000000000000000000000..4d8536335c3048930afded434a6d7fa288d57c64
--- /dev/null
+++ b/tools/u-boot-tools/env/env_attr.c
@@ -0,0 +1 @@
+#include "../../env/attr.c"
diff --git a/tools/u-boot-tools/env/env_flags.c b/tools/u-boot-tools/env/env_flags.c
new file mode 100644
index 0000000000000000000000000000000000000000..71e13e2021f686ae58e79bafbd13fd07a4bae44a
--- /dev/null
+++ b/tools/u-boot-tools/env/env_flags.c
@@ -0,0 +1 @@
+#include "../../env/flags.c"
diff --git a/tools/u-boot-tools/env/fw_env.c b/tools/u-boot-tools/env/fw_env.c
new file mode 100644
index 0000000000000000000000000000000000000000..a5d75958e1b65970ac5c8a486cab045959a3fdce
--- /dev/null
+++ b/tools/u-boot-tools/env/fw_env.c
@@ -0,0 +1,1802 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2000-2010
+ * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ *
+ * (C) Copyright 2008
+ * Guennadi Liakhovetski, DENX Software Engineering, lg@denx.de.
+ */
+
+#define _GNU_SOURCE
+
+#include <compiler.h>
+#include <errno.h>
+#include <env_flags.h>
+#include <fcntl.h>
+#include <libgen.h>
+#include <linux/fs.h>
+#include <linux/stringify.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <dirent.h>
+
+#ifdef MTD_OLD
+# include <stdint.h>
+# include <linux/mtd/mtd.h>
+#else
+# define  __user	/* nothing */
+# include <mtd/mtd-user.h>
+#endif
+
+#include <mtd/ubi-user.h>
+
+#include "fw_env_private.h"
+#include "fw_env.h"
+
+struct env_opts default_opts = {
+#ifdef CONFIG_FILE
+	.config_file = CONFIG_FILE
+#endif
+};
+
+#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
+
+#define min(x, y) ({				\
+	typeof(x) _min1 = (x);			\
+	typeof(y) _min2 = (y);			\
+	(void) (&_min1 == &_min2);		\
+	_min1 < _min2 ? _min1 : _min2; })
+
+struct envdev_s {
+	const char *devname;		/* Device name */
+	long long devoff;		/* Device offset */
+	ulong env_size;			/* environment size */
+	ulong erase_size;		/* device erase size */
+	ulong env_sectors;		/* number of environment sectors */
+	uint8_t mtd_type;		/* type of the MTD device */
+	int is_ubi;			/* set if we use UBI volume */
+};
+
+static struct envdev_s envdevices[2] = {
+	{
+		.mtd_type = MTD_ABSENT,
+	}, {
+		.mtd_type = MTD_ABSENT,
+	},
+};
+
+static int dev_current;
+
+#define DEVNAME(i)    envdevices[(i)].devname
+#define DEVOFFSET(i)  envdevices[(i)].devoff
+#define ENVSIZE(i)    envdevices[(i)].env_size
+#define DEVESIZE(i)   envdevices[(i)].erase_size
+#define ENVSECTORS(i) envdevices[(i)].env_sectors
+#define DEVTYPE(i)    envdevices[(i)].mtd_type
+#define IS_UBI(i)     envdevices[(i)].is_ubi
+
+#define CUR_ENVSIZE ENVSIZE(dev_current)
+
+static unsigned long usable_envsize;
+#define ENV_SIZE      usable_envsize
+
+struct env_image_single {
+	uint32_t crc;		/* CRC32 over data bytes    */
+	char data[];
+};
+
+struct env_image_redundant {
+	uint32_t crc;		/* CRC32 over data bytes    */
+	unsigned char flags;	/* active or obsolete */
+	char data[];
+};
+
+enum flag_scheme {
+	FLAG_NONE,
+	FLAG_BOOLEAN,
+	FLAG_INCREMENTAL,
+};
+
+struct environment {
+	void *image;
+	uint32_t *crc;
+	unsigned char *flags;
+	char *data;
+	enum flag_scheme flag_scheme;
+};
+
+static struct environment environment = {
+	.flag_scheme = FLAG_NONE,
+};
+
+static int have_redund_env;
+
+static unsigned char active_flag = 1;
+/* obsolete_flag must be 0 to efficiently set it on NOR flash without erasing */
+static unsigned char obsolete_flag = 0;
+
+#define DEFAULT_ENV_INSTANCE_STATIC
+#include <env_default.h>
+
+#define UBI_DEV_START "/dev/ubi"
+#define UBI_SYSFS "/sys/class/ubi"
+#define UBI_VOL_NAME_PATT "ubi%d_%d"
+
+static int is_ubi_devname(const char *devname)
+{
+	return !strncmp(devname, UBI_DEV_START, sizeof(UBI_DEV_START) - 1);
+}
+
+static int ubi_check_volume_sysfs_name(const char *volume_sysfs_name,
+				       const char *volname)
+{
+	char path[256];
+	FILE *file;
+	char *name = NULL;	/* %ms may not set this if fscanf() fails */
+	int ret;
+
+	strcpy(path, UBI_SYSFS "/");
+	strcat(path, volume_sysfs_name);
+	strcat(path, "/name");
+
+	file = fopen(path, "r");
+	if (!file)
+		return -1;
+
+	ret = fscanf(file, "%ms", &name);
+	fclose(file);
+	if (ret <= 0 || !name) {
+		fprintf(stderr,
+			"Failed to read from file %s, ret = %d, name = %s\n",
+			path, ret, name);
+		return -1;
+	}
+
+	if (!strcmp(name, volname)) {
+		free(name);
+		return 0;
+	}
+	free(name);
+
+	return -1;
+}
+
+static int ubi_get_volnum_by_name(int devnum, const char *volname)
+{
+	DIR *sysfs_ubi;
+	struct dirent *dirent;
+	int ret;
+	int tmp_devnum;
+	int volnum;
+
+	sysfs_ubi = opendir(UBI_SYSFS);
+	if (!sysfs_ubi)
+		return -1;
+
+#ifdef DEBUG
+	fprintf(stderr, "Looking for volume name \"%s\"\n", volname);
+#endif
+
+	while (1) {
+		dirent = readdir(sysfs_ubi);
+		if (!dirent)
+			return -1;
+
+		ret = sscanf(dirent->d_name, UBI_VOL_NAME_PATT,
+			     &tmp_devnum, &volnum);
+		if (ret == 2 && devnum == tmp_devnum) {
+			if (ubi_check_volume_sysfs_name(dirent->d_name,
+							volname) == 0)
+				return volnum;
+		}
+	}
+
+	return -1;
+}
+
+static int ubi_get_devnum_by_devname(const char *devname)
+{
+	int devnum;
+	int ret;
+
+	ret = sscanf(devname + sizeof(UBI_DEV_START) - 1, "%d", &devnum);
+	if (ret != 1)
+		return -1;
+
+	return devnum;
+}
+
+static const char *ubi_get_volume_devname(const char *devname,
+					  const char *volname)
+{
+	char *volume_devname;
+	int volnum;
+	int devnum;
+	int ret;
+
+	devnum = ubi_get_devnum_by_devname(devname);
+	if (devnum < 0)
+		return NULL;
+
+	volnum = ubi_get_volnum_by_name(devnum, volname);
+	if (volnum < 0)
+		return NULL;
+
+	ret = asprintf(&volume_devname, "%s_%d", devname, volnum);
+	if (ret < 0)
+		return NULL;
+
+#ifdef DEBUG
+	fprintf(stderr, "Found ubi volume \"%s:%s\" -> %s\n",
+		devname, volname, volume_devname);
+#endif
+
+	return volume_devname;
+}
+
+static void ubi_check_dev(unsigned int dev_id)
+{
+	char *devname = (char *)DEVNAME(dev_id);
+	char *pname;
+	const char *volname = NULL;
+	const char *volume_devname;
+
+	if (!is_ubi_devname(DEVNAME(dev_id)))
+		return;
+
+	IS_UBI(dev_id) = 1;
+
+	for (pname = devname; *pname != '\0'; pname++) {
+		if (*pname == ':') {
+			*pname = '\0';
+			volname = pname + 1;
+			break;
+		}
+	}
+
+	if (volname) {
+		/* Let's find real volume device name */
+		volume_devname = ubi_get_volume_devname(devname, volname);
+		if (!volume_devname) {
+			fprintf(stderr, "Cannot find ubi volume \"%s\"\n",
+				volname);
+			return;
+		}
+
+		free(devname);
+		DEVNAME(dev_id) = volume_devname;
+	}
+}
+
+static int ubi_update_start(int fd, int64_t bytes)
+{
+	if (ioctl(fd, UBI_IOCVOLUP, &bytes))
+		return -1;
+	return 0;
+}
+
+static int ubi_read(int fd, void *buf, size_t count)
+{
+	ssize_t ret;
+
+	while (count > 0) {
+		ret = read(fd, buf, count);
+		if (ret > 0) {
+			count -= ret;
+			buf += ret;
+
+			continue;
+		}
+
+		if (ret == 0) {
+			/*
+			 * Happens when the volume data is shorter than
+			 * expected. If we returned an error status here, it
+			 * would be treated as a UBI device error.
+			 *
+			 * Leave catching this error to CRC check.
+			 */
+			fprintf(stderr, "Warning: end of data on ubi volume\n");
+			return 0;
+		} else if (errno == EBADF) {
+			/*
+			 * Happens in case of corrupted volume. The same as
+			 * above, we cannot return error now, as we will still
+			 * be able to successfully write environment later.
+			 */
+			fprintf(stderr, "Warning: corrupted volume?\n");
+			return 0;
+		} else if (errno == EINTR) {
+			continue;
+		}
+
+		fprintf(stderr, "Cannot read %u bytes from ubi volume, %s\n",
+			(unsigned int)count, strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
+static int ubi_write(int fd, const void *buf, size_t count)
+{
+	ssize_t ret;
+
+	while (count > 0) {
+		ret = write(fd, buf, count);
+		if (ret <= 0) {
+			if (ret < 0 && errno == EINTR)
+				continue;
+
+			fprintf(stderr, "Cannot write %u bytes to ubi volume\n",
+				(unsigned int)count);
+			return -1;
+		}
+
+		count -= ret;
+		buf += ret;
+	}
+
+	return 0;
+}
+
+static int flash_io(int mode);
+static int parse_config(struct env_opts *opts);
+
+#if defined(CONFIG_FILE)
+static int get_config(char *);
+#endif
+
+static char *skip_chars(char *s)
+{
+	for (; *s != '\0'; s++) {
+		if (isblank(*s) || *s == '=')
+			return s;
+	}
+	return NULL;
+}
+
+static char *skip_blanks(char *s)
+{
+	for (; *s != '\0'; s++) {
+		if (!isblank(*s))
+			return s;
+	}
+	return NULL;
+}
+
+/*
+ * s1 is either a simple 'name', or a 'name=value' pair.
+ * s2 is a 'name=value' pair.
+ * If the names match, return the value of s2, else NULL.
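+ *
+ * For example (illustrative values), envmatch("bootdelay", "bootdelay=3")
+ * returns a pointer to the "3", while envmatch("bootcmd", "bootdelay=3")
+ * returns NULL.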
+ */
+static char *envmatch(char *s1, char *s2)
+{
+	if (s1 == NULL || s2 == NULL)
+		return NULL;
+
+	while (*s1 == *s2++)
+		if (*s1++ == '=')
+			return s2;
+	if (*s1 == '\0' && *(s2 - 1) == '=')
+		return s2;
+	return NULL;
+}
+
+/**
+ * Search the environment for a variable.
+ * Return the value, if found, or NULL, if not found.
+ */
+char *fw_getenv(char *name)
+{
+	char *env, *nxt;
+
+	for (env = environment.data; *env; env = nxt + 1) {
+		char *val;
+
+		for (nxt = env; *nxt; ++nxt) {
+			if (nxt >= &environment.data[ENV_SIZE]) {
+				fprintf(stderr, "## Error: "
+					"environment not terminated\n");
+				return NULL;
+			}
+		}
+		val = envmatch(name, env);
+		if (!val)
+			continue;
+		return val;
+	}
+	return NULL;
+}
+
+/*
+ * Search the default environment for a variable.
+ * Return the value, if found, or NULL, if not found.
+ */
+char *fw_getdefenv(char *name)
+{
+	char *env, *nxt;
+
+	for (env = default_environment; *env; env = nxt + 1) {
+		char *val;
+
+		for (nxt = env; *nxt; ++nxt) {
+			if (nxt >= &default_environment[ENV_SIZE]) {
+				fprintf(stderr, "## Error: "
+					"default environment not terminated\n");
+				return NULL;
+			}
+		}
+		val = envmatch(name, env);
+		if (!val)
+			continue;
+		return val;
+	}
+	return NULL;
+}
+
+/*
+ * Print the current definition of one, or more, or all
+ * environment variables
+ */
+int fw_printenv(int argc, char *argv[], int value_only, struct env_opts *opts)
+{
+	int i, rc = 0;
+
+	if (value_only && argc != 1) {
+		fprintf(stderr,
+			"## Error: `-n'/`--noheader' option requires exactly one argument\n");
+		return -1;
+	}
+
+	if (!opts)
+		opts = &default_opts;
+
+	if (fw_env_open(opts))
+		return -1;
+
+	if (argc == 0) {	/* Print all env variables  */
+		char *env, *nxt;
+		for (env = environment.data; *env; env = nxt + 1) {
+			for (nxt = env; *nxt; ++nxt) {
+				if (nxt >= &environment.data[ENV_SIZE]) {
+					fprintf(stderr, "## Error: "
+						"environment not terminated\n");
+					return -1;
+				}
+			}
+
+			printf("%s\n", env);
+		}
+		fw_env_close(opts);
+		return 0;
+	}
+
+	for (i = 0; i < argc; ++i) {	/* print a subset of env variables */
+		char *name = argv[i];
+		char *val = NULL;
+
+		val = fw_getenv(name);
+		if (!val) {
+			fprintf(stderr, "## Error: \"%s\" not defined\n", name);
+			rc = -1;
+			continue;
+		}
+
+		if (value_only) {
+			puts(val);
+			break;
+		}
+
+		printf("%s=%s\n", name, val);
+	}
+
+	fw_env_close(opts);
+
+	return rc;
+}
+
+int fw_env_flush(struct env_opts *opts)
+{
+	if (!opts)
+		opts = &default_opts;
+
+	/*
+	 * Update CRC
+	 */
+	*environment.crc = crc32(0, (uint8_t *) environment.data, ENV_SIZE);
+
+	/* write environment back to flash */
+	if (flash_io(O_RDWR)) {
+		fprintf(stderr, "Error: can't write fw_env to flash\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Set/Clear a single variable in the environment.
+ * This is called in sequence to update the environment
+ * in RAM without updating the copy in flash after each set
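+ *
+ * A typical call sequence (sketch with illustrative variable values;
+ * error handling omitted):
+ *
+ *	fw_env_open(opts);
+ *	fw_env_write("bootdelay", "3");
+ *	fw_env_write("bootargs", "console=ttyS0,115200");
+ *	fw_env_flush(opts);
+ *	fw_env_close(opts);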
+ */
+int fw_env_write(char *name, char *value)
+{
+	int len;
+	char *env, *nxt;
+	char *oldval = NULL;
+	int deleting, creating, overwriting;
+
+	/*
+	 * search if variable with this name already exists
+	 */
+	for (nxt = env = environment.data; *env; env = nxt + 1) {
+		for (nxt = env; *nxt; ++nxt) {
+			if (nxt >= &environment.data[ENV_SIZE]) {
+				fprintf(stderr, "## Error: "
+					"environment not terminated\n");
+				errno = EINVAL;
+				return -1;
+			}
+		}
+		oldval = envmatch(name, env);
+		if (oldval)
+			break;
+	}
+
+	deleting = (oldval && !(value && strlen(value)));
+	creating = (!oldval && (value && strlen(value)));
+	overwriting = (oldval && (value && strlen(value)));
+
+	/* check for permission */
+	if (deleting) {
+		if (env_flags_validate_varaccess(name,
+		    ENV_FLAGS_VARACCESS_PREVENT_DELETE)) {
+			printf("Can't delete \"%s\"\n", name);
+			errno = EROFS;
+			return -1;
+		}
+	} else if (overwriting) {
+		if (env_flags_validate_varaccess(name,
+		    ENV_FLAGS_VARACCESS_PREVENT_OVERWR)) {
+			printf("Can't overwrite \"%s\"\n", name);
+			errno = EROFS;
+			return -1;
+		} else if (env_flags_validate_varaccess(name,
+			   ENV_FLAGS_VARACCESS_PREVENT_NONDEF_OVERWR)) {
+			const char *defval = fw_getdefenv(name);
+
+			if (defval == NULL)
+				defval = "";
+			if (strcmp(oldval, defval)
+			    != 0) {
+				printf("Can't overwrite \"%s\"\n", name);
+				errno = EROFS;
+				return -1;
+			}
+		}
+	} else if (creating) {
+		if (env_flags_validate_varaccess(name,
+		    ENV_FLAGS_VARACCESS_PREVENT_CREATE)) {
+			printf("Can't create \"%s\"\n", name);
+			errno = EROFS;
+			return -1;
+		}
+	} else
+		/* Nothing to do */
+		return 0;
+
+	if (deleting || overwriting) {
+		if (*++nxt == '\0') {
+			*env = '\0';
+		} else {
+			for (;;) {
+				*env = *nxt++;
+				if ((*env == '\0') && (*nxt == '\0'))
+					break;
+				++env;
+			}
+		}
+		*++env = '\0';
+	}
+
+	/* Delete only ? */
+	if (!value || !strlen(value))
+		return 0;
+
+	/*
+	 * Append new definition at the end
+	 */
+	for (env = environment.data; *env || *(env + 1); ++env)
+		;
+	if (env > environment.data)
+		++env;
+	/*
+	 * Overflow when:
+	 * "name" + "=" + "val" +"\0\0"  > CUR_ENVSIZE - (env-environment)
+	 */
+	len = strlen(name) + 2;
+	/* add '=' for first arg, ' ' for all others */
+	len += strlen(value) + 1;
+
+	if (len > (&environment.data[ENV_SIZE] - env)) {
+		fprintf(stderr,
+			"Error: environment overflow, \"%s\" deleted\n", name);
+		return -1;
+	}
+
+	while ((*env = *name++) != '\0')
+		env++;
+	*env = '=';
+	while ((*++env = *value++) != '\0')
+		;
+
+	/* end is marked with double '\0' */
+	*++env = '\0';
+
+	return 0;
+}
+
+/*
+ * Deletes or sets environment variables. Returns 0 on success, or -1 with
+ * errno set to one of the following error codes:
+ * EINVAL - need at least 1 argument
+ * EROFS  - certain variables ("ethaddr", "serial#") cannot be
+ *	    modified or deleted
+ *
+ */
+int fw_env_set(int argc, char *argv[], struct env_opts *opts)
+{
+	int i;
+	size_t len;
+	char *name, **valv;
+	char *oldval;
+	char *value = NULL;
+	int valc;
+	int ret;
+
+	if (!opts)
+		opts = &default_opts;
+
+	if (argc < 1) {
+		fprintf(stderr, "## Error: variable name missing\n");
+		errno = EINVAL;
+		return -1;
+	}
+
+	if (fw_env_open(opts)) {
+		fprintf(stderr, "Error: environment not initialized\n");
+		return -1;
+	}
+
+	name = argv[0];
+	valv = argv + 1;
+	valc = argc - 1;
+
+	if (env_flags_validate_env_set_params(name, valv, valc) < 0) {
+		fw_env_close(opts);
+		return -1;
+	}
+
+	len = 0;
+	for (i = 0; i < valc; ++i) {
+		char *val = valv[i];
+		size_t val_len = strlen(val);
+
+		if (value)
+			value[len - 1] = ' ';
+		oldval = value;
+		value = realloc(value, len + val_len + 1);
+		if (!value) {
+			fprintf(stderr,
+				"Cannot malloc %zu bytes: %s\n",
+				len, strerror(errno));
+			free(oldval);
+			return -1;
+		}
+
+		memcpy(value + len, val, val_len);
+		len += val_len;
+		value[len++] = '\0';
+	}
+
+	fw_env_write(name, value);
+
+	free(value);
+
+	ret = fw_env_flush(opts);
+	fw_env_close(opts);
+
+	return ret;
+}
+
+/*
+ * Parse a file and configure the U-Boot variables.
+ * The script file has a very simple format, as follows:
+ *
+ * Each line holds a name/value pair:
+ * <white spaces>variable_name<white spaces>variable_value
+ *
+ * Both variable_name and variable_value are interpreted as strings.
+ * Any character after <white spaces> and before the ending \r\n is
+ * interpreted as the variable's value (no comments are allowed on these lines!)
+ *
+ * Comments are allowed if the first character in the line is #
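+ *
+ * An example script (illustrative variable names and values):
+ *
+ *	# serial console setup
+ *	baudrate	115200
+ *	bootargs	console=ttyS0,115200 root=/dev/mmcblk0p2 rw
+ *	bootdelay	3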
+ *
+ * Returns:
+ * 0	  - OK
+ * -1     - Error
+ */
+int fw_parse_script(char *fname, struct env_opts *opts)
+{
+	FILE *fp;
+	char *line = NULL;
+	size_t linesize = 0;
+	char *name;
+	char *val;
+	int lineno = 0;
+	int len;
+	int ret = 0;
+
+	if (!opts)
+		opts = &default_opts;
+
+	if (fw_env_open(opts)) {
+		fprintf(stderr, "Error: environment not initialized\n");
+		return -1;
+	}
+
+	if (strcmp(fname, "-") == 0)
+		fp = stdin;
+	else {
+		fp = fopen(fname, "r");
+		if (fp == NULL) {
+			fprintf(stderr, "I cannot open %s for reading\n",
+				fname);
+			return -1;
+		}
+	}
+
+	while ((len = getline(&line, &linesize, fp)) != -1) {
+		lineno++;
+
+		/*
+		 * Read a whole line from the file. If the line is not
+		 * terminated, report an error and exit.
+		 */
+		if (line[len - 1] != '\n') {
+			fprintf(stderr,
+				"Line %d not correctly terminated\n",
+				lineno);
+			ret = -1;
+			break;
+		}
+
+		/* Drop ending line feed / carriage return */
+		line[--len] = '\0';
+		if (len && line[len - 1] == '\r')
+			line[--len] = '\0';
+
+		/* Skip comment or empty lines */
+		if (len == 0 || line[0] == '#')
+			continue;
+
+		/*
+		 * Search for the variable name; strip leading whitespace
+		 */
+		name = skip_blanks(line);
+		if (!name)
+			continue;
+
+		/* The first white space is the end of variable name */
+		val = skip_chars(name);
+		len = strlen(name);
+		if (val) {
+			*val++ = '\0';
+			if ((val - name) < len)
+				val = skip_blanks(val);
+			else
+				val = NULL;
+		}
+#ifdef DEBUG
+		fprintf(stderr, "Setting %s : %s\n",
+			name, val ? val : " removed");
+#endif
+
+		if (env_flags_validate_type(name, val) < 0) {
+			ret = -1;
+			break;
+		}
+
+		/*
+		 * If there is an error setting a variable,
+		 * try to save the environment and return an error
+		 */
+		if (fw_env_write(name, val)) {
+			fprintf(stderr,
+				"fw_env_write returns with error : %s\n",
+				strerror(errno));
+			ret = -1;
+			break;
+		}
+
+	}
+	free(line);
+
+	/* Close file if not stdin */
+	if (strcmp(fname, "-") != 0)
+		fclose(fp);
+
+	ret |= fw_env_flush(opts);
+
+	fw_env_close(opts);
+
+	return ret;
+}
+
+/**
+ * environment_end() - compute offset of first byte right after environment
+ * @dev - index of environment buffer
+ * Return:
+ *  device offset of first byte right after environment
+ */
+off_t environment_end(int dev)
+{
+	/* environment is block aligned */
+	return DEVOFFSET(dev) + ENVSECTORS(dev) * DEVESIZE(dev);
+}
+
+/*
+ * Test for a bad block; always returns 0 on NOR. On NAND:
+ * 0	- block is good
+ * > 0	- block is bad
+ * < 0	- failed to test
+ */
+static int flash_bad_block(int fd, uint8_t mtd_type, loff_t blockstart)
+{
+	if (mtd_type == MTD_NANDFLASH) {
+		int badblock = ioctl(fd, MEMGETBADBLOCK, &blockstart);
+
+		if (badblock < 0) {
+			perror("Cannot read bad block mark");
+			return badblock;
+		}
+
+		if (badblock) {
+#ifdef DEBUG
+			fprintf(stderr, "Bad block at 0x%llx, skipping\n",
+				(unsigned long long)blockstart);
+#endif
+			return badblock;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Read data from flash at an offset into a provided buffer. On NAND it skips
+ * bad blocks but makes sure it stays within ENVSECTORS (dev) starting from
+ * the DEVOFFSET (dev) block. On NOR the loop is only run once.
+ */
+static int flash_read_buf(int dev, int fd, void *buf, size_t count,
+			  off_t offset)
+{
+	size_t blocklen;	/* erase / write length - one block on NAND,
+				   0 on NOR */
+	size_t processed = 0;	/* progress counter */
+	size_t readlen = count;	/* current read length */
+	off_t block_seek;	/* offset inside the current block to the start
+				   of the data */
+	loff_t blockstart;	/* running start of the current block -
+				   MEMGETBADBLOCK needs 64 bits */
+	int rc;
+
+	blockstart = (offset / DEVESIZE(dev)) * DEVESIZE(dev);
+
+	/* Offset inside a block */
+	block_seek = offset - blockstart;
+
+	if (DEVTYPE(dev) == MTD_NANDFLASH) {
+		/*
+		 * NAND: calculate which blocks we are reading. We have
+		 * to read one block at a time to skip bad blocks.
+		 */
+		blocklen = DEVESIZE(dev);
+
+		/* Limit to one block for the first read */
+		if (readlen > blocklen - block_seek)
+			readlen = blocklen - block_seek;
+	} else {
+		blocklen = 0;
+	}
+
+	/* This only runs once on NOR flash */
+	while (processed < count) {
+		rc = flash_bad_block(fd, DEVTYPE(dev), blockstart);
+		if (rc < 0)	/* block test failed */
+			return -1;
+
+		if (blockstart + block_seek + readlen > environment_end(dev)) {
+			/* End of range is reached */
+			fprintf(stderr, "Too few good blocks within range\n");
+			return -1;
+		}
+
+		if (rc) {	/* block is bad */
+			blockstart += blocklen;
+			continue;
+		}
+
+		/*
+		 * If a block is bad, we retry in the next block at the same
+		 * offset - see env/nand.c::writeenv()
+		 */
+		lseek(fd, blockstart + block_seek, SEEK_SET);
+
+		rc = read(fd, buf + processed, readlen);
+		if (rc != readlen) {
+			fprintf(stderr, "Read error on %s: %s\n",
+				DEVNAME(dev), strerror(errno));
+			return -1;
+		}
+#ifdef DEBUG
+		fprintf(stderr, "Read 0x%x bytes at 0x%llx on %s\n",
+			rc, (unsigned long long)blockstart + block_seek,
+			DEVNAME(dev));
+#endif
+		processed += readlen;
+		readlen = min(blocklen, count - processed);
+		block_seek = 0;
+		blockstart += blocklen;
+	}
+
+	return processed;
+}
+
+/*
+ * Write count bytes from begin of environment, but stay within
+ * ENVSECTORS(dev) sectors of
+ * DEVOFFSET (dev). Similar to the read case above, on NOR and dataflash we
+ * erase and write the whole data at once.
+ */
+static int flash_write_buf(int dev, int fd, void *buf, size_t count)
+{
+	void *data;
+	struct erase_info_user erase;
+	size_t blocklen;	/* length of NAND block / NOR erase sector */
+	size_t erase_len;	/* whole area that can be erased - may include
+				   bad blocks */
+	size_t erasesize;	/* erase / write length - one block on NAND,
+				   whole area on NOR */
+	size_t processed = 0;	/* progress counter */
+	size_t write_total;	/* total size to actually write - excluding
+				   bad blocks */
+	off_t erase_offset;	/* offset to the first erase block (aligned)
+				   below offset */
+	off_t block_seek;	/* offset inside the erase block to the start
+				   of the data */
+	loff_t blockstart;	/* running start of the current block -
+				   MEMGETBADBLOCK needs 64 bits */
+	int rc;
+
+	/*
+	 * For non-MTD devices, only the offset and size of the environment matter
+	 */
+	if (DEVTYPE(dev) == MTD_ABSENT) {
+		blocklen = count;
+		erase_len = blocklen;
+		blockstart = DEVOFFSET(dev);
+		block_seek = 0;
+		write_total = blocklen;
+	} else {
+		blocklen = DEVESIZE(dev);
+
+		erase_offset = DEVOFFSET(dev);
+
+		/* Maximum area we may use */
+		erase_len = environment_end(dev) - erase_offset;
+
+		blockstart = erase_offset;
+
+		/* Offset inside a block */
+		block_seek = DEVOFFSET(dev) - erase_offset;
+
+		/*
+		 * Data size we actually write: from the start of the block
+		 * to the start of the data, then count bytes of data, and
+		 * to the end of the block
+		 */
+		write_total = ((block_seek + count + blocklen - 1) /
+			       blocklen) * blocklen;
+	}
+
+	/*
+	 * Support data anywhere within erase sectors: read out the complete
+	 * area to be erased, replace the environment image, write the whole
+	 * block back again.
+	 */
+	if (write_total > count) {
+		data = malloc(erase_len);
+		if (!data) {
+			fprintf(stderr,
+				"Cannot malloc %zu bytes: %s\n",
+				erase_len, strerror(errno));
+			return -1;
+		}
+
+		rc = flash_read_buf(dev, fd, data, write_total, erase_offset);
+		if (write_total != rc)
+			return -1;
+
+#ifdef DEBUG
+		fprintf(stderr, "Preserving data ");
+		if (block_seek != 0)
+			fprintf(stderr, "0x%x - 0x%lx", 0, block_seek - 1);
+		if (block_seek + count != write_total) {
+			if (block_seek != 0)
+				fprintf(stderr, " and ");
+			fprintf(stderr, "0x%lx - 0x%lx",
+				(unsigned long)block_seek + count,
+				(unsigned long)write_total - 1);
+		}
+		fprintf(stderr, "\n");
+#endif
+		/* Overwrite the old environment */
+		memcpy(data + block_seek, buf, count);
+	} else {
+		/*
+		 * We get here iff offset is block-aligned and count is a
+		 * multiple of blocklen - see write_total calculation above
+		 */
+		data = buf;
+	}
+
+	if (DEVTYPE(dev) == MTD_NANDFLASH) {
+		/*
+		 * NAND: calculate which blocks we are writing. We have
+		 * to write one block at a time to skip bad blocks.
+		 */
+		erasesize = blocklen;
+	} else {
+		erasesize = erase_len;
+	}
+
+	erase.length = erasesize;
+
+	/* This only runs once on NOR flash and SPI-dataflash */
+	while (processed < write_total) {
+		rc = flash_bad_block(fd, DEVTYPE(dev), blockstart);
+		if (rc < 0)	/* block test failed */
+			return rc;
+
+		if (blockstart + erasesize > environment_end(dev)) {
+			fprintf(stderr, "End of range reached, aborting\n");
+			return -1;
+		}
+
+		if (rc) {	/* block is bad */
+			blockstart += blocklen;
+			continue;
+		}
+
+		if (DEVTYPE(dev) != MTD_ABSENT) {
+			erase.start = blockstart;
+			ioctl(fd, MEMUNLOCK, &erase);
+			/* These do not need an explicit erase cycle */
+			if (DEVTYPE(dev) != MTD_DATAFLASH)
+				if (ioctl(fd, MEMERASE, &erase) != 0) {
+					fprintf(stderr,
+						"MTD erase error on %s: %s\n",
+						DEVNAME(dev), strerror(errno));
+					return -1;
+				}
+		}
+
+		if (lseek(fd, blockstart, SEEK_SET) == -1) {
+			fprintf(stderr,
+				"Seek error on %s: %s\n",
+				DEVNAME(dev), strerror(errno));
+			return -1;
+		}
+#ifdef DEBUG
+		fprintf(stderr, "Write 0x%llx bytes at 0x%llx\n",
+			(unsigned long long)erasesize,
+			(unsigned long long)blockstart);
+#endif
+		if (write(fd, data + processed, erasesize) != erasesize) {
+			fprintf(stderr, "Write error on %s: %s\n",
+				DEVNAME(dev), strerror(errno));
+			return -1;
+		}
+
+		if (DEVTYPE(dev) != MTD_ABSENT)
+			ioctl(fd, MEMLOCK, &erase);
+
+		processed += erasesize;
+		block_seek = 0;
+		blockstart += erasesize;
+	}
+
+	if (write_total > count)
+		free(data);
+
+	return processed;
+}
+
+/*
+ * Set obsolete flag at offset - NOR flash only
+ */
+static int flash_flag_obsolete(int dev, int fd, off_t offset)
+{
+	int rc;
+	struct erase_info_user erase;
+
+	erase.start = DEVOFFSET(dev);
+	erase.length = DEVESIZE(dev);
+	/* This relies on the fact that obsolete_flag == 0 */
+	rc = lseek(fd, offset, SEEK_SET);
+	if (rc < 0) {
+		fprintf(stderr, "Cannot seek to set the flag on %s\n",
+			DEVNAME(dev));
+		return rc;
+	}
+	ioctl(fd, MEMUNLOCK, &erase);
+	rc = write(fd, &obsolete_flag, sizeof(obsolete_flag));
+	ioctl(fd, MEMLOCK, &erase);
+	if (rc < 0)
+		perror("Could not set obsolete flag");
+
+	return rc;
+}
+
+static int flash_write(int fd_current, int fd_target, int dev_target)
+{
+	int rc;
+
+	switch (environment.flag_scheme) {
+	case FLAG_NONE:
+		break;
+	case FLAG_INCREMENTAL:
+		(*environment.flags)++;
+		break;
+	case FLAG_BOOLEAN:
+		*environment.flags = active_flag;
+		break;
+	default:
+		fprintf(stderr, "Unimplemented flash scheme %u\n",
+			environment.flag_scheme);
+		return -1;
+	}
+
+#ifdef DEBUG
+	fprintf(stderr, "Writing new environment at 0x%llx on %s\n",
+		DEVOFFSET(dev_target), DEVNAME(dev_target));
+#endif
+
+	if (IS_UBI(dev_target)) {
+		if (ubi_update_start(fd_target, CUR_ENVSIZE) < 0)
+			return -1;
+		return ubi_write(fd_target, environment.image, CUR_ENVSIZE);
+	}
+
+	rc = flash_write_buf(dev_target, fd_target, environment.image,
+			     CUR_ENVSIZE);
+	if (rc < 0)
+		return rc;
+
+	if (environment.flag_scheme == FLAG_BOOLEAN) {
+		/* Have to set obsolete flag */
+		off_t offset = DEVOFFSET(dev_current) +
+		    offsetof(struct env_image_redundant, flags);
+#ifdef DEBUG
+		fprintf(stderr,
+			"Setting obsolete flag in environment at 0x%llx on %s\n",
+			DEVOFFSET(dev_current), DEVNAME(dev_current));
+#endif
+		flash_flag_obsolete(dev_current, fd_current, offset);
+	}
+
+	return 0;
+}
+
+static int flash_read(int fd)
+{
+	int rc;
+
+	if (IS_UBI(dev_current)) {
+		DEVTYPE(dev_current) = MTD_ABSENT;
+
+		return ubi_read(fd, environment.image, CUR_ENVSIZE);
+	}
+
+	rc = flash_read_buf(dev_current, fd, environment.image, CUR_ENVSIZE,
+			    DEVOFFSET(dev_current));
+	if (rc != CUR_ENVSIZE)
+		return -1;
+
+	return 0;
+}
+
+static int flash_open_tempfile(const char **dname, const char **target_temp)
+{
+	char *dup_name = strdup(DEVNAME(dev_current));
+	char *temp_name = NULL;
+	int rc = -1;
+
+	if (!dup_name)
+		return -1;
+
+	*dname = dirname(dup_name);
+	if (!*dname)
+		goto err;
+
+	rc = asprintf(&temp_name, "%s/XXXXXX", *dname);
+	if (rc == -1)
+		goto err;
+
+	rc = mkstemp(temp_name);
+	if (rc == -1) {
+		/* fall back to in place write */
+		fprintf(stderr,
+			"Can't create %s: %s\n", temp_name, strerror(errno));
+		free(temp_name);
+	} else {
+		*target_temp = temp_name;
+		/* deliberately leak dup_name as dname /might/ point into
+		 * it and we need it for our caller
+		 */
+		dup_name = NULL;
+	}
+
+err:
+	if (dup_name)
+		free(dup_name);
+
+	return rc;
+}
+
+static int flash_io_write(int fd_current)
+{
+	int fd_target = -1, rc, dev_target;
+	const char *dname, *target_temp = NULL;
+
+	if (have_redund_env) {
+		/* switch to next partition for writing */
+		dev_target = !dev_current;
+		/* dev_target: fd_target, erase_target */
+		fd_target = open(DEVNAME(dev_target), O_RDWR);
+		if (fd_target < 0) {
+			fprintf(stderr,
+				"Can't open %s: %s\n",
+				DEVNAME(dev_target), strerror(errno));
+			rc = -1;
+			goto exit;
+		}
+	} else {
+		struct stat sb;
+
+		if (fstat(fd_current, &sb) == 0 && S_ISREG(sb.st_mode)) {
+			/* if any part of flash_open_tempfile() fails we fall
+			 * back to in-place writes
+			 */
+			fd_target = flash_open_tempfile(&dname, &target_temp);
+		}
+		dev_target = dev_current;
+		if (fd_target == -1)
+			fd_target = fd_current;
+	}
+
+	rc = flash_write(fd_current, fd_target, dev_target);
+
+	if (fsync(fd_current) && !(errno == EINVAL || errno == EROFS)) {
+		fprintf(stderr,
+			"fsync failed on %s: %s\n",
+			DEVNAME(dev_current), strerror(errno));
+	}
+
+	if (fd_current != fd_target) {
+		if (fsync(fd_target) &&
+		    !(errno == EINVAL || errno == EROFS)) {
+			fprintf(stderr,
+				"fsync failed on %s: %s\n",
+				DEVNAME(dev_target), strerror(errno));
+		}
+
+		if (close(fd_target)) {
+			fprintf(stderr,
+				"I/O error on %s: %s\n",
+				DEVNAME(dev_target), strerror(errno));
+			rc = -1;
+		}
+
+		if (target_temp) {
+			int dir_fd;
+
+			dir_fd = open(dname, O_DIRECTORY | O_RDONLY);
+			if (dir_fd == -1)
+				fprintf(stderr,
+					"Can't open %s: %s\n",
+					dname, strerror(errno));
+
+			if (rename(target_temp, DEVNAME(dev_target))) {
+				fprintf(stderr,
+					"rename failed %s => %s: %s\n",
+					target_temp, DEVNAME(dev_target),
+					strerror(errno));
+				rc = -1;
+			}
+
+			if (dir_fd != -1 && fsync(dir_fd))
+				fprintf(stderr,
+					"fsync failed on %s: %s\n",
+					dname, strerror(errno));
+
+			if (dir_fd != -1 && close(dir_fd))
+				fprintf(stderr,
+					"I/O error on %s: %s\n",
+					dname, strerror(errno));
+		}
+	}
+ exit:
+	return rc;
+}
+
+static int flash_io(int mode)
+{
+	int fd_current, rc;
+
+	/* dev_current: fd_current, erase_current */
+	fd_current = open(DEVNAME(dev_current), mode);
+	if (fd_current < 0) {
+		fprintf(stderr,
+			"Can't open %s: %s\n",
+			DEVNAME(dev_current), strerror(errno));
+		return -1;
+	}
+
+	if (mode == O_RDWR) {
+		rc = flash_io_write(fd_current);
+	} else {
+		rc = flash_read(fd_current);
+	}
+
+	if (close(fd_current)) {
+		fprintf(stderr,
+			"I/O error on %s: %s\n",
+			DEVNAME(dev_current), strerror(errno));
+		return -1;
+	}
+
+	return rc;
+}
+
+/*
+ * Prevent confusion if running from erased flash memory
+ */
+int fw_env_open(struct env_opts *opts)
+{
+	int crc0, crc0_ok;
+	unsigned char flag0;
+	void *addr0 = NULL;
+
+	int crc1, crc1_ok;
+	unsigned char flag1;
+	void *addr1 = NULL;
+
+	int ret;
+
+	struct env_image_single *single;
+	struct env_image_redundant *redundant;
+
+	if (!opts)
+		opts = &default_opts;
+
+	if (parse_config(opts))	/* should fill envdevices */
+		return -EINVAL;
+
+	addr0 = calloc(1, CUR_ENVSIZE);
+	if (addr0 == NULL) {
+		fprintf(stderr,
+			"Not enough memory for environment (%ld bytes)\n",
+			CUR_ENVSIZE);
+		ret = -ENOMEM;
+		goto open_cleanup;
+	}
+
+	/* read environment from FLASH to local buffer */
+	environment.image = addr0;
+
+	if (have_redund_env) {
+		redundant = addr0;
+		environment.crc = &redundant->crc;
+		environment.flags = &redundant->flags;
+		environment.data = redundant->data;
+	} else {
+		single = addr0;
+		environment.crc = &single->crc;
+		environment.flags = NULL;
+		environment.data = single->data;
+	}
+
+	dev_current = 0;
+	if (flash_io(O_RDONLY)) {
+		ret = -EIO;
+		goto open_cleanup;
+	}
+
+	crc0 = crc32(0, (uint8_t *)environment.data, ENV_SIZE);
+
+	crc0_ok = (crc0 == *environment.crc);
+	if (!have_redund_env) {
+		if (!crc0_ok) {
+			fprintf(stderr,
+				"Warning: Bad CRC, using default environment\n");
+			memcpy(environment.data, default_environment,
+			       sizeof(default_environment));
+		}
+	} else {
+		flag0 = *environment.flags;
+
+		dev_current = 1;
+		addr1 = calloc(1, CUR_ENVSIZE);
+		if (addr1 == NULL) {
+			fprintf(stderr,
+				"Not enough memory for environment (%ld bytes)\n",
+				CUR_ENVSIZE);
+			ret = -ENOMEM;
+			goto open_cleanup;
+		}
+		redundant = addr1;
+
+		/*
+		 * have to set environment.image for flash_read(), careful -
+		 * other pointers in environment still point inside addr0
+		 */
+		environment.image = addr1;
+		if (flash_io(O_RDONLY)) {
+			ret = -EIO;
+			goto open_cleanup;
+		}
+
+		/* Check flag scheme compatibility */
+		if (DEVTYPE(dev_current) == MTD_NORFLASH &&
+		    DEVTYPE(!dev_current) == MTD_NORFLASH) {
+			environment.flag_scheme = FLAG_BOOLEAN;
+		} else if (DEVTYPE(dev_current) == MTD_NANDFLASH &&
+			   DEVTYPE(!dev_current) == MTD_NANDFLASH) {
+			environment.flag_scheme = FLAG_INCREMENTAL;
+		} else if (DEVTYPE(dev_current) == MTD_DATAFLASH &&
+			   DEVTYPE(!dev_current) == MTD_DATAFLASH) {
+			environment.flag_scheme = FLAG_BOOLEAN;
+		} else if (DEVTYPE(dev_current) == MTD_UBIVOLUME &&
+			   DEVTYPE(!dev_current) == MTD_UBIVOLUME) {
+			environment.flag_scheme = FLAG_INCREMENTAL;
+		} else if (DEVTYPE(dev_current) == MTD_ABSENT &&
+			   DEVTYPE(!dev_current) == MTD_ABSENT &&
+			   IS_UBI(dev_current) == IS_UBI(!dev_current)) {
+			environment.flag_scheme = FLAG_INCREMENTAL;
+		} else {
+			fprintf(stderr, "Incompatible flash types!\n");
+			ret = -EINVAL;
+			goto open_cleanup;
+		}
+
+		crc1 = crc32(0, (uint8_t *)redundant->data, ENV_SIZE);
+
+		crc1_ok = (crc1 == redundant->crc);
+		flag1 = redundant->flags;
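+		/*
+		 * (Added note) Copy selection below: a copy with a bad CRC is
+		 * never used; if both CRCs are good, the flags decide. For
+		 * FLAG_INCREMENTAL the higher counter wins (e.g. flag0 = 3,
+		 * flag1 = 2 selects copy 0), with the 255/0 pair treated as a
+		 * counter wrap-around.
+		 */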
+
+		if (crc0_ok && !crc1_ok) {
+			dev_current = 0;
+		} else if (!crc0_ok && crc1_ok) {
+			dev_current = 1;
+		} else if (!crc0_ok && !crc1_ok) {
+			fprintf(stderr,
+				"Warning: Bad CRC, using default environment\n");
+			memcpy(environment.data, default_environment,
+			       sizeof(default_environment));
+			dev_current = 0;
+		} else {
+			switch (environment.flag_scheme) {
+			case FLAG_BOOLEAN:
+				if (flag0 == active_flag &&
+				    flag1 == obsolete_flag) {
+					dev_current = 0;
+				} else if (flag0 == obsolete_flag &&
+					   flag1 == active_flag) {
+					dev_current = 1;
+				} else if (flag0 == flag1) {
+					dev_current = 0;
+				} else if (flag0 == 0xFF) {
+					dev_current = 0;
+				} else if (flag1 == 0xFF) {
+					dev_current = 1;
+				} else {
+					dev_current = 0;
+				}
+				break;
+			case FLAG_INCREMENTAL:
+				if (flag0 == 255 && flag1 == 0)
+					dev_current = 1;
+				else if ((flag1 == 255 && flag0 == 0) ||
+					 flag0 >= flag1)
+					dev_current = 0;
+				else	/* flag1 > flag0 */
+					dev_current = 1;
+				break;
+			default:
+				fprintf(stderr, "Unknown flag scheme %u\n",
+					environment.flag_scheme);
+				return -1;
+			}
+		}
+
+		/*
+		 * If we are reading, we don't need the flag and the CRC any
+		 * more; if we are writing, we will re-calculate the CRC and
+		 * update the flags before writing out
+		 */
+		if (dev_current) {
+			environment.image = addr1;
+			environment.crc = &redundant->crc;
+			environment.flags = &redundant->flags;
+			environment.data = redundant->data;
+			free(addr0);
+		} else {
+			environment.image = addr0;
+			/* Other pointers are already set */
+			free(addr1);
+		}
+#ifdef DEBUG
+		fprintf(stderr, "Selected env in %s\n", DEVNAME(dev_current));
+#endif
+	}
+	return 0;
+
+ open_cleanup:
+	if (addr0)
+		free(addr0);
+
+	if (addr1)
+		free(addr1);
+
+	return ret;
+}
+
+/*
+ * Simply free allocated buffer with environment
+ */
+int fw_env_close(struct env_opts *opts)
+{
+	if (environment.image)
+		free(environment.image);
+
+	environment.image = NULL;
+
+	return 0;
+}
+
+static int check_device_config(int dev)
+{
+	struct stat st;
+	int32_t lnum = 0;
+	int fd, rc = 0;
+
+	/* Fills in IS_UBI(), converts DEVNAME() with ubi volume name */
+	ubi_check_dev(dev);
+
+	fd = open(DEVNAME(dev), O_RDONLY);
+	if (fd < 0) {
+		fprintf(stderr,
+			"Cannot open %s: %s\n", DEVNAME(dev), strerror(errno));
+		return -1;
+	}
+
+	rc = fstat(fd, &st);
+	if (rc < 0) {
+		fprintf(stderr, "Cannot stat the file %s\n", DEVNAME(dev));
+		goto err;
+	}
+
+	if (IS_UBI(dev)) {
+		rc = ioctl(fd, UBI_IOCEBISMAP, &lnum);
+		if (rc < 0) {
+			fprintf(stderr, "Cannot get UBI information for %s\n",
+				DEVNAME(dev));
+			goto err;
+		}
+	} else if (S_ISCHR(st.st_mode)) {
+		struct mtd_info_user mtdinfo;
+		rc = ioctl(fd, MEMGETINFO, &mtdinfo);
+		if (rc < 0) {
+			fprintf(stderr, "Cannot get MTD information for %s\n",
+				DEVNAME(dev));
+			goto err;
+		}
+		if (mtdinfo.type != MTD_NORFLASH &&
+		    mtdinfo.type != MTD_NANDFLASH &&
+		    mtdinfo.type != MTD_DATAFLASH &&
+		    mtdinfo.type != MTD_UBIVOLUME) {
+			fprintf(stderr, "Unsupported flash type %u on %s\n",
+				mtdinfo.type, DEVNAME(dev));
+			goto err;
+		}
+		DEVTYPE(dev) = mtdinfo.type;
+		if (DEVESIZE(dev) == 0)
+			/* Assume the erase size is the same as the env-size */
+			DEVESIZE(dev) = ENVSIZE(dev);
+	} else {
+		uint64_t size;
+		DEVTYPE(dev) = MTD_ABSENT;
+		if (DEVESIZE(dev) == 0)
+			/* Assume the erase size to be 512 bytes */
+			DEVESIZE(dev) = 0x200;
+
+		/*
+		 * Check for negative offsets, treat it as backwards offset
+		 * from the end of the block device
+		 */
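+		/*
+		 * Illustrative example (added note): on a 64 MiB block device
+		 * a configured offset of -0x20000 resolves to
+		 * 0x4000000 - 0x20000 = 0x3fe0000, i.e. the last 128 KiB.
+		 */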
+		if (DEVOFFSET(dev) < 0) {
+			rc = ioctl(fd, BLKGETSIZE64, &size);
+			if (rc < 0) {
+				fprintf(stderr,
+					"Could not get block device size on %s\n",
+					DEVNAME(dev));
+				goto err;
+			}
+
+			DEVOFFSET(dev) = DEVOFFSET(dev) + size;
+#ifdef DEBUG
+			fprintf(stderr,
+				"Calculated device offset 0x%llx on %s\n",
+				DEVOFFSET(dev), DEVNAME(dev));
+#endif
+		}
+	}
+
+	if (ENVSECTORS(dev) == 0)
+		/* Assume enough sectors to cover the environment */
+		ENVSECTORS(dev) = DIV_ROUND_UP(ENVSIZE(dev), DEVESIZE(dev));
+
+	if (DEVOFFSET(dev) % DEVESIZE(dev) != 0) {
+		fprintf(stderr,
+			"Environment does not start on (erase) block boundary\n");
+		errno = EINVAL;
+		return -1;
+	}
+
+	if (ENVSIZE(dev) > ENVSECTORS(dev) * DEVESIZE(dev)) {
+		fprintf(stderr,
+			"Environment does not fit into available sectors\n");
+		errno = EINVAL;
+		return -1;
+	}
+
+ err:
+	close(fd);
+	return rc;
+}
+
+static int parse_config(struct env_opts *opts)
+{
+	int rc;
+
+	if (!opts)
+		opts = &default_opts;
+
+#if defined(CONFIG_FILE)
+	/* Fills in DEVNAME(), ENVSIZE(), DEVESIZE(). Or don't. */
+	if (get_config(opts->config_file)) {
+		fprintf(stderr, "Cannot parse config file '%s': %m\n",
+			opts->config_file);
+		return -1;
+	}
+#else
+	DEVNAME(0) = DEVICE1_NAME;
+	DEVOFFSET(0) = DEVICE1_OFFSET;
+	ENVSIZE(0) = ENV1_SIZE;
+
+	/* Set defaults for DEVESIZE, ENVSECTORS later once we
+	 * know DEVTYPE
+	 */
+#ifdef DEVICE1_ESIZE
+	DEVESIZE(0) = DEVICE1_ESIZE;
+#endif
+#ifdef DEVICE1_ENVSECTORS
+	ENVSECTORS(0) = DEVICE1_ENVSECTORS;
+#endif
+
+#ifdef HAVE_REDUND
+	DEVNAME(1) = DEVICE2_NAME;
+	DEVOFFSET(1) = DEVICE2_OFFSET;
+	ENVSIZE(1) = ENV2_SIZE;
+
+	/* Set defaults for DEVESIZE, ENVSECTORS later once we
+	 * know DEVTYPE
+	 */
+#ifdef DEVICE2_ESIZE
+	DEVESIZE(1) = DEVICE2_ESIZE;
+#endif
+#ifdef DEVICE2_ENVSECTORS
+	ENVSECTORS(1) = DEVICE2_ENVSECTORS;
+#endif
+	have_redund_env = 1;
+#endif
+#endif
+	rc = check_device_config(0);
+	if (rc < 0)
+		return rc;
+
+	if (have_redund_env) {
+		rc = check_device_config(1);
+		if (rc < 0)
+			return rc;
+
+		if (ENVSIZE(0) != ENVSIZE(1)) {
+			fprintf(stderr,
+				"Redundant environments have unequal size\n");
+			return -1;
+		}
+	}
+
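+	/*
+	 * (Added note) The image starts with a 32-bit CRC; redundant setups
+	 * additionally carry a one-byte flags field, so the space usable for
+	 * "name=value" strings shrinks accordingly.
+	 */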
+	usable_envsize = CUR_ENVSIZE - sizeof(uint32_t);
+	if (have_redund_env)
+		usable_envsize -= sizeof(char);
+
+	return 0;
+}
+
+#if defined(CONFIG_FILE)
+static int get_config(char *fname)
+{
+	FILE *fp;
+	int i = 0;
+	int rc;
+	char *line = NULL;
+	size_t linesize = 0;
+	char *devname;
+
+	fp = fopen(fname, "r");
+	if (fp == NULL)
+		return -1;
+
+	while (i < 2 && getline(&line, &linesize, fp) != -1) {
+		/* Skip comment strings */
+		if (line[0] == '#')
+			continue;
+
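+		/*
+		 * (Added note) Example line, as in fw_env.config:
+		 *   /dev/mtd1  0x0000  0x4000  0x4000  1
+		 * i.e. device name, device offset, env size, sector size and
+		 * number of sectors; the last two fields are optional.
+		 */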
+		rc = sscanf(line, "%ms %lli %lx %lx %lx",
+			    &devname,
+			    &DEVOFFSET(i),
+			    &ENVSIZE(i), &DEVESIZE(i), &ENVSECTORS(i));
+
+		if (rc < 3)
+			continue;
+
+		DEVNAME(i) = devname;
+
+		/* Set defaults for DEVESIZE, ENVSECTORS later once we
+		 * know DEVTYPE
+		 */
+
+		i++;
+	}
+	free(line);
+	fclose(fp);
+
+	have_redund_env = i - 1;
+	if (!i) {		/* No valid entries found */
+		errno = EINVAL;
+		return -1;
+	} else
+		return 0;
+}
+#endif
diff --git a/tools/u-boot-tools/env/fw_env.config b/tools/u-boot-tools/env/fw_env.config
new file mode 100644
index 0000000000000000000000000000000000000000..053895a2c07b23891e098c2370066204f04b8ea1
--- /dev/null
+++ b/tools/u-boot-tools/env/fw_env.config
@@ -0,0 +1,38 @@
+# Configuration file for fw_(printenv/setenv) utility.
+# Up to two entries are valid, in this case the redundant
+# environment sector is assumed present.
+# Note that the "Number of sectors" is not required on NOR and SPI-dataflash.
+# Furthermore, if the Flash sector size is omitted, this value is assumed to
+# be the same as the Environment size, which is valid for NOR and SPI-dataflash.
+# Device offset must be prefixed with 0x to be parsed as a hexadecimal value.
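+#
+# Each non-comment line has the form (the last two fields are optional):
+#   <device name>  <device offset>  <env size>  [sector size]  [number of sectors]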
+
+# NOR example
+# MTD device name	Device offset	Env. size	Flash sector size	Number of sectors
+/dev/mtd1		0x0000		0x4000		0x4000
+/dev/mtd2		0x0000		0x4000		0x4000
+
+# MTD SPI-dataflash example
+# MTD device name	Device offset	Env. size	Flash sector size	Number of sectors
+#/dev/mtd5		0x4200		0x4200
+#/dev/mtd6		0x4200		0x4200
+
+# NAND example
+#/dev/mtd0		0x4000		0x4000		0x20000			2
+
+# On a block device a negative offset is treated as a backwards offset from the
+# end of the device/partition, rather than a forwards offset from the start.
+
+# Block device example
+#/dev/mmcblk0		0xc0000		0x20000
+#/dev/mmcblk0		-0x20000	0x20000
+
+# VFAT example
+#/boot/uboot.env	0x0000          0x4000
+
+# UBI volume
+#/dev/ubi0_0		0x0		0x1f000		0x1f000
+#/dev/ubi0_1		0x0		0x1f000		0x1f000
+
+# UBI volume by name
+#/dev/ubi0:env		0x0		0x1f000		0x1f000
+#/dev/ubi0:env-redund	0x0		0x1f000		0x1f000
diff --git a/tools/u-boot-tools/env/fw_env.h b/tools/u-boot-tools/env/fw_env.h
new file mode 100644
index 0000000000000000000000000000000000000000..b250e2f3d75ec30b272bb1b396f4a1f348e1b56f
--- /dev/null
+++ b/tools/u-boot-tools/env/fw_env.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2002-2008
+ * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ */
+
+#include <stdint.h>
+
+/*
+ * Programs using the library must check which API is available;
+ * it varies depending on the U-Boot version.
+ * This may change in the future.
+ */
+#define FW_ENV_API_VERSION	1
+
+struct env_opts {
+#ifdef CONFIG_FILE
+	char *config_file;
+#endif
+	char *lockname;
+};
+
+/**
+ * fw_printenv() - print one or several environment variables
+ *
+ * @argc: number of variables names to be printed, prints all if 0
+ * @argv: array of variable names to be printed, if argc != 0
+ * @value_only: do not repeat the variable name, print the bare value;
+ *          only one variable is allowed with this option, argc must be 1
+ * @opts: encryption key, configuration file, defaults are used if NULL
+ *
+ * Description:
+ *  Uses fw_env_open, fw_getenv
+ *
+ * Return:
+ *  0 on success, -1 on failure (modifies errno)
+ */
+int fw_printenv(int argc, char *argv[], int value_only, struct env_opts *opts);
+
+/**
+ * fw_env_set() - adds or removes one variable in the environment
+ *
+ * @argc: number of strings in argv, argv[0] is variable name,
+ *          argc==1 means erase variable, argc > 1 means add a variable
+ * @argv: argv[0] is variable name, argv[1..argc-1] are concatenated separated
+ *           by single blank and set as the new value of the variable
+ * @opts: how to retrieve environment from flash, defaults are used if NULL
+ *
+ * Description:
+ *  Uses fw_env_open, fw_env_write, fw_env_flush
+ *
+ * Return:
+ *  0 on success, -1 on failure (modifies errno)
+ *
+ * ERRORS:
+ *  EROFS - some variables ("ethaddr", "serial#") cannot be modified
+ */
+int fw_env_set(int argc, char *argv[], struct env_opts *opts);
+
+/**
+ * fw_parse_script() - adds or removes multiple variables with a batch script
+ *
+ * @fname: batch script file name
+ * @opts: encryption key, configuration file, defaults are used if NULL
+ *
+ * Description:
+ *  Uses fw_env_open, fw_env_write, fw_env_flush
+ *
+ * Return:
+ *  0 success, -1 on failure (modifies errno)
+ *
+ * Script Syntax:
+ *
+ *  key [ [space]+ value]
+ *
+ *  lines starting with '#' are treated as comments
+ *
+ *  A variable without value will be deleted. Any number of spaces are allowed
+ *  between key and value. The value starts with the first non-space character
+ *  and ends with newline. No comments allowed on these lines.  Spaces inside
+ *  the value are preserved verbatim.
+ *
+ * Script Example:
+ *
+ *  netdev         eth0
+ *
+ *  kernel_addr    400000
+ *
+ *  foo            spaces           are copied verbatim
+ *
+ *  # delete variable bar
+ *
+ *  bar
+ */
+int fw_parse_script(char *fname, struct env_opts *opts);
+
+
+/**
+ * fw_env_open() - read environment from flash into RAM cache
+ *
+ * @opts: encryption key, configuration file, defaults are used if NULL
+ *
+ * Return:
+ *  0 on success, -1 on failure (modifies errno)
+ */
+int fw_env_open(struct env_opts *opts);
+
+/**
+ * fw_getenv() - lookup variable in the RAM cache
+ *
+ * @name: variable to be searched
+ * Return:
+ *  pointer to start of value, NULL if not found
+ */
+char *fw_getenv(char *name);
+
+/**
+ * fw_env_write() - modify a variable held in the RAM cache
+ *
+ * @name: variable
+ * @value: delete variable if NULL, otherwise create or overwrite the variable
+ *
+ * This is called in sequence to update the environment in RAM without updating
+ * the copy in flash after each set
+ *
+ * Return:
+ *  0 on success, -1 on failure (modifies errno)
+ *
+ * ERRORS:
+ *  EROFS - some variables ("ethaddr", "serial#") cannot be modified
+ */
+int fw_env_write(char *name, char *value);
+
+/**
+ * fw_env_flush - write the environment from RAM cache back to flash
+ *
+ * @opts: encryption key, configuration file, defaults are used if NULL
+ *
+ * Return:
+ *  0 on success, -1 on failure (modifies errno)
+ */
+int fw_env_flush(struct env_opts *opts);
+
+/**
+ * fw_env_close - free allocated structure and close env
+ *
+ * @opts: encryption key, configuration file, defaults are used if NULL
+ *
+ * Return:
+ *  0 on success, -1 on failure (modifies errno)
+ */
+int fw_env_close(struct env_opts *opts);
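+
+/*
+ * Illustrative usage sketch (added note, not part of the upstream API
+ * documentation); passing NULL selects the default options:
+ *
+ *	if (fw_env_open(NULL) == 0) {
+ *		char *val = fw_getenv("bootdelay");
+ *		if (val)
+ *			printf("bootdelay=%s\n", val);
+ *		fw_env_write("bootdelay", "3");
+ *		fw_env_flush(NULL);
+ *		fw_env_close(NULL);
+ *	}
+ */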
+
+
+/**
+ * fw_env_version - return the current version of the library
+ *
+ * Return:
+ *  version string of the library
+ */
+char *fw_env_version(void);
+
+unsigned long crc32(unsigned long, const unsigned char *, unsigned);
diff --git a/tools/u-boot-tools/env/fw_env_main.c b/tools/u-boot-tools/env/fw_env_main.c
new file mode 100644
index 0000000000000000000000000000000000000000..26ba6624cd19cb2be078e683122c95506ca11d26
--- /dev/null
+++ b/tools/u-boot-tools/env/fw_env_main.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2000-2008
+ * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ */
+
+/*
+ * Command line user interface to firmware (=U-Boot) environment.
+ *
+ * Implements:
+ *	fw_printenv [ -a key ] [[ -n name ] | [ name ... ]]
+ *              - prints the value of a single environment variable
+ *                "name", the ``name=value'' pairs of one or more
+ *                environment variables "name", or the whole
+ *                environment if no names are specified.
+ *	fw_setenv [ -a key ] name [ value ... ]
+ *		- If a name without any values is given, the variable
+ *		  with this name is deleted from the environment;
+ *		  otherwise, all "value" arguments are concatenated,
+ *		  separated by single blank characters, and the
+ *		  resulting string is assigned to the environment
+ *		  variable "name"
+ *
+ * If '-a key' is specified, the env block is encrypted with AES 128 CBC.
+ * The 'key' argument is in the format of 32 hexadecimal numbers (16 bytes
+ * of AES key), eg. '-a aabbccddeeff00112233445566778899'.
+ */
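+
+/*
+ * Typical invocations (illustrative, added note):
+ *	fw_printenv			print the whole environment
+ *	fw_printenv -n bootdelay	print only the value of "bootdelay"
+ *	fw_setenv bootdelay 3		set "bootdelay" to "3"
+ *	fw_setenv bootdelay		delete "bootdelay"
+ */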
+
+#include <fcntl.h>
+#include <getopt.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/file.h>
+#include <unistd.h>
+#include <version.h>
+#include "fw_env_private.h"
+#include "fw_env.h"
+
+#define CMD_PRINTENV	"fw_printenv"
+#define CMD_SETENV	"fw_setenv"
+static int do_printenv;
+
+static struct option long_options[] = {
+	{"config", required_argument, NULL, 'c'},
+	{"help", no_argument, NULL, 'h'},
+	{"script", required_argument, NULL, 's'},
+	{"noheader", no_argument, NULL, 'n'},
+	{"lock", required_argument, NULL, 'l'},
+	{"version", no_argument, NULL, 'v'},
+	{NULL, 0, NULL, 0}
+};
+
+static struct env_opts env_opts;
+
+/* setenv options */
+static int noheader;
+
+/* getenv options */
+static char *script_file;
+
+void usage_printenv(void)
+{
+
+	fprintf(stderr,
+		"Usage: fw_printenv [OPTIONS]... [VARIABLE]...\n"
+		"Print variables from U-Boot environment\n"
+		"\n"
+		" -h, --help           print this help.\n"
+		" -v, --version        display version\n"
+#ifdef CONFIG_FILE
+		" -c, --config         configuration file, default:" CONFIG_FILE "\n"
+#endif
+		" -n, --noheader       do not repeat variable name in output\n"
+		" -l, --lock           lock node, default:/var/lock\n"
+		"\n");
+}
+
+void usage_env_set(void)
+{
+	fprintf(stderr,
+		"Usage: fw_setenv [OPTIONS]... [VARIABLE]...\n"
+		"Modify variables in U-Boot environment\n"
+		"\n"
+		" -h, --help           print this help.\n"
+		" -v, --version        display version\n"
+#ifdef CONFIG_FILE
+		" -c, --config         configuration file, default:" CONFIG_FILE "\n"
+#endif
+		" -l, --lock           lock node, default:/var/lock\n"
+		" -s, --script         batch mode to minimize writes\n"
+		"\n"
+		"Examples:\n"
+		"  fw_setenv foo bar   set variable foo equal to bar\n"
+		"  fw_setenv foo       clear variable foo\n"
+		"  fw_setenv --script file run batch script\n"
+		"\n"
+		"Script Syntax:\n"
+		"  key [space] value\n"
+		"  lines starting with '#' are treated as comment\n"
+		"\n"
+		"  A variable without value will be deleted. Any number of spaces are\n"
+		"  allowed between key and value. Space inside of the value is treated\n"
+		"  as part of the value itself.\n"
+		"\n"
+		"Script Example:\n"
+		"  netdev         eth0\n"
+		"  kernel_addr    400000\n"
+		"  foo            empty empty empty    empty empty empty\n"
+		"  bar\n"
+		"\n");
+}
+
+static void parse_common_args(int argc, char *argv[])
+{
+	int c;
+
+#ifdef CONFIG_FILE
+	env_opts.config_file = CONFIG_FILE;
+#endif
+
+	while ((c = getopt_long(argc, argv, ":a:c:l:h:v", long_options, NULL)) !=
+	       EOF) {
+		switch (c) {
+#ifdef CONFIG_FILE
+		case 'c':
+			env_opts.config_file = optarg;
+			break;
+#endif
+		case 'l':
+			env_opts.lockname = optarg;
+			break;
+		case 'h':
+			do_printenv ? usage_printenv() : usage_env_set();
+			exit(EXIT_SUCCESS);
+			break;
+		case 'v':
+			fprintf(stderr, "Compiled with " U_BOOT_VERSION "\n");
+			exit(EXIT_SUCCESS);
+			break;
+		default:
+			/* ignore unknown options */
+			break;
+		}
+	}
+
+	/* Reset getopt for the next pass. */
+	opterr = 1;
+	optind = 1;
+}
+
+int parse_printenv_args(int argc, char *argv[])
+{
+	int c;
+
+	parse_common_args(argc, argv);
+
+	while ((c = getopt_long(argc, argv, "a:c:ns:l:h:v", long_options, NULL))
+		!= EOF) {
+		switch (c) {
+		case 'n':
+			noheader = 1;
+			break;
+		case 'a':
+		case 'c':
+		case 'h':
+		case 'l':
+			/* ignore common options */
+			break;
+		default: /* '?' */
+			usage_printenv();
+			exit(EXIT_FAILURE);
+			break;
+		}
+	}
+	return 0;
+}
+
+int parse_setenv_args(int argc, char *argv[])
+{
+	int c;
+
+	parse_common_args(argc, argv);
+
+	while ((c = getopt_long(argc, argv, "a:c:ns:l:h:v", long_options, NULL))
+		!= EOF) {
+		switch (c) {
+		case 's':
+			script_file = optarg;
+			break;
+		case 'a':
+		case 'c':
+		case 'h':
+		case 'l':
+			/* ignore common options */
+			break;
+		default: /* '?' */
+			usage_env_set();
+			exit(EXIT_FAILURE);
+			break;
+		}
+	}
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	char *lockname = "/var/lock/" CMD_PRINTENV ".lock";
+	int lockfd = -1;
+	int retval = EXIT_SUCCESS;
+	char *_cmdname;
+
+	_cmdname = *argv;
+	if (strrchr(_cmdname, '/') != NULL)
+		_cmdname = strrchr(_cmdname, '/') + 1;
+
+	if (strcmp(_cmdname, CMD_PRINTENV) == 0) {
+		do_printenv = 1;
+	} else if (strcmp(_cmdname, CMD_SETENV) == 0) {
+		do_printenv = 0;
+	} else {
+		fprintf(stderr,
+			"Identity crisis - may be called as `%s' or as `%s' but not as `%s'\n",
+			CMD_PRINTENV, CMD_SETENV, _cmdname);
+		exit(EXIT_FAILURE);
+	}
+
+	if (do_printenv) {
+		if (parse_printenv_args(argc, argv))
+			exit(EXIT_FAILURE);
+	} else {
+		if (parse_setenv_args(argc, argv))
+			exit(EXIT_FAILURE);
+	}
+
+	/* shift parsed flags, jump to non-option arguments */
+	argc -= optind;
+	argv += optind;
+
+	if (env_opts.lockname) {
+		lockname = malloc(strlen(env_opts.lockname) +
+				sizeof(CMD_PRINTENV) + 10);
+		if (!lockname) {
+			fprintf(stderr, "Unable to allocate memory\n");
+			exit(EXIT_FAILURE);
+		}
+
+		sprintf(lockname, "%s/%s.lock",
+			env_opts.lockname, CMD_PRINTENV);
+	}
+
+	lockfd = open(lockname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+	if (-1 == lockfd) {
+		fprintf(stderr, "Error opening lock file %s\n", lockname);
+		return EXIT_FAILURE;
+	}
+
+	if (-1 == flock(lockfd, LOCK_EX)) {
+		fprintf(stderr, "Error locking file %s\n", lockname);
+		close(lockfd);
+		return EXIT_FAILURE;
+	}
+
+	if (do_printenv) {
+		if (fw_printenv(argc, argv, noheader, &env_opts) != 0)
+			retval = EXIT_FAILURE;
+	} else {
+		if (!script_file) {
+			if (fw_env_set(argc, argv, &env_opts) != 0)
+				retval = EXIT_FAILURE;
+		} else {
+			if (fw_parse_script(script_file, &env_opts) != 0)
+				retval = EXIT_FAILURE;
+		}
+	}
+
+	if (env_opts.lockname)
+		free(lockname);
+
+	flock(lockfd, LOCK_UN);
+	close(lockfd);
+	return retval;
+}
diff --git a/tools/u-boot-tools/env/fw_env_private.h b/tools/u-boot-tools/env/fw_env_private.h
new file mode 100644
index 0000000000000000000000000000000000000000..86be16dabc621b61e2f97274457042f76d39d0f6
--- /dev/null
+++ b/tools/u-boot-tools/env/fw_env_private.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2002-2008
+ * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ */
+
+/* Pull in the current config to define the default environment */
+#include <linux/kconfig.h>
+
+#ifndef __ASSEMBLY__
+#define __ASSEMBLY__ /* get only #defines from config.h */
+#include <config.h>
+#undef	__ASSEMBLY__
+#else
+#include <config.h>
+#endif
+
+/*
+ * To build the utility with the static configuration
+ * comment out the next line.
+ * See included "fw_env.config" sample file
+ * for notes on configuration.
+ */
+#define CONFIG_FILE     "/etc/fw_env.config"
+
+#ifndef CONFIG_FILE
+#define HAVE_REDUND /* For systems with 2 env sectors */
+#define DEVICE1_NAME      "/dev/mtd1"
+#define DEVICE2_NAME      "/dev/mtd2"
+#define DEVICE1_OFFSET    0x0000
+#define ENV1_SIZE         0x4000
+#define DEVICE1_ESIZE     0x4000
+#define DEVICE1_ENVSECTORS     2
+#define DEVICE2_OFFSET    0x0000
+#define ENV2_SIZE         0x4000
+#define DEVICE2_ESIZE     0x4000
+#define DEVICE2_ENVSECTORS     2
+#endif
+
+#ifndef CONFIG_BAUDRATE
+#define CONFIG_BAUDRATE		115200
+#endif
+
+#ifndef CONFIG_BOOTDELAY
+#define CONFIG_BOOTDELAY	5	/* autoboot after 5 seconds	*/
+#endif
+
+#ifndef CONFIG_BOOTCOMMAND
+#define CONFIG_BOOTCOMMAND						\
+	"bootp; "							\
+	"setenv bootargs root=/dev/nfs nfsroot=${serverip}:${rootpath} "\
+	"ip=${ipaddr}:${serverip}:${gatewayip}:${netmask}:${hostname}::off; "\
+	"bootm"
+#endif
diff --git a/tools/u-boot-tools/env/linux_string.c b/tools/u-boot-tools/env/linux_string.c
new file mode 100644
index 0000000000000000000000000000000000000000..6c01addadf9f39d1616619ff9718995423da1c30
--- /dev/null
+++ b/tools/u-boot-tools/env/linux_string.c
@@ -0,0 +1 @@
+#include "../../lib/linux_string.c"
diff --git a/tools/u-boot-tools/envcrc b/tools/u-boot-tools/envcrc
new file mode 100755
index 0000000000000000000000000000000000000000..23d57ef4f623e1bc6229d3a9dc29d78ecb0ed6a9
Binary files /dev/null and b/tools/u-boot-tools/envcrc differ
diff --git a/tools/u-boot-tools/envcrc.c b/tools/u-boot-tools/envcrc.c
new file mode 100644
index 0000000000000000000000000000000000000000..505949289fa57552dc9e7c09a52836be9dd0ae86
--- /dev/null
+++ b/tools/u-boot-tools/envcrc.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2001
+ * Paolo Scaffardi, AIRVENT SAM s.p.a - RIMINI(ITALY), arsenio@tin.it
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <linux/kconfig.h>
+
+#ifndef __ASSEMBLY__
+#define	__ASSEMBLY__			/* Dirty trick to get only #defines	*/
+#endif
+#define	__ASM_STUB_PROCESSOR_H__	/* don't include asm/processor.		*/
+#include <config.h>
+#undef	__ASSEMBLY__
+
+#if defined(CONFIG_ENV_IS_IN_FLASH)
+# ifndef  CONFIG_ENV_ADDR
+#  define CONFIG_ENV_ADDR	(CONFIG_SYS_FLASH_BASE + CONFIG_ENV_OFFSET)
+# endif
+# ifndef  CONFIG_ENV_OFFSET
+#  define CONFIG_ENV_OFFSET (CONFIG_ENV_ADDR - CONFIG_SYS_FLASH_BASE)
+# endif
+# if !defined(CONFIG_ENV_ADDR_REDUND) && defined(CONFIG_ENV_OFFSET_REDUND)
+#  define CONFIG_ENV_ADDR_REDUND	(CONFIG_SYS_FLASH_BASE + CONFIG_ENV_OFFSET_REDUND)
+# endif
+# ifndef  CONFIG_ENV_SIZE
+#  define CONFIG_ENV_SIZE	CONFIG_ENV_SECT_SIZE
+# endif
+# if defined(CONFIG_ENV_ADDR_REDUND) && !defined(CONFIG_ENV_SIZE_REDUND)
+#  define CONFIG_ENV_SIZE_REDUND	CONFIG_ENV_SIZE
+# endif
+# if (CONFIG_ENV_ADDR >= CONFIG_SYS_MONITOR_BASE) && \
+     ((CONFIG_ENV_ADDR + CONFIG_ENV_SIZE) <= (CONFIG_SYS_MONITOR_BASE + CONFIG_SYS_MONITOR_LEN))
+#  define ENV_IS_EMBEDDED
+# endif
+# if defined(CONFIG_ENV_ADDR_REDUND) || defined(CONFIG_ENV_OFFSET_REDUND)
+#  define CONFIG_SYS_REDUNDAND_ENVIRONMENT
+# endif
+#endif	/* CONFIG_ENV_IS_IN_FLASH */
+
+#if defined(ENV_IS_EMBEDDED) && !defined(CONFIG_BUILD_ENVCRC)
+# define CONFIG_BUILD_ENVCRC
+#endif
+
+#ifdef CONFIG_SYS_REDUNDAND_ENVIRONMENT
+# define ENV_HEADER_SIZE	(sizeof(uint32_t) + 1)
+#else
+# define ENV_HEADER_SIZE	(sizeof(uint32_t))
+#endif
+
+#define ENV_SIZE (CONFIG_ENV_SIZE - ENV_HEADER_SIZE)
+
+
+#ifdef CONFIG_BUILD_ENVCRC
+# include <environment.h>
+extern unsigned int env_size;
+extern env_t environment;
+#endif	/* CONFIG_BUILD_ENVCRC */
+
+extern uint32_t crc32 (uint32_t, const unsigned char *, unsigned int);
+
+int main (int argc, char **argv)
+{
+#ifdef CONFIG_BUILD_ENVCRC
+	unsigned char pad = 0x00;
+	uint32_t crc;
+	unsigned char *envptr = (unsigned char *)&environment,
+		*dataptr = envptr + ENV_HEADER_SIZE;
+	unsigned int datasize = ENV_SIZE;
+	unsigned int eoe;
+
+	if (argv[1] && !strncmp(argv[1], "--binary", 8)) {
+		int ipad = 0xff;
+		if (argv[1][8] == '=')
+			sscanf(argv[1] + 9, "%i", &ipad);
+		pad = ipad;
+	}
+
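+	/*
+	 * (Added note) The environment data is a sequence of NUL-terminated
+	 * "name=value" strings, terminated by an empty string, so the end of
+	 * the environment is marked by two consecutive NUL bytes.
+	 */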
+	if (pad) {
+		/* find the end of env */
+		for (eoe = 0; eoe < datasize - 1; ++eoe)
+			if (!dataptr[eoe] && !dataptr[eoe+1]) {
+				eoe += 2;
+				break;
+			}
+		if (eoe < datasize - 1)
+			memset(dataptr + eoe, pad, datasize - eoe);
+	}
+
+	crc = crc32 (0, dataptr, datasize);
+
+	/* Check if verbose mode is activated passing a parameter to the program */
+	if (argc > 1) {
+		if (!strncmp(argv[1], "--binary", 8)) {
+			int le = (argc > 2 ? !strcmp(argv[2], "le") : 1);
+			size_t i, start, end, step;
+			if (le) {
+				start = 0;
+				end = ENV_HEADER_SIZE;
+				step = 1;
+			} else {
+				start = ENV_HEADER_SIZE - 1;
+				end = -1;
+				step = -1;
+			}
+			for (i = start; i != end; i += step)
+				printf("%c", (crc & (0xFF << (i * 8))) >> (i * 8));
+			if (fwrite(dataptr, 1, datasize, stdout) != datasize)
+				fprintf(stderr, "fwrite() failed: %s\n", strerror(errno));
+		} else {
+			printf("CRC32 from offset %08X to %08X of environment = %08X\n",
+				(unsigned int) (dataptr - envptr),
+				(unsigned int) (dataptr - envptr) + datasize,
+				crc);
+		}
+	} else {
+		printf ("0x%08X\n", crc);
+	}
+#else
+	printf ("0\n");
+#endif
+	return EXIT_SUCCESS;
+}
diff --git a/tools/u-boot-tools/envcrc.o b/tools/u-boot-tools/envcrc.o
new file mode 100644
index 0000000000000000000000000000000000000000..a85ab9d69c8b6a681c30a31ebd5298acbef95934
Binary files /dev/null and b/tools/u-boot-tools/envcrc.o differ
diff --git a/tools/u-boot-tools/fdt_host.h b/tools/u-boot-tools/fdt_host.h
new file mode 100644
index 0000000000000000000000000000000000000000..99b009b2210928717d6f2a6183e61f247a25a6d7
--- /dev/null
+++ b/tools/u-boot-tools/fdt_host.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2008 Semihalf
+ */
+
+#ifndef __FDT_HOST_H__
+#define __FDT_HOST_H__
+
+/* Make sure to include u-boot version of libfdt include files */
+#include "../include/linux/libfdt.h"
+#include "../include/fdt_support.h"
+
+/**
+ * fdt_remove_unused_strings() - Remove any unused strings from an FDT
+ *
+ * This creates a new device tree in @new with unused strings removed. The
+ * caller can then use fdt_pack() to minimise the space consumed.
+ *
+ * @old:	Old device tree blob
+ * @new:	Place to put new device tree blob, which must be as large as
+ *		@old
+ * @return
+ *	0, on success
+ *	-FDT_ERR_BADOFFSET, corrupt device tree
+ *	-FDT_ERR_NOSPACE, out of space, which should not happen unless there
+ *		is something very wrong with the device tree input
+ */
+int fdt_remove_unused_strings(const void *old, void *new);
+
+int fit_check_sign(const void *working_fdt, const void *key);
+
+#endif /* __FDT_HOST_H__ */
diff --git a/tools/u-boot-tools/fdtgrep b/tools/u-boot-tools/fdtgrep
new file mode 100755
index 0000000000000000000000000000000000000000..17c3e6d2704816838a684db2beed0928bc1e34bd
Binary files /dev/null and b/tools/u-boot-tools/fdtgrep differ
diff --git a/tools/u-boot-tools/fdtgrep.c b/tools/u-boot-tools/fdtgrep.c
new file mode 100644
index 0000000000000000000000000000000000000000..8f44f599c1c437525b9bfe9cb405ce6e9838a257
--- /dev/null
+++ b/tools/u-boot-tools/fdtgrep.c
@@ -0,0 +1,1236 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2013, Google Inc.
+ * Written by Simon Glass <sjg@chromium.org>
+ *
+ * Perform a grep of an FDT either displaying the source subset or producing
+ * a new .dtb subset which can be used as required.
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "fdt_host.h"
+#include "libfdt_internal.h"
+
+/* Define DEBUG to get some debugging output on stderr */
+#ifdef DEBUG
+#define debug(a, b...) fprintf(stderr, a, ## b)
+#else
+#define debug(a, b...)
+#endif
+
+/* A linked list of values we are grepping for */
+struct value_node {
+	int type;		/* Types this value matches (FDT_IS... mask) */
+	int include;		/* 1 to include matches, 0 to exclude */
+	const char *string;	/* String to match */
+	struct value_node *next;	/* Pointer to next node, or NULL */
+};
+
+/* Output formats we support */
+enum output_t {
+	OUT_DTS,		/* Device tree source */
+	OUT_DTB,		/* Valid device tree binary */
+	OUT_BIN,		/* Fragment of .dtb, for hashing */
+};
+
+/* Holds information which controls our output and options */
+struct display_info {
+	enum output_t output;	/* Output format */
+	int add_aliases;	/* Add aliases node to output */
+	int all;		/* Display all properties/nodes */
+	int colour;		/* Display output in ANSI colour */
+	int region_list;	/* Output a region list */
+	int flags;		/* Flags (FDT_REG_...) */
+	int list_strings;	/* List strings in string table */
+	int show_offset;	/* Show offset */
+	int show_addr;		/* Show address */
+	int header;		/* Output an FDT header */
+	int diff;		/* Show +/- diff markers */
+	int include_root;	/* Include the root node and all properties */
+	int remove_strings;	/* Remove unused strings */
+	int show_dts_version;	/* Put '/dts-v1/;' on the first line */
+	int types_inc;		/* Mask of types that we include (FDT_IS...) */
+	int types_exc;		/* Mask of types that we exclude (FDT_IS...) */
+	int invert;		/* Invert polarity of match */
+	struct value_node *value_head;	/* List of values to match */
+	const char *output_fname;	/* Output filename */
+	FILE *fout;		/* File to write dts/dtb output */
+};
+
+static void report_error(const char *where, int err)
+{
+	fprintf(stderr, "Error at '%s': %s\n", where, fdt_strerror(err));
+}
+
+/* Supported ANSI colours */
+enum {
+	COL_BLACK,
+	COL_RED,
+	COL_GREEN,
+	COL_YELLOW,
+	COL_BLUE,
+	COL_MAGENTA,
+	COL_CYAN,
+	COL_WHITE,
+
+	COL_NONE = -1,
+};
+
+/**
+ * print_ansi_colour() - Print out the ANSI sequence for a colour
+ *
+ * @fout:	Output file
+ * @col:	Colour to output (COL_...), or COL_NONE to reset colour
+ */
+static void print_ansi_colour(FILE *fout, int col)
+{
+	if (col == COL_NONE)
+		fprintf(fout, "\033[0m");
+	else
+		fprintf(fout, "\033[1;%dm", col + 30);
+}
+
+
+/**
+ * value_add() - Add a new value to our list of things to grep for
+ *
+ * @disp:	Display structure, holding info about our options
+ * @headp:	Pointer to header pointer of list
+ * @type:	Type of this value (FDT_IS_...)
+ * @include:	1 if we want to include matches, 0 to exclude
+ * @str:	String value to match
+ */
+static int value_add(struct display_info *disp, struct value_node **headp,
+		     int type, int include, const char *str)
+{
+	struct value_node *node;
+
+	/*
+	 * Keep track of which types we are excluding/including. We don't
+	 * allow both including and excluding things, because it doesn't make
+	 * sense. 'Including' means that everything not mentioned is
+	 * excluded. 'Excluding' means that everything not mentioned is
+	 * included. So using the two together would be meaningless.
+	 */
+	if (include)
+		disp->types_inc |= type;
+	else
+		disp->types_exc |= type;
+	if (disp->types_inc & disp->types_exc & type) {
+		fprintf(stderr,
+			"Cannot use both include and exclude for '%s'\n", str);
+		return -1;
+	}
+
+	str = strdup(str);
+	if (!str)
+		goto err_mem;
+	node = malloc(sizeof(*node));
+	if (!node)
+		goto err_mem;
+	node->next = *headp;
+	node->type = type;
+	node->include = include;
+	node->string = str;
+	*headp = node;
+
+	return 0;
+err_mem:
+	fprintf(stderr, "Out of memory\n");
+	return -1;
+}
+
+static bool util_is_printable_string(const void *data, int len)
+{
+	const char *s = data;
+	const char *ss, *se;
+
+	/* zero length is not */
+	if (len == 0)
+		return 0;
+
+	/* must terminate with zero */
+	if (s[len - 1] != '\0')
+		return 0;
+
+	se = s + len;
+
+	while (s < se) {
+		ss = s;
+		while (s < se && *s && isprint((unsigned char)*s))
+			s++;
+
+		/* not zero, or not done yet */
+		if (*s != '\0' || s == ss)
+			return 0;
+
+		s++;
+	}
+
+	return 1;
+}
+
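+/*
+ * utilfdt_print_data() - print a property value in .dts style (added note)
+ *
+ * Depending on the contents this prints, for example,
+ * ' = "console", "serial0"' for printable string lists,
+ * ' = <0x00000001 0x00000002>' when the length is a multiple of four, or
+ * ' = [de ad be ef]' otherwise; properties of zero length print nothing.
+ */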
+static void utilfdt_print_data(const char *data, int len)
+{
+	int i;
+	const char *p = data;
+	const char *s;
+
+	/* no data, don't print */
+	if (len == 0)
+		return;
+
+	if (util_is_printable_string(data, len)) {
+		printf(" = ");
+
+		s = data;
+		do {
+			printf("\"%s\"", s);
+			s += strlen(s) + 1;
+			if (s < data + len)
+				printf(", ");
+		} while (s < data + len);
+
+	} else if ((len % 4) == 0) {
+		const uint32_t *cell = (const uint32_t *)data;
+
+		printf(" = <");
+		for (i = 0, len /= 4; i < len; i++)
+			printf("0x%08x%s", fdt32_to_cpu(cell[i]),
+			       i < (len - 1) ? " " : "");
+		printf(">");
+	} else {
+		printf(" = [");
+		for (i = 0; i < len; i++)
+			printf("%02x%s", *p++, i < len - 1 ? " " : "");
+		printf("]");
+	}
+}
+
+/**
+ * display_fdt_by_regions() - Display regions of an FDT source
+ *
+ * This dumps an FDT as source, but only certain regions of it. This is the
+ * final stage of the grep - we have a list of regions we want to display,
+ * and this function displays them.
+ *
+ * @disp:	Display structure, holding info about our options
+ * @blob:	FDT blob to display
+ * @region:	List of regions to display
+ * @count:	Number of regions
+ */
+static int display_fdt_by_regions(struct display_info *disp, const void *blob,
+		struct fdt_region region[], int count)
+{
+	struct fdt_region *reg = region, *reg_end = region + count;
+	uint32_t off_mem_rsvmap = fdt_off_mem_rsvmap(blob);
+	int base = fdt_off_dt_struct(blob);
+	int version = fdt_version(blob);
+	int offset, nextoffset;
+	int tag, depth, shift;
+	FILE *f = disp->fout;
+	uint64_t addr, size;
+	int in_region;
+	int file_ofs;
+	int i;
+
+	if (disp->show_dts_version)
+		fprintf(f, "/dts-v1/;\n");
+
+	if (disp->header) {
+		fprintf(f, "// magic:\t\t0x%x\n", fdt_magic(blob));
+		fprintf(f, "// totalsize:\t\t0x%x (%d)\n", fdt_totalsize(blob),
+			fdt_totalsize(blob));
+		fprintf(f, "// off_dt_struct:\t0x%x\n",
+			fdt_off_dt_struct(blob));
+		fprintf(f, "// off_dt_strings:\t0x%x\n",
+			fdt_off_dt_strings(blob));
+		fprintf(f, "// off_mem_rsvmap:\t0x%x\n", off_mem_rsvmap);
+		fprintf(f, "// version:\t\t%d\n", version);
+		fprintf(f, "// last_comp_version:\t%d\n",
+			fdt_last_comp_version(blob));
+		if (version >= 2) {
+			fprintf(f, "// boot_cpuid_phys:\t0x%x\n",
+				fdt_boot_cpuid_phys(blob));
+		}
+		if (version >= 3) {
+			fprintf(f, "// size_dt_strings:\t0x%x\n",
+				fdt_size_dt_strings(blob));
+		}
+		if (version >= 17) {
+			fprintf(f, "// size_dt_struct:\t0x%x\n",
+				fdt_size_dt_struct(blob));
+		}
+		fprintf(f, "\n");
+	}
+
+	if (disp->flags & FDT_REG_ADD_MEM_RSVMAP) {
+		const struct fdt_reserve_entry *p_rsvmap;
+
+		p_rsvmap = (const struct fdt_reserve_entry *)
+				((const char *)blob + off_mem_rsvmap);
+		for (i = 0; ; i++) {
+			addr = fdt64_to_cpu(p_rsvmap[i].address);
+			size = fdt64_to_cpu(p_rsvmap[i].size);
+			if (addr == 0 && size == 0)
+				break;
+
+			fprintf(f, "/memreserve/ %llx %llx;\n",
+				(unsigned long long)addr,
+				(unsigned long long)size);
+		}
+	}
+
+	depth = 0;
+	nextoffset = 0;
+	shift = 4;	/* 4 spaces per indent */
+	do {
+		const struct fdt_property *prop;
+		const char *name;
+		int show;
+		int len;
+
+		offset = nextoffset;
+
+		/*
+		 * Work out the file offset of this offset, and decide
+		 * whether it is in the region list or not
+		 */
+		file_ofs = base + offset;
+		if (reg < reg_end && file_ofs >= reg->offset + reg->size)
+			reg++;
+		in_region = reg < reg_end && file_ofs >= reg->offset &&
+				file_ofs < reg->offset + reg->size;
+		tag = fdt_next_tag(blob, offset, &nextoffset);
+
+		if (tag == FDT_END)
+			break;
+		show = in_region || disp->all;
+		if (show && disp->diff)
+			fprintf(f, "%c", in_region ? '+' : '-');
+
+		if (!show) {
+			/* Do this here to avoid 'if (show)' in every 'case' */
+			if (tag == FDT_BEGIN_NODE)
+				depth++;
+			else if (tag == FDT_END_NODE)
+				depth--;
+			continue;
+		}
+		if (tag != FDT_END) {
+			if (disp->show_addr)
+				fprintf(f, "%4x: ", file_ofs);
+			if (disp->show_offset)
+				fprintf(f, "%4x: ", file_ofs - base);
+		}
+
+		/* Green means included, red means excluded */
+		if (disp->colour)
+			print_ansi_colour(f, in_region ? COL_GREEN : COL_RED);
+
+		switch (tag) {
+		case FDT_PROP:
+			prop = fdt_get_property_by_offset(blob, offset, NULL);
+			name = fdt_string(blob, fdt32_to_cpu(prop->nameoff));
+			fprintf(f, "%*s%s", depth * shift, "", name);
+			utilfdt_print_data(prop->data,
+					   fdt32_to_cpu(prop->len));
+			fprintf(f, ";");
+			break;
+
+		case FDT_NOP:
+			fprintf(f, "%*s// [NOP]", depth * shift, "");
+			break;
+
+		case FDT_BEGIN_NODE:
+			name = fdt_get_name(blob, offset, &len);
+			fprintf(f, "%*s%s {", depth++ * shift, "",
+				*name ? name : "/");
+			break;
+
+		case FDT_END_NODE:
+			fprintf(f, "%*s};", --depth * shift, "");
+			break;
+		}
+
+		/* Reset colour back to normal before end of line */
+		if (disp->colour)
+			print_ansi_colour(f, COL_NONE);
+		fprintf(f, "\n");
+	} while (1);
+
+	/* Print a list of strings if requested */
+	if (disp->list_strings) {
+		const char *str;
+		int str_base = fdt_off_dt_strings(blob);
+
+		for (offset = 0; offset < fdt_size_dt_strings(blob);
+				offset += strlen(str) + 1) {
+			str = fdt_string(blob, offset);
+			int len = strlen(str) + 1;
+			int show;
+
+			/* Only print strings that are in the region */
+			file_ofs = str_base + offset;
+			in_region = reg < reg_end &&
+					file_ofs >= reg->offset &&
+					file_ofs + len < reg->offset +
+						reg->size;
+			show = in_region || disp->all;
+			if (show && disp->diff)
+				printf("%c", in_region ? '+' : '-');
+			if (disp->show_addr)
+				printf("%4x: ", file_ofs);
+			if (disp->show_offset)
+				printf("%4x: ", offset);
+			printf("%s\n", str);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * dump_fdt_regions() - Dump regions of an FDT as binary data
+ *
+ * This dumps an FDT as binary, but only certain regions of it. This is the
+ * final stage of the grep - we have a list of regions we want to dump,
+ * and this function dumps them.
+ *
+ * The output of this function may or may not be a valid FDT. To ensure it
+ * is, these disp->flags must be set:
+ *
+ *   FDT_REG_SUPERNODES: ensures that subnodes are preceded by their
+ *		parents. Without this option, fragments of subnode data may be
+ *		output without the supernodes above them. This is useful for
+ *		hashing but cannot produce a valid FDT.
+ *   FDT_REG_ADD_STRING_TAB: Adds a string table to the end of the FDT.
+ *		Without this none of the properties will have names
+ *   FDT_REG_ADD_MEM_RSVMAP: Adds a mem_rsvmap table - an FDT is invalid
+ *		without this.
+ *
+ * @disp:	Display structure, holding info about our options
+ * @blob:	FDT blob to display
+ * @region:	List of regions to display
+ * @count:	Number of regions
+ * @out:	Output destination
+ */
+static int dump_fdt_regions(struct display_info *disp, const void *blob,
+		struct fdt_region region[], int count, char *out)
+{
+	struct fdt_header *fdt;
+	int size, struct_start;
+	int ptr;
+	int i;
+
+	/* Set up a basic header (even if we don't actually write it) */
+	fdt = (struct fdt_header *)out;
+	memset(fdt, '\0', sizeof(*fdt));
+	fdt_set_magic(fdt, FDT_MAGIC);
+	struct_start = FDT_ALIGN(sizeof(struct fdt_header),
+					sizeof(struct fdt_reserve_entry));
+	fdt_set_off_mem_rsvmap(fdt, struct_start);
+	fdt_set_version(fdt, FDT_LAST_SUPPORTED_VERSION);
+	fdt_set_last_comp_version(fdt, FDT_FIRST_SUPPORTED_VERSION);
+
+	/*
+	 * Calculate the total size of the regions we are writing out. The
+	 * first will be the mem_rsvmap if the FDT_REG_ADD_MEM_RSVMAP flag
+	 * is set. The last will be the string table if FDT_REG_ADD_STRING_TAB
+	 * is set.
+	 */
+	for (i = size = 0; i < count; i++)
+		size += region[i].size;
+
+	/* Bring in the mem_rsvmap section from the old file if requested */
+	if (count > 0 && (disp->flags & FDT_REG_ADD_MEM_RSVMAP)) {
+		struct_start += region[0].size;
+		size -= region[0].size;
+	}
+	fdt_set_off_dt_struct(fdt, struct_start);
+
+	/* Update the header to have the correct offsets/sizes */
+	if (count >= 2 && (disp->flags & FDT_REG_ADD_STRING_TAB)) {
+		int str_size;
+
+		str_size = region[count - 1].size;
+		fdt_set_size_dt_struct(fdt, size - str_size);
+		fdt_set_off_dt_strings(fdt, struct_start + size - str_size);
+		fdt_set_size_dt_strings(fdt, str_size);
+		fdt_set_totalsize(fdt, struct_start + size);
+	}
+
+	/* Write the header if required */
+	ptr = 0;
+	if (disp->header) {
+		ptr = sizeof(*fdt);
+		while (ptr < fdt_off_mem_rsvmap(fdt))
+			out[ptr++] = '\0';
+	}
+
+	/* Output all the nodes including any mem_rsvmap/string table */
+	for (i = 0; i < count; i++) {
+		struct fdt_region *reg = &region[i];
+
+		memcpy(out + ptr, (const char *)blob + reg->offset, reg->size);
+		ptr += reg->size;
+	}
+
+	return ptr;
+}
+
+/**
+ * show_region_list() - Print out a list of regions
+ *
+ * The list includes the region offset (absolute offset from start of FDT
+ * blob in bytes) and size
+ *
+ * @reg:	List of regions to print
+ * @count:	Number of regions
+ */
+static void show_region_list(struct fdt_region *reg, int count)
+{
+	int i;
+
+	printf("Regions: %d\n", count);
+	for (i = 0; i < count; i++, reg++) {
+		printf("%d:  %-10x  %-10x\n", i, reg->offset,
+		       reg->offset + reg->size);
+	}
+}
+
+static int check_type_include(void *priv, int type, const char *data, int size)
+{
+	struct display_info *disp = priv;
+	struct value_node *val;
+	int match, none_match = FDT_IS_ANY;
+
+	/* If none of our conditions mention this type, we know nothing */
+	debug("type=%x, data=%s\n", type, data ? data : "(null)");
+	if (!((disp->types_inc | disp->types_exc) & type)) {
+		debug("   - not in any condition\n");
+		return -1;
+	}
+
+	/*
+	 * Go through the list of conditions. For inclusive conditions, we
+	 * return 1 at the first match. For exclusive conditions, we must
+	 * check that there are no matches.
+	 */
+	if (data) {
+		for (val = disp->value_head; val; val = val->next) {
+			if (!(type & val->type))
+				continue;
+			match = fdt_stringlist_contains(data, size,
+							val->string);
+			debug("      - val->type=%x, str='%s', match=%d\n",
+			      val->type, val->string, match);
+			if (match && val->include) {
+				debug("   - match inc %s\n", val->string);
+				return 1;
+			}
+			if (match)
+				none_match &= ~val->type;
+		}
+	}
+
+	/*
+	 * If this is an exclusive condition, and nothing matches, then we
+	 * should return 1.
+	 */
+	if ((type & disp->types_exc) && (none_match & type)) {
+		debug("   - match exc\n");
+		/*
+		 * Allow FDT_IS_COMPAT to make the final decision in the
+		 * case where there is no specific type
+		 */
+		if (type == FDT_IS_NODE && disp->types_exc == FDT_ANY_GLOBAL) {
+			debug("   - suppressed exc node\n");
+			return -1;
+		}
+		return 1;
+	}
+
+	/*
+	 * Allow FDT_IS_COMPAT to make the final decision in the
+	 * case where there is no specific type (inclusive)
+	 */
+	if (type == FDT_IS_NODE && disp->types_inc == FDT_ANY_GLOBAL)
+		return -1;
+
+	debug("   - no match, types_inc=%x, types_exc=%x, none_match=%x\n",
+	      disp->types_inc, disp->types_exc, none_match);
+
+	return 0;
+}
+
+/**
+ * h_include() - Include handler function for fdt_find_regions()
+ *
+ * This function decides whether to include or exclude a node, property or
+ * compatible string. The function is defined by fdt_find_regions().
+ *
+ * The algorithm is documented in the code - disp->invert is 0 for normal
+ * operation, and 1 to invert the sense of all matches.
+ *
+ * See
+ */
+static int h_include(void *priv, const void *fdt, int offset, int type,
+		     const char *data, int size)
+{
+	struct display_info *disp = priv;
+	int inc, len;
+
+	inc = check_type_include(priv, type, data, size);
+	if (disp->include_root && type == FDT_IS_PROP && offset == 0 && inc)
+		return 1;
+
+	/*
+	 * If the node name does not tell us anything, check the
+	 * compatible string
+	 */
+	if (inc == -1 && type == FDT_IS_NODE) {
+		debug("   - checking compatible2\n");
+		data = fdt_getprop(fdt, offset, "compatible", &len);
+		inc = check_type_include(priv, FDT_IS_COMPAT, data, len);
+	}
+
+	/* If we still have no idea, check for properties in the node */
+	if (inc != 1 && type == FDT_IS_NODE &&
+	    (disp->types_inc & FDT_NODE_HAS_PROP)) {
+		debug("   - checking node '%s'\n",
+		      fdt_get_name(fdt, offset, NULL));
+		for (offset = fdt_first_property_offset(fdt, offset);
+		     offset > 0 && inc != 1;
+		     offset = fdt_next_property_offset(fdt, offset)) {
+			const struct fdt_property *prop;
+			const char *str;
+
+			prop = fdt_get_property_by_offset(fdt, offset, NULL);
+			if (!prop)
+				continue;
+			str = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
+			inc = check_type_include(priv, FDT_NODE_HAS_PROP, str,
+						 strlen(str));
+		}
+		if (inc == -1)
+			inc = 0;
+	}
+
+	switch (inc) {
+	case 1:
+		inc = !disp->invert;
+		break;
+	case 0:
+		inc = disp->invert;
+		break;
+	}
+	debug("   - returning %d\n", inc);
+
+	return inc;
+}
+
+static int h_cmp_region(const void *v1, const void *v2)
+{
+	const struct fdt_region *region1 = v1, *region2 = v2;
+
+	return region1->offset - region2->offset;
+}
+
+static int fdtgrep_find_regions(const void *fdt,
+		int (*include_func)(void *priv, const void *fdt, int offset,
+				 int type, const char *data, int size),
+		struct display_info *disp, struct fdt_region *region,
+		int max_regions, char *path, int path_len, int flags)
+{
+	struct fdt_region_state state;
+	int count;
+	int ret;
+
+	count = 0;
+	ret = fdt_first_region(fdt, include_func, disp,
+			&region[count++], path, path_len,
+			disp->flags, &state);
+	while (ret == 0) {
+		ret = fdt_next_region(fdt, include_func, disp,
+				count < max_regions ? &region[count] : NULL,
+				path, path_len, disp->flags, &state);
+		if (!ret)
+			count++;
+	}
+	if (ret && ret != -FDT_ERR_NOTFOUND)
+		return ret;
+
+	/* Find all the aliases and add those regions back in */
+	if (disp->add_aliases && count < max_regions) {
+		int new_count;
+
+		new_count = fdt_add_alias_regions(fdt, region, count,
+						  max_regions, &state);
+		if (new_count == -FDT_ERR_NOTFOUND) {
+			/* No alias node found */
+		} else if (new_count < 0) {
+			return new_count;
+		} else if (new_count <= max_regions) {
+			/*
+			* The alias regions will now be at the end of the list.
+			* Sort the regions by offset to get things into the
+			* right order
+			*/
+			count = new_count;
+			qsort(region, count, sizeof(struct fdt_region),
+			      h_cmp_region);
+		}
+	}
+
+	return count;
+}
+
+int utilfdt_read_err_len(const char *filename, char **buffp, off_t *len)
+{
+	int fd = 0;	/* assume stdin */
+	char *buf = NULL;
+	off_t bufsize = 1024, offset = 0;
+	int ret = 0;
+
+	*buffp = NULL;
+	if (strcmp(filename, "-") != 0) {
+		fd = open(filename, O_RDONLY);
+		if (fd < 0)
+			return errno;
+	}
+
+	/* Loop until we have read everything */
+	buf = malloc(bufsize);
+	if (!buf)
+		return -ENOMEM;
+	do {
+		/* Expand the buffer to hold the next chunk */
+		if (offset == bufsize) {
+			bufsize *= 2;
+			buf = realloc(buf, bufsize);
+			if (!buf)
+				return -ENOMEM;
+		}
+
+		ret = read(fd, &buf[offset], bufsize - offset);
+		if (ret < 0) {
+			ret = errno;
+			break;
+		}
+		offset += ret;
+	} while (ret != 0);
+
+	/* Clean up, including closing stdin; return errno on error */
+	close(fd);
+	if (ret)
+		free(buf);
+	else
+		*buffp = buf;
+	*len = offset;
+	return ret;
+}
+
+int utilfdt_read_err(const char *filename, char **buffp)
+{
+	off_t len;
+	return utilfdt_read_err_len(filename, buffp, &len);
+}
+
+char *utilfdt_read_len(const char *filename, off_t *len)
+{
+	char *buff;
+	int ret = utilfdt_read_err_len(filename, &buff, len);
+
+	if (ret) {
+		fprintf(stderr, "Couldn't open blob from '%s': %s\n", filename,
+			strerror(ret));
+		return NULL;
+	}
+	/* Successful read */
+	return buff;
+}
+
+char *utilfdt_read(const char *filename)
+{
+	off_t len;
+	return utilfdt_read_len(filename, &len);
+}
+
+/**
+ * Run the main fdtgrep operation, given a filename and valid arguments
+ *
+ * @param disp		Display information / options
+ * @param filename	Filename of blob file
+ * @return 0 if ok, -ve on error
+ */
+static int do_fdtgrep(struct display_info *disp, const char *filename)
+{
+	struct fdt_region *region = NULL;
+	int max_regions;
+	int count = 100;
+	char path[1024];
+	char *blob;
+	int i, ret;
+
+	blob = utilfdt_read(filename);
+	if (!blob)
+		return -1;
+	ret = fdt_check_header(blob);
+	if (ret) {
+		fprintf(stderr, "Error: %s\n", fdt_strerror(ret));
+		return ret;
+	}
+
+	/* Allow old files, but they are untested */
+	if (fdt_version(blob) < 17 && disp->value_head) {
+		fprintf(stderr,
+			"Warning: fdtgrep does not fully support version %d files\n",
+			fdt_version(blob));
+	}
+
+	/*
+	 * We do two passes, since we don't know how many regions we need.
+	 * The first pass will count the regions, but if it is too many,
+	 * we do another pass to actually record them.
+	 */
+	for (i = 0; i < 2; i++) {
+		region = malloc(count * sizeof(struct fdt_region));
+		if (!region) {
+			fprintf(stderr, "Out of memory for %d regions\n",
+				count);
+			return -1;
+		}
+		max_regions = count;
+		count = fdtgrep_find_regions(blob,
+				h_include, disp,
+				region, max_regions, path, sizeof(path),
+				disp->flags);
+		if (count < 0) {
+			report_error("fdt_find_regions", count);
+			free(region);
+			return -1;
+		}
+		if (count <= max_regions)
+			break;
+		free(region);
+	}
+	if (count > max_regions) {
+		fprintf(stderr,
+			"Internal error with fdtgrep_find_regions()\n");
+		return -1;
+	}
+
+	/* Optionally print a list of regions */
+	if (disp->region_list)
+		show_region_list(region, count);
+
+	/* Output either source .dts or binary .dtb */
+	if (disp->output == OUT_DTS) {
+		ret = display_fdt_by_regions(disp, blob, region, count);
+	} else {
+		void *fdt;
+		/* Allow reserved memory section to expand slightly */
+		int size = fdt_totalsize(blob) + 16;
+
+		fdt = malloc(size);
+		if (!fdt) {
+			fprintf(stderr, "Out of memory\n");
+			ret = -1;
+			goto err;
+		}
+		size = dump_fdt_regions(disp, blob, region, count, fdt);
+		if (disp->remove_strings) {
+			void *out;
+
+			out = malloc(size);
+			if (!out) {
+				fprintf(stderr, "Out of memory\n");
+				ret = -1;
+				goto err;
+			}
+			ret = fdt_remove_unused_strings(fdt, out);
+			if (ret < 0) {
+				fprintf(stderr,
+					"Failed to remove unused strings: err=%d\n",
+					ret);
+				goto err;
+			}
+			free(fdt);
+			fdt = out;
+			ret = fdt_pack(fdt);
+			if (ret < 0) {
+				fprintf(stderr, "Failed to pack: err=%d\n",
+					ret);
+				goto err;
+			}
+			size = fdt_totalsize(fdt);
+		}
+
+		if (size != fwrite(fdt, 1, size, disp->fout)) {
+			fprintf(stderr, "Write failure, %d bytes\n", size);
+			free(fdt);
+			ret = 1;
+			goto err;
+		}
+		free(fdt);
+	}
+err:
+	free(blob);
+	free(region);
+
+	return ret;
+}
+
+static const char usage_synopsis[] =
+	"fdtgrep - extract portions from device tree\n"
+	"\n"
+	"Usage:\n"
+	"	fdtgrep <options> <dt file>|-\n\n"
+	"Output formats are:\n"
+	"\tdts - device tree source text\n"
+	"\tdtb - device tree blob (sets -Hmt automatically)\n"
+	"\tbin - device tree fragment (may not be a valid .dtb)";
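+
+/*
+ * Example invocations (illustrative only; the exact options used in
+ * practice depend on the build scripts that call this tool):
+ *
+ *   fdtgrep -b u-boot,dm-pre-reloc -RT -O dtb u-boot.dtb -o spl.dtb
+ *	keep only nodes carrying the 'u-boot,dm-pre-reloc' property (plus
+ *	the root node and aliases) and write a pruned .dtb
+ *
+ *   fdtgrep -l -O dts u-boot.dtb
+ *	list the selected regions and print the result as device tree source
+ */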
+
+/* Helper for usage_short_opts string constant */
+#define USAGE_COMMON_SHORT_OPTS "hV"
+
+/* Helper for aligning long_opts array */
+#define a_argument required_argument
+
+/* Helper for usage_long_opts option array */
+#define USAGE_COMMON_LONG_OPTS \
+	{"help",      no_argument, NULL, 'h'}, \
+	{"version",   no_argument, NULL, 'V'}, \
+	{NULL,        no_argument, NULL, 0x0}
+
+/* Helper for usage_opts_help array */
+#define USAGE_COMMON_OPTS_HELP \
+	"Print this help and exit", \
+	"Print version and exit", \
+	NULL
+
+/* Helper for getopt case statements */
+#define case_USAGE_COMMON_FLAGS \
+	case 'h': usage(NULL); \
+	case 'V': util_version(); \
+	case '?': usage("unknown option");
+
+static const char usage_short_opts[] =
+		"haAc:b:C:defg:G:HIlLmn:N:o:O:p:P:rRsStTv"
+		USAGE_COMMON_SHORT_OPTS;
+static struct option const usage_long_opts[] = {
+	{"show-address",	no_argument, NULL, 'a'},
+	{"colour",		no_argument, NULL, 'A'},
+	{"include-node-with-prop", a_argument, NULL, 'b'},
+	{"include-compat",	a_argument, NULL, 'c'},
+	{"exclude-compat",	a_argument, NULL, 'C'},
+	{"diff",		no_argument, NULL, 'd'},
+	{"enter-node",		no_argument, NULL, 'e'},
+	{"show-offset",		no_argument, NULL, 'f'},
+	{"include-match",	a_argument, NULL, 'g'},
+	{"exclude-match",	a_argument, NULL, 'G'},
+	{"show-header",		no_argument, NULL, 'H'},
+	{"show-version",	no_argument, NULL, 'I'},
+	{"list-regions",	no_argument, NULL, 'l'},
+	{"list-strings",	no_argument, NULL, 'L'},
+	{"include-mem",		no_argument, NULL, 'm'},
+	{"include-node",	a_argument, NULL, 'n'},
+	{"exclude-node",	a_argument, NULL, 'N'},
+	{"include-prop",	a_argument, NULL, 'p'},
+	{"exclude-prop",	a_argument, NULL, 'P'},
+	{"remove-strings",	no_argument, NULL, 'r'},
+	{"include-root",	no_argument, NULL, 'R'},
+	{"show-subnodes",	no_argument, NULL, 's'},
+	{"skip-supernodes",	no_argument, NULL, 'S'},
+	{"show-stringtab",	no_argument, NULL, 't'},
+	{"show-aliases",	no_argument, NULL, 'T'},
+	{"out",			a_argument, NULL, 'o'},
+	{"out-format",		a_argument, NULL, 'O'},
+	{"invert-match",	no_argument, NULL, 'v'},
+	USAGE_COMMON_LONG_OPTS,
+};
+static const char * const usage_opts_help[] = {
+	"Display address",
+	"Show all nodes/tags, colour those that match",
+	"Include nodes containing this property",
+	"Compatible nodes to include in grep",
+	"Compatible nodes to exclude in grep",
+	"Diff: Mark matching nodes with +, others with -",
+	"Enter direct subnode names of matching nodes",
+	"Display offset",
+	"Node/property/compatible string to include in grep",
+	"Node/property/compatible string to exclude in grep",
+	"Output a header",
+	"Put \"/dts-v1/;\" on first line of dts output",
+	"Output a region list",
+	"List strings in string table",
+	"Include mem_rsvmap section in binary output",
+	"Node to include in grep",
+	"Node to exclude in grep",
+	"Property to include in grep",
+	"Property to exclude in grep",
+	"Remove unused strings from string table",
+	"Include root node and all properties",
+	"Show all subnodes of matching nodes",
+	"Don't include supernodes of matching nodes",
+	"Include string table in binary output",
+	"Include matching aliases in output",
+	"-o <output file>",
+	"-O <output format>",
+	"Invert the sense of matching (select non-matching lines)",
+	USAGE_COMMON_OPTS_HELP
+};
+
+/**
+ * Call getopt_long() with standard options
+ *
+ * Since all util code runs getopt in the same way, provide a helper.
+ */
+#define util_getopt_long() getopt_long(argc, argv, usage_short_opts, \
+				       usage_long_opts, NULL)
+
+void util_usage(const char *errmsg, const char *synopsis,
+		const char *short_opts, struct option const long_opts[],
+		const char * const opts_help[])
+{
+	FILE *fp = errmsg ? stderr : stdout;
+	const char a_arg[] = "<arg>";
+	size_t a_arg_len = strlen(a_arg) + 1;
+	size_t i;
+	int optlen;
+
+	fprintf(fp,
+		"Usage: %s\n"
+		"\n"
+		"Options: -[%s]\n", synopsis, short_opts);
+
+	/* prescan the --long opt length to auto-align */
+	optlen = 0;
+	for (i = 0; long_opts[i].name; ++i) {
+		/* +1 is for space between --opt and help text */
+		int l = strlen(long_opts[i].name) + 1;
+		if (long_opts[i].has_arg == a_argument)
+			l += a_arg_len;
+		if (optlen < l)
+			optlen = l;
+	}
+
+	for (i = 0; long_opts[i].name; ++i) {
+		/* helps when adding new applets or options */
+		assert(opts_help[i] != NULL);
+
+		/* first output the short flag if it has one */
+		if (long_opts[i].val > '~')
+			fprintf(fp, "      ");
+		else
+			fprintf(fp, "  -%c, ", long_opts[i].val);
+
+		/* then the long flag */
+		if (long_opts[i].has_arg == no_argument) {
+			fprintf(fp, "--%-*s", optlen, long_opts[i].name);
+		} else {
+			fprintf(fp, "--%s %s%*s", long_opts[i].name, a_arg,
+				(int)(optlen - strlen(long_opts[i].name) -
+				a_arg_len), "");
+		}
+
+		/* finally the help text */
+		fprintf(fp, "%s\n", opts_help[i]);
+	}
+
+	if (errmsg) {
+		fprintf(fp, "\nError: %s\n", errmsg);
+		exit(EXIT_FAILURE);
+	} else {
+		exit(EXIT_SUCCESS);
+	}
+}
+
+/**
+ * Show usage and exit
+ *
+ * If you name all your usage variables with usage_xxx, then you can call this
+ * help macro rather than expanding all arguments yourself.
+ *
+ * @param errmsg	If non-NULL, an error message to display
+ */
+#define usage(errmsg) \
+	util_usage(errmsg, usage_synopsis, usage_short_opts, \
+		   usage_long_opts, usage_opts_help)
+
+void util_version(void)
+{
+	printf("Version: %s\n", "(U-Boot)");
+	exit(0);
+}
+
+static void scan_args(struct display_info *disp, int argc, char *argv[])
+{
+	int opt;
+
+	while ((opt = util_getopt_long()) != EOF) {
+		int type = 0;
+		int inc = 1;
+
+		switch (opt) {
+		case_USAGE_COMMON_FLAGS
+		case 'a':
+			disp->show_addr = 1;
+			break;
+		case 'A':
+			disp->all = 1;
+			break;
+		case 'b':
+			type = FDT_NODE_HAS_PROP;
+			break;
+		case 'C':
+			inc = 0;
+			/* no break */
+		case 'c':
+			type = FDT_IS_COMPAT;
+			break;
+		case 'd':
+			disp->diff = 1;
+			break;
+		case 'e':
+			disp->flags |= FDT_REG_DIRECT_SUBNODES;
+			break;
+		case 'f':
+			disp->show_offset = 1;
+			break;
+		case 'G':
+			inc = 0;
+			/* no break */
+		case 'g':
+			type = FDT_ANY_GLOBAL;
+			break;
+		case 'H':
+			disp->header = 1;
+			break;
+		case 'l':
+			disp->region_list = 1;
+			break;
+		case 'L':
+			disp->list_strings = 1;
+			break;
+		case 'm':
+			disp->flags |= FDT_REG_ADD_MEM_RSVMAP;
+			break;
+		case 'N':
+			inc = 0;
+			/* no break */
+		case 'n':
+			type = FDT_IS_NODE;
+			break;
+		case 'o':
+			disp->output_fname = optarg;
+			break;
+		case 'O':
+			if (!strcmp(optarg, "dtb"))
+				disp->output = OUT_DTB;
+			else if (!strcmp(optarg, "dts"))
+				disp->output = OUT_DTS;
+			else if (!strcmp(optarg, "bin"))
+				disp->output = OUT_BIN;
+			else
+				usage("Unknown output format");
+			break;
+		case 'P':
+			inc = 0;
+			/* no break */
+		case 'p':
+			type = FDT_IS_PROP;
+			break;
+		case 'r':
+			disp->remove_strings = 1;
+			break;
+		case 'R':
+			disp->include_root = 1;
+			break;
+		case 's':
+			disp->flags |= FDT_REG_ALL_SUBNODES;
+			break;
+		case 'S':
+			disp->flags &= ~FDT_REG_SUPERNODES;
+			break;
+		case 't':
+			disp->flags |= FDT_REG_ADD_STRING_TAB;
+			break;
+		case 'T':
+			disp->add_aliases = 1;
+			break;
+		case 'v':
+			disp->invert = 1;
+			break;
+		case 'I':
+			disp->show_dts_version = 1;
+			break;
+		}
+
+		if (type && value_add(disp, &disp->value_head, type, inc,
+				      optarg))
+			usage("Cannot add value");
+	}
+
+	if (disp->invert && disp->types_exc)
+		usage("-v has no meaning when used with 'exclude' conditions");
+}
+
+int main(int argc, char *argv[])
+{
+	char *filename = NULL;
+	struct display_info disp;
+	int ret;
+
+	/* set defaults */
+	memset(&disp, '\0', sizeof(disp));
+	disp.flags = FDT_REG_SUPERNODES;	/* Default flags */
+
+	scan_args(&disp, argc, argv);
+
+	/* Show matched lines in colour if we can */
+	disp.colour = disp.all && isatty(0);
+
+	/* Any additional arguments can match anything, just like -g */
+	while (optind < argc - 1) {
+		if (value_add(&disp, &disp.value_head, FDT_IS_ANY, 1,
+			      argv[optind++]))
+			usage("Cannot add value");
+	}
+
+	if (optind < argc)
+		filename = argv[optind++];
+	if (!filename)
+		usage("Missing filename");
+
+	/* If a valid .dtb is required, set flags to ensure we get one */
+	if (disp.output == OUT_DTB) {
+		disp.header = 1;
+		disp.flags |= FDT_REG_ADD_MEM_RSVMAP | FDT_REG_ADD_STRING_TAB;
+	}
+
+	if (disp.output_fname) {
+		disp.fout = fopen(disp.output_fname, "w");
+		if (!disp.fout)
+			usage("Cannot open output file");
+	} else {
+		disp.fout = stdout;
+	}
+
+	/* Run the grep and output the results */
+	ret = do_fdtgrep(&disp, filename);
+	if (disp.output_fname)
+		fclose(disp.fout);
+	if (ret)
+		return 1;
+
+	return 0;
+}
diff --git a/tools/u-boot-tools/fdtgrep.o b/tools/u-boot-tools/fdtgrep.o
new file mode 100644
index 0000000000000000000000000000000000000000..0a68319e2faf6bcdc1ce989a5b29c037ac3de9e7
Binary files /dev/null and b/tools/u-boot-tools/fdtgrep.o differ
diff --git a/tools/u-boot-tools/file2include.c b/tools/u-boot-tools/file2include.c
new file mode 100644
index 0000000000000000000000000000000000000000..775440cba9f1072e558494968b14a05eacd9e184
--- /dev/null
+++ b/tools/u-boot-tools/file2include.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Convert a file image to a C define
+ *
+ * Copyright (c) 2017 Heinrich Schuchardt <xypron.glpk@gmx.de>
+ *
+ * For testing EFI disk management we need an in memory image of
+ * a disk.
+ *
+ * The tool file2include converts a file to a C include. The file
+ * is separated into strings of 8 bytes. Only the non-zero strings
+ * are written to the include. The output format has been designed
+ * to maintain readability.
+ *
+ * As the disk image needed for testing contains mostly zeroes, a high
+ * compression ratio can be attained.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+
+/* Size of the blocks written to the compressed file */
+#define BLOCK_SIZE 8
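+
+/*
+ * Sketch of the generated output (values made up for illustration):
+ *
+ *   #define EFI_ST_DISK_IMG { 0x00010000, { \
+ *	{0x00000000, "\xeb\x3c\x90\x6d\x6b\x66\x73\x2e"}, ... \
+ *	{0, NULL} } }
+ *
+ * i.e. the total file size followed by one (offset, 8-byte string) pair
+ * per block that is not all zeroes, each annotated with its printable
+ * ASCII form, terminated by {0, NULL}.
+ */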
+
+int main(int argc, char *argv[])
+{
+	FILE *file;
+	int ret;
+	unsigned char *buf;
+	size_t count, i, j;
+
+	/* Provide usage help */
+	if (argc != 2) {
+		printf("Usage:\n%s FILENAME\n", argv[0]);
+		return EXIT_FAILURE;
+	}
+	/* Open file */
+	file = fopen(argv[1], "r");
+	if (!file) {
+		perror("fopen");
+		return EXIT_FAILURE;
+	}
+	/* Get file length */
+	ret = fseek(file, 0, SEEK_END);
+	if (ret < 0) {
+		perror("fseek");
+		return EXIT_FAILURE;
+	}
+	count = ftell(file);
+	if (!count) {
+		fprintf(stderr, "File %s has length 0\n", argv[1]);
+		return EXIT_FAILURE;
+	}
+	rewind(file);
+	/* Read file */
+	buf = malloc(count);
+	if (!buf) {
+		perror("malloc");
+		return EXIT_FAILURE;
+	}
+	count = fread(buf, 1, count, file);
+
+	/* Generate output */
+	printf("/* SPDX-License-Identifier: GPL-2.0+ */\n");
+	printf("/*\n");
+	printf(" *  Non-zero %u byte strings of a disk image\n", BLOCK_SIZE);
+	printf(" *\n");
+	printf(" *  Generated with tools/file2include\n");
+	printf(" */\n\n");
+	printf("#define EFI_ST_DISK_IMG { 0x%08zx, { \\\n", count);
+
+	for (i = 0; i < count; i += BLOCK_SIZE) {
+		int c = 0;
+
+		for (j = i; j < i + BLOCK_SIZE && j < count; ++j) {
+			if (buf[j])
+				c = 1;
+		}
+		if (!c)
+			continue;
+		printf("\t{0x%08zx, \"", i);
+		for (j = i; j < i + BLOCK_SIZE && j < count; ++j)
+			printf("\\x%02x", buf[j]);
+		printf("\"}, /* ");
+		for (j = i; j < i + BLOCK_SIZE && j < count; ++j) {
+			if (buf[j] != '*' && buf[j] >= 0x20 && buf[j] <= 0x7e)
+				printf("%c", buf[j]);
+			else
+				printf(".");
+		}
+		printf(" */ \\\n");
+	}
+	printf("\t{0, NULL} } }\n");
+
+	/* Release resources */
+	free(buf);
+	ret = fclose(file);
+	if (ret) {
+		perror("fclose");
+		return EXIT_FAILURE;
+	}
+	return EXIT_SUCCESS;
+}
diff --git a/tools/u-boot-tools/fit_check_sign.c b/tools/u-boot-tools/fit_check_sign.c
new file mode 100644
index 0000000000000000000000000000000000000000..62adc751cbce4a8ab236ee2abd627e91c4912a44
--- /dev/null
+++ b/tools/u-boot-tools/fit_check_sign.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2014
+ * DENX Software Engineering
+ * Heiko Schocher <hs@denx.de>
+ *
+ * Based on:
+ * (C) Copyright 2008 Semihalf
+ *
+ * (C) Copyright 2000-2004
+ * DENX Software Engineering
+ * Wolfgang Denk, wd@denx.de
+ *
+ * Updated-by: Prafulla Wadaskar <prafulla@marvell.com>
+ *		FIT image specific code abstracted from mkimage.c
+ *		some functions added to address abstraction
+ *
+ * All rights reserved.
+ */
+
+#include "mkimage.h"
+#include "fit_common.h"
+#include <image.h>
+#include <u-boot/crc.h>
+
+void usage(char *cmdname)
+{
+	fprintf(stderr, "Usage: %s -f <fit file> -k <key file>\n"
+			 "          -f ==> set fit file which should be checked\n"
+			 "          -k ==> set key file which contains the key\n",
+		cmdname);
+	exit(EXIT_FAILURE);
+}
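+
+/*
+ * Typical use (file names are illustrative):
+ *
+ *   fit_check_sign -f image.fit -k u-boot.dtb
+ *
+ * where u-boot.dtb is a control FDT that carries the public key(s),
+ * typically stored under its /signature node when the FIT was signed.
+ */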
+
+int main(int argc, char **argv)
+{
+	int ffd = -1;
+	int kfd = -1;
+	struct stat fsbuf;
+	struct stat ksbuf;
+	void *fit_blob;
+	char *fdtfile = NULL;
+	char *keyfile = NULL;
+	char cmdname[256];
+	int ret;
+	void *key_blob;
+	int c;
+
+	strncpy(cmdname, *argv, sizeof(cmdname) - 1);
+	cmdname[sizeof(cmdname) - 1] = '\0';
+	while ((c = getopt(argc, argv, "f:k:")) != -1)
+		switch (c) {
+		case 'f':
+			fdtfile = optarg;
+			break;
+		case 'k':
+			keyfile = optarg;
+			break;
+		default:
+			usage(cmdname);
+			break;
+	}
+
+	if (!fdtfile) {
+		fprintf(stderr, "%s: Missing fdt file\n", *argv);
+		usage(*argv);
+	}
+	if (!keyfile) {
+		fprintf(stderr, "%s: Missing key file\n", *argv);
+		usage(*argv);
+	}
+
+	ffd = mmap_fdt(cmdname, fdtfile, 0, &fit_blob, &fsbuf, false);
+	if (ffd < 0)
+		return EXIT_FAILURE;
+	kfd = mmap_fdt(cmdname, keyfile, 0, &key_blob, &ksbuf, false);
+	if (kfd < 0)
+		return EXIT_FAILURE;
+
+	image_set_host_blob(key_blob);
+	ret = fit_check_sign(fit_blob, key_blob);
+	if (!ret) {
+		fprintf(stderr, "Signature check OK\n");
+		ret = EXIT_SUCCESS;
+	} else {
+		fprintf(stderr, "Signature check Bad (error %d)\n", ret);
+		ret = EXIT_FAILURE;
+	}
+
+	(void) munmap((void *)fit_blob, fsbuf.st_size);
+	(void) munmap((void *)key_blob, ksbuf.st_size);
+
+	close(ffd);
+	close(kfd);
+	exit(ret);
+}
diff --git a/tools/u-boot-tools/fit_common.c b/tools/u-boot-tools/fit_common.c
new file mode 100644
index 0000000000000000000000000000000000000000..d96085eaad3278fc443ffbac7b2bda3bc98c6038
--- /dev/null
+++ b/tools/u-boot-tools/fit_common.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2014
+ * DENX Software Engineering
+ * Heiko Schocher <hs@denx.de>
+ *
+ * (C) Copyright 2008 Semihalf
+ *
+ * (C) Copyright 2000-2004
+ * DENX Software Engineering
+ * Wolfgang Denk, wd@denx.de
+ *
+ * Updated-by: Prafulla Wadaskar <prafulla@marvell.com>
+ *		FIT image specific code abstracted from mkimage.c
+ *		some functions added to address abstraction
+ *
+ * All rights reserved.
+ */
+
+#include "imagetool.h"
+#include "mkimage.h"
+#include "fit_common.h"
+#include <image.h>
+#include <u-boot/crc.h>
+
+int fit_verify_header(unsigned char *ptr, int image_size,
+			struct image_tool_params *params)
+{
+	return fdt_check_header(ptr);
+}
+
+int fit_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_FLATDT)
+		return EXIT_SUCCESS;
+	else
+		return EXIT_FAILURE;
+}
+
+int mmap_fdt(const char *cmdname, const char *fname, size_t size_inc,
+	     void **blobp, struct stat *sbuf, bool delete_on_error)
+{
+	void *ptr;
+	int fd;
+
+	/* Load FIT blob into memory (we need to write hashes/signatures) */
+	fd = open(fname, O_RDWR | O_BINARY);
+
+	if (fd < 0) {
+		fprintf(stderr, "%s: Can't open %s: %s\n",
+			cmdname, fname, strerror(errno));
+		goto err;
+	}
+
+	if (fstat(fd, sbuf) < 0) {
+		fprintf(stderr, "%s: Can't stat %s: %s\n",
+			cmdname, fname, strerror(errno));
+		goto err;
+	}
+
+	if (size_inc) {
+		sbuf->st_size += size_inc;
+		if (ftruncate(fd, sbuf->st_size)) {
+			fprintf(stderr, "%s: Can't expand %s: %s\n",
+				cmdname, fname, strerror(errno));
+			goto err;
+		}
+	}
+
+	errno = 0;
+	ptr = mmap(0, sbuf->st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+	if ((ptr == MAP_FAILED) || (errno != 0)) {
+		fprintf(stderr, "%s: Can't read %s: %s\n",
+			cmdname, fname, strerror(errno));
+		goto err;
+	}
+
+	/* check if ptr has a valid blob */
+	if (fdt_check_header(ptr)) {
+		fprintf(stderr, "%s: Invalid FIT blob\n", cmdname);
+		goto err;
+	}
+
+	/* expand if needed */
+	if (size_inc) {
+		int ret;
+
+		ret = fdt_open_into(ptr, ptr, sbuf->st_size);
+		if (ret) {
+			fprintf(stderr, "%s: Cannot expand FDT: %s\n",
+				cmdname, fdt_strerror(ret));
+			goto err;
+		}
+	}
+
+	*blobp = ptr;
+	return fd;
+
+err:
+	if (fd >= 0)
+		close(fd);
+	if (delete_on_error)
+		unlink(fname);
+
+	return -1;
+}
diff --git a/tools/u-boot-tools/fit_common.h b/tools/u-boot-tools/fit_common.h
new file mode 100644
index 0000000000000000000000000000000000000000..71e792e3c489d9e48d8eaabb943f58c0aa08ef13
--- /dev/null
+++ b/tools/u-boot-tools/fit_common.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2014
+ */
+
+#ifndef _FIT_COMMON_H_
+#define _FIT_COMMON_H_
+
+#include "imagetool.h"
+#include "mkimage.h"
+#include <image.h>
+
+int fit_verify_header(unsigned char *ptr, int image_size,
+			struct image_tool_params *params);
+
+int fit_check_image_types(uint8_t type);
+
+/**
+ * Map an FDT into memory, optionally increasing its size
+ *
+ * @cmdname:	Tool name (for displaying with error messages)
+ * @fname:	Filename containing FDT
+ * @size_inc:	Amount to increase size by (0 = leave it alone)
+ * @blobp:	Returns pointer to FDT blob
+ * @sbuf:	File status information is stored here
+ * @delete_on_error:	true to delete the file if we get an error
+ * @return open file descriptor (>= 0) if OK, -1 on error.
+ */
+int mmap_fdt(const char *cmdname, const char *fname, size_t size_inc,
+	     void **blobp, struct stat *sbuf, bool delete_on_error);
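+
+/*
+ * Minimal usage sketch (error handling trimmed, names illustrative):
+ *
+ *	struct stat sbuf;
+ *	void *blob;
+ *	int fd = mmap_fdt("mytool", "image.fit", 0, &blob, &sbuf, false);
+ *
+ *	if (fd >= 0) {
+ *		... use blob as a libfdt device tree ...
+ *		munmap(blob, sbuf.st_size);
+ *		close(fd);
+ *	}
+ */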
+
+#endif /* _FIT_COMMON_H_ */
diff --git a/tools/u-boot-tools/fit_common.o b/tools/u-boot-tools/fit_common.o
new file mode 100644
index 0000000000000000000000000000000000000000..2535d80eed761949039a29489df82e5178639bb2
Binary files /dev/null and b/tools/u-boot-tools/fit_common.o differ
diff --git a/tools/u-boot-tools/fit_image.c b/tools/u-boot-tools/fit_image.c
new file mode 100644
index 0000000000000000000000000000000000000000..4b626354fb3f1e235af40a3c1110de5830236ae8
--- /dev/null
+++ b/tools/u-boot-tools/fit_image.c
@@ -0,0 +1,835 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2008 Semihalf
+ *
+ * (C) Copyright 2000-2004
+ * DENX Software Engineering
+ * Wolfgang Denk, wd@denx.de
+ *
+ * Updated-by: Prafulla Wadaskar <prafulla@marvell.com>
+ *		FIT image specific code abstracted from mkimage.c
+ *		some functions added to address abstraction
+ *
+ * All rights reserved.
+ */
+
+#include "imagetool.h"
+#include "fit_common.h"
+#include "mkimage.h"
+#include <image.h>
+#include <stdarg.h>
+#include <version.h>
+#include <u-boot/crc.h>
+
+static image_header_t header;
+
+static int fit_add_file_data(struct image_tool_params *params, size_t size_inc,
+			     const char *tmpfile)
+{
+	int tfd, destfd = 0;
+	void *dest_blob = NULL;
+	off_t destfd_size = 0;
+	struct stat sbuf;
+	void *ptr;
+	int ret = 0;
+
+	tfd = mmap_fdt(params->cmdname, tmpfile, size_inc, &ptr, &sbuf, true);
+	if (tfd < 0)
+		return -EIO;
+
+	if (params->keydest) {
+		struct stat dest_sbuf;
+
+		destfd = mmap_fdt(params->cmdname, params->keydest, size_inc,
+				  &dest_blob, &dest_sbuf, false);
+		if (destfd < 0) {
+			ret = -EIO;
+			goto err_keydest;
+		}
+		destfd_size = dest_sbuf.st_size;
+	}
+
+	/* for first image creation, add a timestamp at offset 0 i.e., root  */
+	if (params->datafile) {
+		time_t time = imagetool_get_source_date(params->cmdname,
+							sbuf.st_mtime);
+		ret = fit_set_timestamp(ptr, 0, time);
+	}
+
+	if (!ret) {
+		ret = fit_add_verification_data(params->keydir, dest_blob, ptr,
+						params->comment,
+						params->require_keys,
+						params->engine_id,
+						params->cmdname);
+	}
+
+	if (dest_blob) {
+		munmap(dest_blob, destfd_size);
+		close(destfd);
+	}
+
+err_keydest:
+	munmap(ptr, sbuf.st_size);
+	close(tfd);
+
+	return ret;
+}
+
+/**
+ * fit_calc_size() - Calculate the approximate size of the FIT we will generate
+ */
+static int fit_calc_size(struct image_tool_params *params)
+{
+	struct content_info *cont;
+	int size, total_size;
+
+	size = imagetool_get_filesize(params, params->datafile);
+	if (size < 0)
+		return -1;
+	total_size = size;
+
+	if (params->fit_ramdisk) {
+		size = imagetool_get_filesize(params, params->fit_ramdisk);
+		if (size < 0)
+			return -1;
+		total_size += size;
+	}
+
+	for (cont = params->content_head; cont; cont = cont->next) {
+		size = imagetool_get_filesize(params, cont->fname);
+		if (size < 0)
+			return -1;
+
+		/* Add space for properties */
+		total_size += size + 300;
+	}
+
+	/* Add plenty of space for headers, properties, nodes, etc. */
+	total_size += 4096;
+
+	return total_size;
+}
+
+static int fdt_property_file(struct image_tool_params *params,
+			     void *fdt, const char *name, const char *fname)
+{
+	struct stat sbuf;
+	void *ptr;
+	int ret;
+	int fd;
+
+	fd = open(fname, O_RDWR | O_BINARY);
+	if (fd < 0) {
+		fprintf(stderr, "%s: Can't open %s: %s\n",
+			params->cmdname, fname, strerror(errno));
+		return -1;
+	}
+
+	if (fstat(fd, &sbuf) < 0) {
+		fprintf(stderr, "%s: Can't stat %s: %s\n",
+			params->cmdname, fname, strerror(errno));
+		goto err;
+	}
+
+	ret = fdt_property_placeholder(fdt, name, sbuf.st_size, &ptr);
+	if (ret)
+		goto err;
+	ret = read(fd, ptr, sbuf.st_size);
+	if (ret != sbuf.st_size) {
+		fprintf(stderr, "%s: Can't read %s: %s\n",
+			params->cmdname, fname, strerror(errno));
+		goto err;
+	}
+	close(fd);
+
+	return 0;
+err:
+	close(fd);
+	return -1;
+}
+
+static int fdt_property_strf(void *fdt, const char *name, const char *fmt, ...)
+{
+	char str[100];
+	va_list ptr;
+
+	va_start(ptr, fmt);
+	vsnprintf(str, sizeof(str), fmt, ptr);
+	va_end(ptr);
+	return fdt_property_string(fdt, name, str);
+}
+
+static void get_basename(char *str, int size, const char *fname)
+{
+	const char *p, *start, *end;
+	int len;
+
+	/*
+	 * Use the base name as the 'name' field. So for example:
+	 *
+	 * "arch/arm/dts/sun7i-a20-bananapro.dtb"
+	 * becomes "sun7i-a20-bananapro"
+	 */
+	p = strrchr(fname, '/');
+	start = p ? p + 1 : fname;
+	p = strrchr(fname, '.');
+	end = p ? p : fname + strlen(fname);
+	len = end - start;
+	if (len >= size)
+		len = size - 1;
+	memcpy(str, start, len);
+	str[len] = '\0';
+}
+
+/**
+ * fit_write_images() - Write out a list of images to the FIT
+ *
+ * We always include the main image (params->datafile). If there are device
+ * tree files, we include an fdt- node for each of those too.
+ */
+static int fit_write_images(struct image_tool_params *params, char *fdt)
+{
+	struct content_info *cont;
+	const char *typename;
+	char str[100];
+	int upto;
+	int ret;
+
+	fdt_begin_node(fdt, "images");
+
+	/* First the main image */
+	typename = genimg_get_type_short_name(params->fit_image_type);
+	snprintf(str, sizeof(str), "%s-1", typename);
+	fdt_begin_node(fdt, str);
+	fdt_property_string(fdt, FIT_DESC_PROP, params->imagename);
+	fdt_property_string(fdt, FIT_TYPE_PROP, typename);
+	fdt_property_string(fdt, FIT_ARCH_PROP,
+			    genimg_get_arch_short_name(params->arch));
+	fdt_property_string(fdt, FIT_OS_PROP,
+			    genimg_get_os_short_name(params->os));
+	fdt_property_string(fdt, FIT_COMP_PROP,
+			    genimg_get_comp_short_name(params->comp));
+	fdt_property_u32(fdt, FIT_LOAD_PROP, params->addr);
+	fdt_property_u32(fdt, FIT_ENTRY_PROP, params->ep);
+
+	/*
+	 * Put data last since it is large. SPL may only load the first part
+	 * of the DT, so this way it can access all the above fields.
+	 */
+	ret = fdt_property_file(params, fdt, FIT_DATA_PROP, params->datafile);
+	if (ret)
+		return ret;
+	fdt_end_node(fdt);
+
+	/* Now the device tree files if available */
+	upto = 0;
+	for (cont = params->content_head; cont; cont = cont->next) {
+		if (cont->type != IH_TYPE_FLATDT)
+			continue;
+		snprintf(str, sizeof(str), "%s-%d", FIT_FDT_PROP, ++upto);
+		fdt_begin_node(fdt, str);
+
+		get_basename(str, sizeof(str), cont->fname);
+		fdt_property_string(fdt, FIT_DESC_PROP, str);
+		ret = fdt_property_file(params, fdt, FIT_DATA_PROP,
+					cont->fname);
+		if (ret)
+			return ret;
+		fdt_property_string(fdt, FIT_TYPE_PROP, typename);
+		fdt_property_string(fdt, FIT_ARCH_PROP,
+				    genimg_get_arch_short_name(params->arch));
+		fdt_property_string(fdt, FIT_COMP_PROP,
+				    genimg_get_comp_short_name(IH_COMP_NONE));
+		fdt_end_node(fdt);
+	}
+
+	/* And a ramdisk file if available */
+	if (params->fit_ramdisk) {
+		fdt_begin_node(fdt, FIT_RAMDISK_PROP "-1");
+
+		fdt_property_string(fdt, FIT_TYPE_PROP, FIT_RAMDISK_PROP);
+		fdt_property_string(fdt, FIT_OS_PROP,
+				    genimg_get_os_short_name(params->os));
+
+		ret = fdt_property_file(params, fdt, FIT_DATA_PROP,
+					params->fit_ramdisk);
+		if (ret)
+			return ret;
+
+		fdt_end_node(fdt);
+	}
+
+	fdt_end_node(fdt);
+
+	return 0;
+}
+
+/**
+ * fit_write_configs() - Write out a list of configurations to the FIT
+ *
+ * If there are device tree files, we include a configuration for each, which
+ * selects the main image (params->datafile) and its corresponding device
+ * tree file.
+ *
+ * Otherwise we just create a configuration with the main image in it.
+ */
+static void fit_write_configs(struct image_tool_params *params, char *fdt)
+{
+	struct content_info *cont;
+	const char *typename;
+	char str[100];
+	int upto;
+
+	fdt_begin_node(fdt, "configurations");
+	fdt_property_string(fdt, FIT_DEFAULT_PROP, "conf-1");
+
+	upto = 0;
+	for (cont = params->content_head; cont; cont = cont->next) {
+		if (cont->type != IH_TYPE_FLATDT)
+			continue;
+		typename = genimg_get_type_short_name(cont->type);
+		snprintf(str, sizeof(str), "conf-%d", ++upto);
+		fdt_begin_node(fdt, str);
+
+		get_basename(str, sizeof(str), cont->fname);
+		fdt_property_string(fdt, FIT_DESC_PROP, str);
+
+		typename = genimg_get_type_short_name(params->fit_image_type);
+		snprintf(str, sizeof(str), "%s-1", typename);
+		fdt_property_string(fdt, typename, str);
+
+		if (params->fit_ramdisk)
+			fdt_property_string(fdt, FIT_RAMDISK_PROP,
+					    FIT_RAMDISK_PROP "-1");
+
+		snprintf(str, sizeof(str), FIT_FDT_PROP "-%d", upto);
+		fdt_property_string(fdt, FIT_FDT_PROP, str);
+		fdt_end_node(fdt);
+	}
+
+	if (!upto) {
+		fdt_begin_node(fdt, "conf-1");
+		typename = genimg_get_type_short_name(params->fit_image_type);
+		snprintf(str, sizeof(str), "%s-1", typename);
+		fdt_property_string(fdt, typename, str);
+
+		if (params->fit_ramdisk)
+			fdt_property_string(fdt, FIT_RAMDISK_PROP,
+					    FIT_RAMDISK_PROP "-1");
+
+		fdt_end_node(fdt);
+	}
+
+	fdt_end_node(fdt);
+}
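+
+/*
+ * Roughly, the tree produced by fit_write_images() and fit_write_configs()
+ * for an automatic FIT looks like this (illustrative source form):
+ *
+ *	/ {
+ *		images {
+ *			kernel-1 { data, type, arch, os, compression, ... };
+ *			fdt-1 { data, type, arch, ... };
+ *		};
+ *		configurations {
+ *			default = "conf-1";
+ *			conf-1 { kernel = "kernel-1"; fdt = "fdt-1"; };
+ *		};
+ *	};
+ */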
+
+static int fit_build_fdt(struct image_tool_params *params, char *fdt, int size)
+{
+	int ret;
+
+	ret = fdt_create(fdt, size);
+	if (ret)
+		return ret;
+	fdt_finish_reservemap(fdt);
+	fdt_begin_node(fdt, "");
+	fdt_property_strf(fdt, FIT_DESC_PROP,
+			  "%s image with one or more FDT blobs",
+			  genimg_get_type_name(params->fit_image_type));
+	fdt_property_strf(fdt, "creator", "U-Boot mkimage %s", PLAIN_VERSION);
+	fdt_property_u32(fdt, "#address-cells", 1);
+	ret = fit_write_images(params, fdt);
+	if (ret)
+		return ret;
+	fit_write_configs(params, fdt);
+	fdt_end_node(fdt);
+	ret = fdt_finish(fdt);
+	if (ret)
+		return ret;
+
+	return fdt_totalsize(fdt);
+}
+
+static int fit_build(struct image_tool_params *params, const char *fname)
+{
+	char *buf;
+	int size;
+	int ret;
+	int fd;
+
+	size = fit_calc_size(params);
+	if (size < 0)
+		return -1;
+	buf = malloc(size);
+	if (!buf) {
+		fprintf(stderr, "%s: Out of memory (%d bytes)\n",
+			params->cmdname, size);
+		return -1;
+	}
+	ret = fit_build_fdt(params, buf, size);
+	if (ret < 0) {
+		fprintf(stderr, "%s: Failed to build FIT image\n",
+			params->cmdname);
+		goto err_buf;
+	}
+	size = ret;
+	fd = open(fname, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0666);
+	if (fd < 0) {
+		fprintf(stderr, "%s: Can't open %s: %s\n",
+			params->cmdname, fname, strerror(errno));
+		goto err_buf;
+	}
+	ret = write(fd, buf, size);
+	if (ret != size) {
+		fprintf(stderr, "%s: Can't write %s: %s\n",
+			params->cmdname, fname, strerror(errno));
+		goto err;
+	}
+	close(fd);
+	free(buf);
+
+	return 0;
+err:
+	close(fd);
+err_buf:
+	free(buf);
+	return -1;
+}
+
+/**
+ * fit_extract_data() - Move all data outside the FIT
+ *
+ * This takes a normal FIT file and removes all the 'data' properties from it.
+ * The data is placed in an area after the FIT so that it can be accessed
+ * using an offset into that area. The 'data' properties turn into
+ * 'data-offset' properties.
+ *
+ * This function cannot cope with FITs with 'data-offset' properties. All
+ * data must be in 'data' properties on entry.
+ */
+static int fit_extract_data(struct image_tool_params *params, const char *fname)
+{
+	void *buf;
+	int buf_ptr;
+	int fit_size, new_size;
+	int fd;
+	struct stat sbuf;
+	void *fdt;
+	int ret;
+	int images;
+	int node;
+
+	fd = mmap_fdt(params->cmdname, fname, 0, &fdt, &sbuf, false);
+	if (fd < 0)
+		return -EIO;
+	fit_size = fdt_totalsize(fdt);
+
+	/* Allocate space to hold the image data we will extract */
+	buf = malloc(fit_size);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err_munmap;
+	}
+	buf_ptr = 0;
+
+	images = fdt_path_offset(fdt, FIT_IMAGES_PATH);
+	if (images < 0) {
+		debug("%s: Cannot find /images node: %d\n", __func__, images);
+		ret = -EINVAL;
+		goto err_munmap;
+	}
+
+	for (node = fdt_first_subnode(fdt, images);
+	     node >= 0;
+	     node = fdt_next_subnode(fdt, node)) {
+		const char *data;
+		int len;
+
+		data = fdt_getprop(fdt, node, FIT_DATA_PROP, &len);
+		if (!data)
+			continue;
+		memcpy(buf + buf_ptr, data, len);
+		debug("Extracting data size %x\n", len);
+
+		ret = fdt_delprop(fdt, node, FIT_DATA_PROP);
+		if (ret) {
+			ret = -EPERM;
+			goto err_munmap;
+		}
+		if (params->external_offset > 0) {
+			/* An external offset positions the data absolutely. */
+			fdt_setprop_u32(fdt, node, FIT_DATA_POSITION_PROP,
+					params->external_offset + buf_ptr);
+		} else {
+			fdt_setprop_u32(fdt, node, FIT_DATA_OFFSET_PROP,
+					buf_ptr);
+		}
+		fdt_setprop_u32(fdt, node, FIT_DATA_SIZE_PROP, len);
+
+		buf_ptr += (len + 3) & ~3;
+	}
+
+	/* Pack the FDT and place the data after it */
+	fdt_pack(fdt);
+
+	debug("Size reduced from %x to %x\n", fit_size, fdt_totalsize(fdt));
+	debug("External data size %x\n", buf_ptr);
+	new_size = fdt_totalsize(fdt);
+	new_size = (new_size + 3) & ~3;
+	munmap(fdt, sbuf.st_size);
+
+	if (ftruncate(fd, new_size)) {
+		debug("%s: Failed to truncate file: %s\n", __func__,
+		      strerror(errno));
+		ret = -EIO;
+		goto err;
+	}
+
+	/* Check if an offset for the external data was set. */
+	if (params->external_offset > 0) {
+		if (params->external_offset < new_size) {
+			debug("External offset %x overlaps FIT length %x",
+			      params->external_offset, new_size);
+			ret = -EINVAL;
+			goto err;
+		}
+		new_size = params->external_offset;
+	}
+	if (lseek(fd, new_size, SEEK_SET) < 0) {
+		debug("%s: Failed to seek to end of file: %s\n", __func__,
+		      strerror(errno));
+		ret = -EIO;
+		goto err;
+	}
+	if (write(fd, buf, buf_ptr) != buf_ptr) {
+		debug("%s: Failed to write external data to file %s\n",
+		      __func__, strerror(errno));
+		ret = -EIO;
+		goto err;
+	}
+	free(buf);
+	close(fd);
+	return 0;
+
+err_munmap:
+	munmap(fdt, sbuf.st_size);
+err:
+	if (buf)
+		free(buf);
+	close(fd);
+	return ret;
+}
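+
+/*
+ * For example (illustrative), fit_extract_data() turns an image node that
+ * contained
+ *
+ *	data = <...binary blob...>;
+ *
+ * into one whose payload is stored after the packed FIT and described by
+ *
+ *	data-offset = <0x0>;
+ *	data-size = <0x1234>;
+ *
+ * or, when params->external_offset is set, by data-position/data-size
+ * giving the absolute position instead of an offset.
+ */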
+
+static int fit_import_data(struct image_tool_params *params, const char *fname)
+{
+	void *fdt, *old_fdt;
+	int fit_size, new_size, size, data_base;
+	int fd;
+	struct stat sbuf;
+	int ret;
+	int images;
+	int node;
+
+	fd = mmap_fdt(params->cmdname, fname, 0, &old_fdt, &sbuf, false);
+	if (fd < 0)
+		return -EIO;
+	fit_size = fdt_totalsize(old_fdt);
+	data_base = (fit_size + 3) & ~3;
+
+	/* Allocate space to hold the new FIT */
+	size = sbuf.st_size + 16384;
+	fdt = malloc(size);
+	if (!fdt) {
+		fprintf(stderr, "%s: Failed to allocate memory (%d bytes)\n",
+			__func__, size);
+		ret = -ENOMEM;
+		goto err_has_fd;
+	}
+	ret = fdt_open_into(old_fdt, fdt, size);
+	if (ret) {
+		debug("%s: Failed to expand FIT: %s\n", __func__,
+		      fdt_strerror(errno));
+		ret = -EINVAL;
+		goto err_has_fd;
+	}
+
+	images = fdt_path_offset(fdt, FIT_IMAGES_PATH);
+	if (images < 0) {
+		debug("%s: Cannot find /images node: %d\n", __func__, images);
+		ret = -EINVAL;
+		goto err_has_fd;
+	}
+
+	for (node = fdt_first_subnode(fdt, images);
+	     node >= 0;
+	     node = fdt_next_subnode(fdt, node)) {
+		int buf_ptr;
+		int len;
+
+		buf_ptr = fdtdec_get_int(fdt, node, "data-offset", -1);
+		len = fdtdec_get_int(fdt, node, "data-size", -1);
+		if (buf_ptr == -1 || len == -1)
+			continue;
+		debug("Importing data size %x\n", len);
+
+		ret = fdt_setprop(fdt, node, "data", fdt + data_base + buf_ptr,
+				  len);
+		if (ret) {
+			debug("%s: Failed to write property: %s\n", __func__,
+			      fdt_strerror(ret));
+			ret = -EINVAL;
+			goto err_has_fd;
+		}
+	}
+
+	/* Close the old fd so we can re-use it. */
+	close(fd);
+
+	/* Pack the FDT and place the data after it */
+	fdt_pack(fdt);
+
+	new_size = fdt_totalsize(fdt);
+	debug("Size expanded from %x to %x\n", fit_size, new_size);
+
+	fd = open(fname, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0666);
+	if (fd < 0) {
+		fprintf(stderr, "%s: Can't open %s: %s\n",
+			params->cmdname, fname, strerror(errno));
+		ret = -EIO;
+		goto err_no_fd;
+	}
+	if (write(fd, fdt, new_size) != new_size) {
+		debug("%s: Failed to write external data to file %s\n",
+		      __func__, strerror(errno));
+		ret = -EIO;
+		goto err_has_fd;
+	}
+
+	ret = 0;
+
+err_has_fd:
+	close(fd);
+err_no_fd:
+	munmap(old_fdt, sbuf.st_size);
+	free(fdt);
+	return ret;
+}
+
+/**
+ * fit_handle_file - main FIT file processing function
+ *
+ * fit_handle_file() runs dtc to convert .its to .itb, includes
+ * binary data, updates timestamp property and calculates hashes.
+ *
+ * datafile  - .its file
+ * imagefile - .itb file
+ *
+ * returns:
+ *     EXIT_SUCCESS on success, EXIT_FAILURE or a negative value on error
+ */
+static int fit_handle_file(struct image_tool_params *params)
+{
+	char tmpfile[MKIMAGE_MAX_TMPFILE_LEN];
+	char cmd[MKIMAGE_MAX_DTC_CMDLINE_LEN];
+	size_t size_inc;
+	int ret;
+
+	/* Flattened Image Tree (FIT) format  handling */
+	debug ("FIT format handling\n");
+
+	/* call dtc to include binary properties into the tmp file */
+	if (strlen (params->imagefile) +
+		strlen (MKIMAGE_TMPFILE_SUFFIX) + 1 > sizeof (tmpfile)) {
+		fprintf (stderr, "%s: Image file name (%s) too long, "
+				"can't create tmpfile\n",
+				params->cmdname, params->imagefile);
+		return (EXIT_FAILURE);
+	}
+	sprintf (tmpfile, "%s%s", params->imagefile, MKIMAGE_TMPFILE_SUFFIX);
+
+	/* We either compile the source file, or use the existing FIT image */
+	if (params->auto_its) {
+		if (fit_build(params, tmpfile)) {
+			fprintf(stderr, "%s: failed to build FIT\n",
+				params->cmdname);
+			return EXIT_FAILURE;
+		}
+		*cmd = '\0';
+	} else if (params->datafile) {
+		/* dtc -I dts -O dtb -p 500 -o tmpfile datafile */
+		snprintf(cmd, sizeof(cmd), "%s %s -o \"%s\" \"%s\"",
+			 MKIMAGE_DTC, params->dtc, tmpfile, params->datafile);
+		debug("Trying to execute \"%s\"\n", cmd);
+	} else {
+		snprintf(cmd, sizeof(cmd), "cp \"%s\" \"%s\"",
+			 params->imagefile, tmpfile);
+	}
+	if (*cmd && system(cmd) == -1) {
+		fprintf (stderr, "%s: system(%s) failed: %s\n",
+				params->cmdname, cmd, strerror(errno));
+		goto err_system;
+	}
+
+	/* Move the data so it is internal to the FIT, if needed */
+	ret = fit_import_data(params, tmpfile);
+	if (ret)
+		goto err_system;
+
+	/*
+	 * Set hashes for images in the blob. Unfortunately we may need more
+	 * space in either FDT, so keep trying until we succeed.
+	 *
+	 * Note: this is pretty inefficient for signing, since we must
+	 * calculate the signature every time. It would be better to calculate
+	 * all the data and then store it in a separate step. However, this
+	 * would be considerably more complex to implement. Generally a few
+	 * steps of this loop are enough to sign with several keys.
+	 */
+	for (size_inc = 0; size_inc < 64 * 1024; size_inc += 1024) {
+		ret = fit_add_file_data(params, size_inc, tmpfile);
+		if (!ret || ret != -ENOSPC)
+			break;
+	}
+
+	if (ret) {
+		fprintf(stderr, "%s Can't add hashes to FIT blob: %d\n",
+			params->cmdname, ret);
+		goto err_system;
+	}
+
+	/* Move the data so it is external to the FIT, if requested */
+	if (params->external_data) {
+		ret = fit_extract_data(params, tmpfile);
+		if (ret)
+			goto err_system;
+	}
+
+	if (rename (tmpfile, params->imagefile) == -1) {
+		fprintf (stderr, "%s: Can't rename %s to %s: %s\n",
+				params->cmdname, tmpfile, params->imagefile,
+				strerror (errno));
+		unlink (tmpfile);
+		unlink (params->imagefile);
+		return EXIT_FAILURE;
+	}
+	return EXIT_SUCCESS;
+
+err_system:
+	unlink(tmpfile);
+	return -1;
+}
+
+/**
+ * fit_image_extract - extract a FIT component image
+ * @fit: pointer to the FIT format image header
+ * @image_noffset: offset of the component image node
+ * @file_name: name of the file to store the FIT sub-image
+ *
+ * returns:
+ *     zero in case of success or a negative value if fail.
+ */
+static int fit_image_extract(
+	const void *fit,
+	int image_noffset,
+	const char *file_name)
+{
+	const void *file_data;
+	size_t file_size = 0;
+
+	/* get the "data" property of component at offset "image_noffset" */
+	fit_image_get_data(fit, image_noffset, &file_data, &file_size);
+
+	/* save the "file_data" into the file specified by "file_name" */
+	return imagetool_save_subimage(file_name, (ulong) file_data, file_size);
+}
+
+/**
+ * fit_extract_contents - retrieve a sub-image component from the FIT image
+ * @ptr: pointer to the FIT format image header
+ * @params: command line parameters
+ *
+ * returns:
+ *     zero in case of success or a negative value if fail.
+ */
+static int fit_extract_contents(void *ptr, struct image_tool_params *params)
+{
+	int images_noffset;
+	int noffset;
+	int ndepth;
+	const void *fit = ptr;
+	int count = 0;
+	const char *p;
+
+	/* Indent string is defined in header image.h */
+	p = IMAGE_INDENT_STRING;
+
+	if (!fit_check_format(fit)) {
+		printf("Bad FIT image format\n");
+		return -1;
+	}
+
+	/* Find images parent node offset */
+	images_noffset = fdt_path_offset(fit, FIT_IMAGES_PATH);
+	if (images_noffset < 0) {
+		printf("Can't find images parent node '%s' (%s)\n",
+		       FIT_IMAGES_PATH, fdt_strerror(images_noffset));
+		return -1;
+	}
+
+	/* Avoid any overrun */
+	count = fit_get_subimage_count(fit, images_noffset);
+	if ((params->pflag < 0) || (count <= params->pflag)) {
+		printf("No such component at '%d'\n", params->pflag);
+		return -1;
+	}
+
+	/* Process its subnodes, extract the desired component from image */
+	for (ndepth = 0, count = 0,
+		noffset = fdt_next_node(fit, images_noffset, &ndepth);
+		(noffset >= 0) && (ndepth > 0);
+		noffset = fdt_next_node(fit, noffset, &ndepth)) {
+		if (ndepth == 1) {
+			/*
+			 * Direct child node of the images parent node,
+			 * i.e. component image node.
+			 */
+			if (params->pflag == count) {
+				printf("Extracted:\n%s Image %u (%s)\n", p,
+				       count, fit_get_name(fit, noffset, NULL));
+
+				fit_image_print(fit, noffset, p);
+
+				return fit_image_extract(fit, noffset,
+						params->outfile);
+			}
+
+			count++;
+		}
+	}
+
+	return 0;
+}
+
+static int fit_check_params(struct image_tool_params *params)
+{
+	if (params->auto_its)
+		return 0;
+	return	((params->dflag && (params->fflag || params->lflag)) ||
+		(params->fflag && (params->dflag || params->lflag)) ||
+		(params->lflag && (params->dflag || params->fflag)));
+}
+
+U_BOOT_IMAGE_TYPE(
+	fitimage,
+	"FIT Image support",
+	sizeof(image_header_t),
+	(void *)&header,
+	fit_check_params,
+	fit_verify_header,
+	fit_print_contents,
+	NULL,
+	fit_extract_contents,
+	fit_check_image_types,
+	fit_handle_file,
+	NULL /* FIT images use DTB header */
+);
diff --git a/tools/u-boot-tools/fit_image.o b/tools/u-boot-tools/fit_image.o
new file mode 100644
index 0000000000000000000000000000000000000000..ba6cb9e94b0ca5204e264e5ff639996c1d5a4248
Binary files /dev/null and b/tools/u-boot-tools/fit_image.o differ
diff --git a/tools/u-boot-tools/fit_info.c b/tools/u-boot-tools/fit_info.c
new file mode 100644
index 0000000000000000000000000000000000000000..45e0b310f778791e1c4d53e8b2cf136f5db5bb62
--- /dev/null
+++ b/tools/u-boot-tools/fit_info.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2014
+ * DENX Software Engineering
+ * Heiko Schocher <hs@denx.de>
+ *
+ * fit_info: print the offset and the length of a property of a
+ *	     node in a FIT file.
+ *
+ * Based on:
+ * (C) Copyright 2008 Semihalf
+ *
+ * (C) Copyright 2000-2004
+ * DENX Software Engineering
+ * Wolfgang Denk, wd@denx.de
+ *
+ * Updated-by: Prafulla Wadaskar <prafulla@marvell.com>
+ *		FIT image specific code abstracted from mkimage.c
+ *		some functions added to address abstraction
+ *
+ * All rights reserved.
+ */
+
+#include "mkimage.h"
+#include "fit_common.h"
+#include <image.h>
+#include <u-boot/crc.h>
+
+void usage(char *cmdname)
+{
+	fprintf(stderr, "Usage: %s -f <fit file> -n <node> -p <property>\n"
+			 "          -f ==> set fit file which is used\n"
+			 "          -n ==> set node name\n"
+			 "          -p ==> set property name\n",
+		cmdname);
+	exit(EXIT_FAILURE);
+}
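+
+/*
+ * Typical use (names are illustrative): find where a sub-image lives
+ * inside a FIT so that a script can carve it out with dd:
+ *
+ *   fit_info -f image.fit -n /images/kernel-1 -p data
+ *
+ * which prints the NAME, LEN and OFF of that node's "data" property.
+ */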
+
+int main(int argc, char **argv)
+{
+	int ffd = -1;
+	struct stat fsbuf;
+	void *fit_blob;
+	int len;
+	int  nodeoffset;	/* node offset from libfdt */
+	const void *nodep;	/* property node pointer */
+	char *fdtfile = NULL;
+	char *nodename = NULL;
+	char *propertyname = NULL;
+	char cmdname[256];
+	int c;
+
+	strncpy(cmdname, *argv, sizeof(cmdname) - 1);
+	cmdname[sizeof(cmdname) - 1] = '\0';
+	while ((c = getopt(argc, argv, "f:n:p:")) != -1)
+		switch (c) {
+		case 'f':
+			fdtfile = optarg;
+			break;
+		case 'n':
+			nodename = optarg;
+			break;
+		case 'p':
+			propertyname = optarg;
+			break;
+		default:
+			usage(cmdname);
+			break;
+		}
+
+	if (!fdtfile) {
+		fprintf(stderr, "%s: Missing fdt file\n", *argv);
+		usage(*argv);
+	}
+	if (!nodename) {
+		fprintf(stderr, "%s: Missing node name\n", *argv);
+		usage(*argv);
+	}
+	if (!propertyname) {
+		fprintf(stderr, "%s: Missing property name\n", *argv);
+		usage(*argv);
+	}
+	ffd = mmap_fdt(cmdname, fdtfile, 0, &fit_blob, &fsbuf, false);
+
+	if (ffd < 0) {
+		printf("Could not open %s\n", fdtfile);
+		exit(EXIT_FAILURE);
+	}
+
+	nodeoffset = fdt_path_offset(fit_blob, nodename);
+	if (nodeoffset < 0) {
+		printf("%s not found.\n", nodename);
+		exit(EXIT_FAILURE);
+	}
+	nodep = fdt_getprop(fit_blob, nodeoffset, propertyname, &len);
+	if (len == 0) {
+		printf("len == 0 %s\n", propertyname);
+		exit(EXIT_FAILURE);
+	}
+
+	printf("NAME: %s\n", fit_get_name(fit_blob, nodeoffset, NULL));
+	printf("LEN: %d\n", len);
+	printf("OFF: %d\n", (int)(nodep - fit_blob));
+	(void) munmap((void *)fit_blob, fsbuf.st_size);
+
+	close(ffd);
+	exit(EXIT_SUCCESS);
+}
diff --git a/tools/u-boot-tools/gdb/Makefile b/tools/u-boot-tools/gdb/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..24152dcc3c5262aa20e3c6e987e47cfb8de7967e
--- /dev/null
+++ b/tools/u-boot-tools/gdb/Makefile
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# (C) Copyright 2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+#
+# (C) Copyright 2000
+# Murray Jensen <Murray.Jensen@csiro.au>
+
+ifneq ($(HOSTOS),cygwin)
+
+# Location of a usable BFD library, where we define "usable" as
+# "built for ${HOST}, supports ${TARGET}".  Sensible values are
+# - When cross-compiling: the root of the cross-environment
+# - Linux/ppc (native): /usr
+# - NetBSD/ppc (native): you lose ... (must extract these from the
+#   binutils build directory, plus the native and U-Boot include
+#   files don't like each other)
+
+ifeq ($(HOSTOS),darwin)
+BFD_ROOT_DIR =		/usr/local/tools
+else
+ifeq ($(HOSTARCH),$(ARCH))
+# native
+BFD_ROOT_DIR =		/usr
+else
+#BFD_ROOT_DIR =		/LinuxPPC/CDK		# Linux/i386
+#BFD_ROOT_DIR =		/usr/pkg/cross		# NetBSD/i386
+BFD_ROOT_DIR =		/opt/powerpc
+endif
+endif
+
+#
+# Use native tools and options
+#
+HOST_EXTRACFLAGS := -I$(BFD_ROOT_DIR)/include -pedantic
+
+hostprogs-y := gdbsend gdbcont
+
+gdbsend-objs := gdbsend.o error.o remote.o serial.o
+gdbcont-objs := gdbcont.o error.o remote.o serial.o
+
+always := $(hostprogs-y)
+
+endif	# cygwin
diff --git a/tools/u-boot-tools/gdb/error.c b/tools/u-boot-tools/gdb/error.c
new file mode 100644
index 0000000000000000000000000000000000000000..22a9b43e08301071a55f62d93778ac96358454e5
--- /dev/null
+++ b/tools/u-boot-tools/gdb/error.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2000
+ * Murray Jensen <Murray.Jensen@csiro.au>
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include "error.h"
+
+char *pname;
+
+void
+Warning(char *fmt, ...)
+{
+    va_list args;
+
+    fprintf(stderr, "%s: WARNING: ", pname);
+
+    va_start(args, fmt);
+    vfprintf(stderr, fmt, args);
+    va_end(args);
+
+    fprintf(stderr, "\n");
+}
+
+void
+Error(char *fmt, ...)
+{
+    va_list args;
+
+    fprintf(stderr, "%s: ERROR: ", pname);
+
+    va_start(args, fmt);
+    vfprintf(stderr, fmt, args);
+    va_end(args);
+
+    fprintf(stderr, "\n");
+
+    exit(1);
+}
+
+void
+Perror(char *fmt, ...)
+{
+    va_list args;
+    int e = errno;
+    char *p;
+
+    fprintf(stderr, "%s: ERROR: ", pname);
+
+    va_start(args, fmt);
+    vfprintf(stderr, fmt, args);
+    va_end(args);
+
+    if ((p = strerror(e)) == NULL || *p == '\0')
+	fprintf(stderr, ": Unknown Error (%d)\n", e);
+    else
+	fprintf(stderr, ": %s\n", p);
+
+    exit(1);
+}
diff --git a/tools/u-boot-tools/gdb/error.h b/tools/u-boot-tools/gdb/error.h
new file mode 100644
index 0000000000000000000000000000000000000000..252e89f9272fcfa9bf237df168e1afc3bd8cd01b
--- /dev/null
+++ b/tools/u-boot-tools/gdb/error.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2000
+ * Murray Jensen <Murray.Jensen@csiro.au>
+ */
+
+#include <stdarg.h>
+
+extern char *pname;
+
+extern void Warning(char *, ...);
+extern void Error(char *, ...);
+extern void Perror(char *, ...);
diff --git a/tools/u-boot-tools/gdb/gdbcont.c b/tools/u-boot-tools/gdb/gdbcont.c
new file mode 100644
index 0000000000000000000000000000000000000000..9291a71749a91e5cbba1c38686b4378d667db71e
--- /dev/null
+++ b/tools/u-boot-tools/gdb/gdbcont.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2000
+ * Murray Jensen <Murray.Jensen@csiro.au>
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include "serial.h"
+#include "error.h"
+#include "remote.h"
+
+char *serialdev = "/dev/term/b";
+speed_t speed = B230400;
+int verbose = 0;
+
+int
+main(int ac, char **av)
+{
+    int c, sfd;
+
+    if ((pname = strrchr(av[0], '/')) == NULL)
+	pname = av[0];
+    else
+	pname++;
+
+    while ((c = getopt(ac, av, "b:p:v")) != EOF)
+	switch (c) {
+
+	case 'b':
+	    if ((speed = cvtspeed(optarg)) == B0)
+		Error("can't decode baud rate specified in -b option");
+	    break;
+
+	case 'p':
+	    serialdev = optarg;
+	    break;
+
+	case 'v':
+	    verbose = 1;
+	    break;
+
+	default:
+	usage:
+	    fprintf(stderr, "Usage: %s [-b bps] [-p dev] [-v]\n", pname);
+	    exit(1);
+	}
+    if (optind != ac)
+	goto usage;
+
+    if (verbose)
+	fprintf(stderr, "Opening serial port and sending continue...\n");
+
+    if ((sfd = serialopen(serialdev, speed)) < 0)
+	Perror("open of serial device '%s' failed", serialdev);
+
+    remote_desc = sfd;
+    remote_reset();
+    remote_continue();
+
+    if (serialclose(sfd) < 0)
+	Perror("close of serial device '%s' failed", serialdev);
+
+    if (verbose)
+	fprintf(stderr, "Done.\n");
+
+    return (0);
+}
diff --git a/tools/u-boot-tools/gdb/gdbsend.c b/tools/u-boot-tools/gdb/gdbsend.c
new file mode 100644
index 0000000000000000000000000000000000000000..19c4dcb1d9219ec25549ea50f7fc5e1d8958d303
--- /dev/null
+++ b/tools/u-boot-tools/gdb/gdbsend.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2000
+ * Murray Jensen <Murray.Jensen@csiro.au>
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include "serial.h"
+#include "error.h"
+#include "remote.h"
+
+char *serialdev = "/dev/term/b";
+speed_t speed = B230400;
+int verbose = 0, docont = 0;
+unsigned long addr = 0x10000UL;
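+
+/*
+ * Example (illustrative): send an image over a serial port, load it at
+ * 0x100000 on the target and let the target continue afterwards:
+ *
+ *   gdbsend -a 0x100000 -b 230400 -p /dev/ttyS0 -c u-boot.bin
+ */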
+
+int
+main(int ac, char **av)
+{
+    int c, sfd, ifd;
+    char *ifn, *image;
+    struct stat ist;
+
+    if ((pname = strrchr(av[0], '/')) == NULL)
+	pname = av[0];
+    else
+	pname++;
+
+    while ((c = getopt(ac, av, "a:b:cp:v")) != EOF)
+	switch (c) {
+
+	case 'a': {
+	    char *ep;
+
+	    addr = strtol(optarg, &ep, 0);
+	    if (ep == optarg || *ep != '\0')
+		Error("can't decode address specified in -a option");
+	    break;
+	}
+
+	case 'b':
+	    if ((speed = cvtspeed(optarg)) == B0)
+		Error("can't decode baud rate specified in -b option");
+	    break;
+
+	case 'c':
+	    docont = 1;
+	    break;
+
+	case 'p':
+	    serialdev = optarg;
+	    break;
+
+	case 'v':
+	    verbose = 1;
+	    break;
+
+	default:
+	usage:
+	    fprintf(stderr,
+		"Usage: %s [-a addr] [-b bps] [-c] [-p dev] [-v] imagefile\n",
+		pname);
+	    exit(1);
+	}
+
+    if (optind != ac - 1)
+	goto usage;
+    ifn = av[optind++];
+
+    if (verbose)
+	fprintf(stderr, "Opening file and reading image...\n");
+
+    if ((ifd = open(ifn, O_RDONLY)) < 0)
+	Perror("can't open kernel image file '%s'", ifn);
+
+    if (fstat(ifd, &ist) < 0)
+	Perror("fstat '%s' failed", ifn);
+
+    if ((image = (char *)malloc(ist.st_size)) == NULL)
+	Perror("can't allocate %ld bytes for image", ist.st_size);
+
+    if ((c = read(ifd, image, ist.st_size)) < 0)
+	Perror("read of %d bytes from '%s' failed", ist.st_size, ifn);
+
+    if (c != ist.st_size)
+	Error("read of %ld bytes from '%s' failed (%d)", ist.st_size, ifn, c);
+
+    if (close(ifd) < 0)
+	Perror("close of '%s' failed", ifn);
+
+    if (verbose)
+	fprintf(stderr, "Opening serial port and sending image...\n");
+
+    if ((sfd = serialopen(serialdev, speed)) < 0)
+	Perror("open of serial device '%s' failed", serialdev);
+
+    remote_desc = sfd;
+    remote_reset();
+    remote_write_bytes(addr, image, ist.st_size);
+
+    if (docont) {
+	if (verbose)
+	    fprintf(stderr, "[continue]");
+	remote_continue();
+    }
+
+    if (serialclose(sfd) < 0)
+	Perror("close of serial device '%s' failed", serialdev);
+
+    if (verbose)
+	fprintf(stderr, "Done.\n");
+
+    return (0);
+}
diff --git a/tools/u-boot-tools/gdb/remote.c b/tools/u-boot-tools/gdb/remote.c
new file mode 100644
index 0000000000000000000000000000000000000000..3cd04213514c607dd9a2aaaa1b9aae6e92a7d63f
--- /dev/null
+++ b/tools/u-boot-tools/gdb/remote.c
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * taken from gdb/remote.c
+ *
+ * Only the write-to-memory support is needed here - everything else
+ * has been ripped out.
+ *
+ * All the copyright notices etc. have been left in.
+ */
+
+/* enough so that it will compile */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+/*nicked from gcc..*/
+
+#ifndef alloca
+#ifdef __GNUC__
+#define alloca __builtin_alloca
+#else /* not GNU C.  */
+#if (!defined (__STDC__) && defined (sparc)) || defined (__sparc__) || defined (__sparc) || defined (__sgi)
+#include <alloca.h>
+#else /* not sparc */
+#if defined (MSDOS) && !defined (__TURBOC__)
+#include <malloc.h>
+#else /* not MSDOS, or __TURBOC__ */
+#if defined(_AIX)
+#include <malloc.h>
+ #pragma alloca
+#else /* not MSDOS, __TURBOC__, or _AIX */
+#ifdef __hpux
+#endif /* __hpux */
+#endif /* not _AIX */
+#endif /* not MSDOS, or __TURBOC__ */
+#endif /* not sparc.  */
+#endif /* not GNU C.  */
+#ifdef __cplusplus
+extern "C" {
+#endif
+    void* alloca(size_t);
+#ifdef __cplusplus
+}
+#endif
+#endif /* alloca not defined.  */
+
+
+#include "serial.h"
+#include "error.h"
+#include "remote.h"
+#define REGISTER_BYTES 0
+#define fprintf_unfiltered fprintf
+#define fprintf_filtered fprintf
+#define fputs_unfiltered fputs
+#define fputs_filtered fputs
+#define fputc_unfiltered fputc
+#define fputc_filtered fputc
+#define printf_unfiltered printf
+#define printf_filtered printf
+#define puts_unfiltered puts
+#define puts_filtered puts
+#define putchar_unfiltered putchar
+#define putchar_filtered putchar
+#define fputstr_unfiltered(a,b,c) fputs((a), (c))
+#define gdb_stdlog stderr
+#define SERIAL_READCHAR(fd,timo)	serialreadchar((fd), (timo))
+#define SERIAL_WRITE(fd, addr, len)	serialwrite((fd), (addr), (len))
+#define error Error
+#define perror_with_name Perror
+#define gdb_flush fflush
+#define max(a,b) (((a)>(b))?(a):(b))
+#define min(a,b) (((a)<(b))?(a):(b))
+#define target_mourn_inferior() {}
+#define ULONGEST unsigned long
+#define CORE_ADDR unsigned long
+
+static int putpkt (char *);
+static int putpkt_binary(char *, int);
+static void getpkt (char *, int);
+
+static int remote_debug = 0, remote_register_buf_size = 0, watchdog = 0;
+
+int remote_desc = -1, remote_timeout = 10;
+
+static void
+fputstrn_unfiltered(char *s, int n, int x, FILE *fp)
+{
+    while (n-- > 0)
+	fputc(*s++, fp);
+}
+
+void
+remote_reset(void)
+{
+    SERIAL_WRITE(remote_desc, "+", 1);
+}
+
+void
+remote_continue(void)
+{
+    putpkt("c");
+}
+
+/* Remote target communications for serial-line targets in custom GDB protocol
+   Copyright 1988, 91, 92, 93, 94, 95, 96, 97, 98, 1999
+   Free Software Foundation, Inc.
+
+   This file is part of GDB.
+ */
+/* *INDENT-OFF* */
+/* Remote communication protocol.
+
+   A debug packet whose contents are <data>
+   is encapsulated for transmission in the form:
+
+	$ <data> # CSUM1 CSUM2
+
+	<data> must be ASCII alphanumeric and cannot include characters
+	'$' or '#'.  If <data> starts with two characters followed by
+	':', then the existing stubs interpret this as a sequence number.
+
+	CSUM1 and CSUM2 are the ASCII hex representation of an 8-bit
+	checksum of <data>; the most significant nibble is sent first.
+	The hex digits 0-9,a-f are used.
+
+   Receiver responds with:
+
+	+	- if CSUM is correct and ready for next packet
+	-	- if CSUM is incorrect
+
+   <data> is as follows:
+   Most values are encoded in ascii hex digits.  Signal numbers are according
+   to the numbering in target.h.
+
+	Request		Packet
+
+	set thread	Hct...		Set thread for subsequent operations.
+					c = 'c' for thread used in step and
+					continue; t... can be -1 for all
+					threads.
+					c = 'g' for thread used in other
+					operations.  If zero, pick a thread,
+					any thread.
+	reply		OK		for success
+			ENN		for an error.
+
+	read registers  g
+	reply		XX....X		Each byte of register data
+					is described by two hex digits.
+					Registers are in the internal order
+					for GDB, and the bytes in a register
+					are in the same order the machine uses.
+			or ENN		for an error.
+
+	write regs	GXX..XX		Each byte of register data
+					is described by two hex digits.
+	reply		OK		for success
+			ENN		for an error
+
+	write reg	Pn...=r...	Write register n... with value r...,
+					which contains two hex digits for each
+					byte in the register (target byte
+					order).
+	reply		OK		for success
+			ENN		for an error
+	(not supported by all stubs).
+
+	read mem	mAA..AA,LLLL	AA..AA is address, LLLL is length.
+	reply		XX..XX		XX..XX is mem contents
+					Can be fewer bytes than requested
+					if able to read only part of the data.
+			or ENN		NN is errno
+
+	write mem	MAA..AA,LLLL:XX..XX
+					AA..AA is address,
+					LLLL is number of bytes,
+					XX..XX is data
+	reply		OK		for success
+			ENN		for an error (this includes the case
+					where only part of the data was
+					written).
+
+	write mem       XAA..AA,LLLL:XX..XX
+	 (binary)                       AA..AA is address,
+					LLLL is number of bytes,
+					XX..XX is binary data
+	reply           OK              for success
+			ENN             for an error
+
+	continue	cAA..AA		AA..AA is address to resume
+					If AA..AA is omitted,
+					resume at same address.
+
+	step		sAA..AA		AA..AA is address to resume
+					If AA..AA is omitted,
+					resume at same address.
+
+	continue with	Csig;AA..AA	Continue with signal sig (hex signal
+	signal				number).  If ;AA..AA is omitted,
+					resume at same address.
+
+	step with	Ssig;AA..AA	Like 'C' but step not continue.
+	signal
+
+	last signal     ?               Reply the current reason for stopping.
+					This is the same reply as is generated
+					for step or cont : SAA where AA is the
+					signal number.
+
+	detach          D               Reply OK.
+
+	There is no immediate reply to step or cont.
+	The reply comes when the machine stops.
+	It is		SAA		AA is the signal number.
+
+	or...		TAAn...:r...;n...:r...;n...:r...;
+					AA = signal number
+					n... = register number (hex)
+					  r... = register contents
+					n... = `thread'
+					  r... = thread process ID.  This is
+						 a hex integer.
+					n... = other string not starting
+					    with valid hex digit.
+					  gdb should ignore this n,r pair
+					  and go on to the next.  This way
+					  we can extend the protocol.
+	or...		WAA		The process exited, and AA is
+					the exit status.  This is only
+					applicable for certain sorts of
+					targets.
+	or...		XAA		The process terminated with signal
+					AA.
+	or (obsolete)	NAA;tttttttt;dddddddd;bbbbbbbb
+					AA = signal number
+					tttttttt = address of symbol "_start"
+					dddddddd = base of data section
+					bbbbbbbb = base of bss  section.
+					Note: only used by Cisco Systems
+					targets.  The difference between this
+					reply and the "qOffsets" query is that
+					the 'N' packet may arrive spontaneously
+					whereas the 'qOffsets' is a query
+					initiated by the host debugger.
+	or...           OXX..XX	XX..XX  is hex encoding of ASCII data. This
+					can happen at any time while the
+					program is running and the debugger
+					should continue to wait for
+					'W', 'T', etc.
+
+	thread alive	TXX		Find out if the thread XX is alive.
+	reply		OK		thread is still alive
+			ENN		thread is dead
+
+	remote restart	RXX		Restart the remote server
+
+	extended ops	!		Use the extended remote protocol.
+					Sticky -- only needs to be set once.
+
+	kill request	k
+
+	toggle debug	d		toggle debug flag (see 386 & 68k stubs)
+	reset		r		reset -- see sparc stub.
+	reserved	<other>		On other requests, the stub should
+					ignore the request and send an empty
+					response ($#<checksum>).  This way
+					we can extend the protocol and GDB
+					can tell whether the stub it is
+					talking to uses the old or the new.
+	search		tAA:PP,MM	Search backwards starting at address
+					AA for a match with pattern PP and
+					mask MM.  PP and MM are 4 bytes.
+					Not supported by all stubs.
+
+	general query	qXXXX		Request info about XXXX.
+	general set	QXXXX=yyyy	Set value of XXXX to yyyy.
+	query sect offs	qOffsets	Get section offsets.  Reply is
+					Text=xxx;Data=yyy;Bss=zzz
+
+	Responses can be run-length encoded to save space.  A '*' means that
+	the next character is an ASCII encoding giving a repeat count which
+	stands for that many repetitions of the character preceding the '*'.
+	The encoding is n+29, yielding a printable character where n >= 3
+	(which is where RLE starts to win).  Don't use an n > 126.
+
+	So
+	"0* " means the same as "0000".  */
+/* *INDENT-ON* */
+
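+/* As a worked example, a non-binary write of the two bytes 0xde 0xad to
+   address 0x1000 carries the data "M1000,2:dead", which putpkt_binary()
+   below frames roughly like this:
+
+	unsigned char csum = 0;
+	const char *data = "M1000,2:dead";
+	char pkt[32], *p = pkt;
+
+	*p++ = '$';
+	while (*data)
+	  {
+	    csum += *data;
+	    *p++ = *data++;
+	  }
+	*p++ = '#';
+	*p++ = tohex ((csum >> 4) & 0xf);
+	*p++ = tohex (csum & 0xf);
+
+   giving "$M1000,2:dead#34" on the wire; the stub answers '+' for a good
+   checksum and '-' for a bad one. */
+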
+/* This variable (available to the user via "set remotebinarydownload")
+   dictates whether downloads are sent in binary (via the 'X' packet).
+   We assume that the stub can, and attempt to do it. This will be cleared if
+   the stub does not understand it. This switch is still needed, though
+   in cases when the packet is supported in the stub, but the connection
+   does not allow it (i.e., 7-bit serial connection only). */
+static int remote_binary_download = 1;
+
+/* Have we already checked whether binary downloads work? */
+static int remote_binary_checked;
+
+/* Maximum number of bytes to read/write at once.  The value here
+   is chosen to fill up a packet (the headers account for the 32).  */
+#define MAXBUFBYTES(N) (((N)-32)/2)
+
+/* Having this larger than 400 causes us to be incompatible with m68k-stub.c
+   and i386-stub.c.  Normally, no one would notice because it only matters
+   for writing large chunks of memory (e.g. in downloads).  Also, this needs
+   to be more than 400 if required to hold the registers (see below, where
+   we round it up based on REGISTER_BYTES).  */
+/* Round up PBUFSIZ to hold all the registers, at least.  */
+#define	PBUFSIZ ((REGISTER_BYTES > MAXBUFBYTES (400)) \
+		 ? (REGISTER_BYTES * 2 + 32) \
+		 : 400)
+
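+/* With REGISTER_BYTES defined as 0 above, PBUFSIZ works out to 400. */
+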
+
+/* This variable sets the number of bytes to be written to the target
+   in a single packet.  Normally PBUFSIZ is satisfactory, but some
+   targets need smaller values (perhaps because the receiving end
+   is slow).  */
+
+static int remote_write_size = 0x7fffffff;
+
+/* This variable sets the number of bits in an address that are to be
+   sent in a memory ("M" or "m") packet.  Normally, after stripping
+   leading zeros, the entire address would be sent. This variable
+   restricts the address to REMOTE_ADDRESS_SIZE bits.  HISTORY: The
+   initial implementation of remote.c restricted the address sent in
+   memory packets to ``host::sizeof long'' bytes - (typically 32
+   bits).  Consequently, for 64 bit targets, the upper 32 bits of an
+   address was never sent.  Since fixing this bug may cause a break in
+   some remote targets this variable is principally provided to
+   facilitate backward compatibility. */
+
+static int remote_address_size;
+
+/* Convert hex digit A to a number.  */
+
+static int
+fromhex (int a)
+{
+  if (a >= '0' && a <= '9')
+    return a - '0';
+  else if (a >= 'a' && a <= 'f')
+    return a - 'a' + 10;
+  else if (a >= 'A' && a <= 'F')
+    return a - 'A' + 10;
+  else {
+    error ("Reply contains invalid hex digit %d", a);
+    return -1;
+  }
+}
+
+/* Convert number NIB to a hex digit.  */
+
+static int
+tohex (int nib)
+{
+  if (nib < 10)
+    return '0' + nib;
+  else
+    return 'a' + nib - 10;
+}
+
+/* Return the number of hex digits in num.  */
+
+static int
+hexnumlen (ULONGEST num)
+{
+  int i;
+
+  for (i = 0; num != 0; i++)
+    num >>= 4;
+
+  return max (i, 1);
+}
+
+/* Set BUF to the hex digits representing NUM.  */
+
+static int
+hexnumstr (char *buf, ULONGEST num)
+{
+  int i;
+  int len = hexnumlen (num);
+
+  buf[len] = '\0';
+
+  for (i = len - 1; i >= 0; i--)
+    {
+      buf[i] = "0123456789abcdef"[(num & 0xf)];
+      num >>= 4;
+    }
+
+  return len;
+}
+
+/* Mask all but the least significant REMOTE_ADDRESS_SIZE bits. */
+
+static CORE_ADDR
+remote_address_masked (CORE_ADDR addr)
+{
+  if (remote_address_size > 0
+      && remote_address_size < (sizeof (ULONGEST) * 8))
+    {
+      /* Only create a mask when that mask can safely be constructed
+	 in a ULONGEST variable. */
+      ULONGEST mask = 1;
+      mask = (mask << remote_address_size) - 1;
+      addr &= mask;
+    }
+  return addr;
+}
+
+/* Determine whether the remote target supports binary downloading.
+   This is accomplished by sending a no-op memory write of zero length
+   to the target at the specified address. It does not suffice to send
+   the whole packet, since many stubs strip the eighth bit and subsequently
+   compute a wrong checksum, which causes real havoc with remote_write_bytes.
+
+   NOTE: This can still lose if the serial line is not eight-bit clean. In
+   cases like this, the user should clear "remotebinarydownload". */
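+/* For example, probing at address 0x1000 sends the framed packet
+   "$X1000,0:#af"; an empty reply means the stub did not recognize the
+   'X' packet and we fall back to plain 'M' writes. */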
+static void
+check_binary_download (CORE_ADDR addr)
+{
+  if (remote_binary_download && !remote_binary_checked)
+    {
+      char *buf = alloca (PBUFSIZ);
+      char *p;
+      remote_binary_checked = 1;
+
+      p = buf;
+      *p++ = 'X';
+      p += hexnumstr (p, (ULONGEST) addr);
+      *p++ = ',';
+      p += hexnumstr (p, (ULONGEST) 0);
+      *p++ = ':';
+      *p = '\0';
+
+      putpkt_binary (buf, (int) (p - buf));
+      getpkt (buf, 0);
+
+      if (buf[0] == '\0')
+	remote_binary_download = 0;
+    }
+
+  if (remote_debug)
+    {
+      if (remote_binary_download)
+	fprintf_unfiltered (gdb_stdlog,
+			    "binary downloading supported by target\n");
+      else
+	fprintf_unfiltered (gdb_stdlog,
+			    "binary downloading NOT supported by target\n");
+    }
+}
+
+/* Write memory data directly to the remote machine.
+   This does not inform the data cache; the data cache uses this.
+   MEMADDR is the address in the remote memory space.
+   MYADDR is the address of the buffer in our space.
+   LEN is the number of bytes.
+
+   Returns number of bytes transferred, or 0 for error.  */
+
+int
+remote_write_bytes (CORE_ADDR memaddr, char *myaddr, int len)
+{
+  unsigned char *buf = alloca (PBUFSIZ);
+  int max_buf_size;		/* Max size of packet output buffer */
+  int origlen;
+  extern int verbose;
+
+  /* Verify that the target can support a binary download */
+  check_binary_download (memaddr);
+
+  /* Chop the transfer down if necessary */
+
+  max_buf_size = min (remote_write_size, PBUFSIZ);
+  if (remote_register_buf_size != 0)
+    max_buf_size = min (max_buf_size, remote_register_buf_size);
+
+  /* Subtract header overhead from max payload size -  $M<memaddr>,<len>:#nn */
+  max_buf_size -= 2 + hexnumlen (memaddr + len - 1) + 1 + hexnumlen (len) + 4;
+
+  origlen = len;
+  while (len > 0)
+    {
+      unsigned char *p, *plen;
+      int todo;
+      int i;
+
+      /* construct "M"<memaddr>","<len>":" */
+      /* sprintf (buf, "M%lx,%x:", (unsigned long) memaddr, todo); */
+      memaddr = remote_address_masked (memaddr);
+      p = buf;
+      if (remote_binary_download)
+	{
+	  *p++ = 'X';
+	  todo = min (len, max_buf_size);
+	}
+      else
+	{
+	  *p++ = 'M';
+	  todo = min (len, max_buf_size / 2);	/* num bytes that will fit */
+	}
+
+      p += hexnumstr ((char *)p, (ULONGEST) memaddr);
+      *p++ = ',';
+
+      plen = p;			/* remember where len field goes */
+      p += hexnumstr ((char *)p, (ULONGEST) todo);
+      *p++ = ':';
+      *p = '\0';
+
+      /* We send target system values byte by byte, in increasing byte
+	 addresses, each byte encoded as two hex characters (or one
+	 binary character).  */
+      if (remote_binary_download)
+	{
+	  int escaped = 0;
+	  for (i = 0;
+	       (i < todo) && (i + escaped) < (max_buf_size - 2);
+	       i++)
+	    {
+	      switch (myaddr[i] & 0xff)
+		{
+		case '$':
+		case '#':
+		case 0x7d:
+		  /* These must be escaped */
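+		  /* e.g. '$' (0x24) goes out as the pair 0x7d 0x04 */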
+		  escaped++;
+		  *p++ = 0x7d;
+		  *p++ = (myaddr[i] & 0xff) ^ 0x20;
+		  break;
+		default:
+		  *p++ = myaddr[i] & 0xff;
+		  break;
+		}
+	    }
+
+	  if (i < todo)
+	    {
+	      /* Escape chars have filled up the buffer prematurely,
+		 and we have actually sent fewer bytes than planned.
+		 Fix-up the length field of the packet.  */
+
+	      /* FIXME: will fail if new len is a shorter string than
+		 old len.  */
+
+	      plen += hexnumstr ((char *)plen, (ULONGEST) i);
+	      *plen++ = ':';
+	    }
+	}
+      else
+	{
+	  for (i = 0; i < todo; i++)
+	    {
+	      *p++ = tohex ((myaddr[i] >> 4) & 0xf);
+	      *p++ = tohex (myaddr[i] & 0xf);
+	    }
+	  *p = '\0';
+	}
+
+      putpkt_binary ((char *)buf, (int) (p - buf));
+      getpkt ((char *)buf, 0);
+
+      if (buf[0] == 'E')
+	{
+	  /* There is no correspondence between what the remote protocol uses
+	     for errors and errno codes.  We would like a cleaner way of
+	     representing errors (big enough to include errno codes, bfd_error
+	     codes, and others).  But for now just return EIO.  */
+	  errno = EIO;
+	  return 0;
+	}
+
+      /* Increment by i, not by todo, in case escape chars
+	 caused us to send fewer bytes than we'd planned.  */
+      myaddr += i;
+      memaddr += i;
+      len -= i;
+
+      if (verbose)
+	putc('.', stderr);
+    }
+  return origlen;
+}
+
+/* Stuff for dealing with the packets which are part of this protocol.
+   See comment at top of file for details.  */
+
+/* Read a single character from the remote end, masking it down to 7 bits. */
+
+static int
+readchar (int timeout)
+{
+  int ch;
+
+  ch = SERIAL_READCHAR (remote_desc, timeout);
+
+  switch (ch)
+    {
+    case SERIAL_EOF:
+      error ("Remote connection closed");
+    case SERIAL_ERROR:
+      perror_with_name ("Remote communication error");
+    case SERIAL_TIMEOUT:
+      return ch;
+    default:
+      return ch & 0x7f;
+    }
+}
+
+static int
+putpkt (char *buf)
+{
+  return putpkt_binary (buf, strlen (buf));
+}
+
+/* Send a packet to the remote machine, with error checking.  The data
+   of the packet is in BUF.  The string in BUF can be at most PBUFSIZ - 5
+   to account for the $, # and checksum, and for a possible '\0' if we are
+   debugging (remote_debug) and want to print the sent packet as a string */
+
+static int
+putpkt_binary (char *buf, int cnt)
+{
+  int i;
+  unsigned char csum = 0;
+  char *buf2 = alloca (PBUFSIZ);
+  char *junkbuf = alloca (PBUFSIZ);
+
+  int ch;
+  int tcount = 0;
+  char *p;
+
+  /* Copy the packet into buffer BUF2, encapsulating it
+     and giving it a checksum.  */
+
+  if (cnt > PBUFSIZ - 5)	/* sanity check - must fit in buf2 */
+    abort ();
+
+  p = buf2;
+  *p++ = '$';
+
+  for (i = 0; i < cnt; i++)
+    {
+      csum += buf[i];
+      *p++ = buf[i];
+    }
+  *p++ = '#';
+  *p++ = tohex ((csum >> 4) & 0xf);
+  *p++ = tohex (csum & 0xf);
+
+  /* Send it over and over until we get a positive ack.  */
+
+  while (1)
+    {
+      int started_error_output = 0;
+
+      if (remote_debug)
+	{
+	  *p = '\0';
+	  fprintf_unfiltered (gdb_stdlog, "Sending packet: ");
+	  fputstrn_unfiltered (buf2, p - buf2, 0, gdb_stdlog);
+	  fprintf_unfiltered (gdb_stdlog, "...");
+	  gdb_flush (gdb_stdlog);
+	}
+      if (SERIAL_WRITE (remote_desc, buf2, p - buf2))
+	perror_with_name ("putpkt: write failed");
+
+      /* read until either a timeout occurs (-2) or '+' is read */
+      while (1)
+	{
+	  ch = readchar (remote_timeout);
+
+	  if (remote_debug)
+	    {
+	      switch (ch)
+		{
+		case '+':
+		case SERIAL_TIMEOUT:
+		case '$':
+		  if (started_error_output)
+		    {
+		      putchar_unfiltered ('\n');
+		      started_error_output = 0;
+		    }
+		}
+	    }
+
+	  switch (ch)
+	    {
+	    case '+':
+	      if (remote_debug)
+		fprintf_unfiltered (gdb_stdlog, "Ack\n");
+	      return 1;
+	    case SERIAL_TIMEOUT:
+	      tcount++;
+	      if (tcount > 3)
+		return 0;
+	      break;		/* Retransmit buffer */
+	    case '$':
+	      {
+		/* It's probably an old response, and we're out of sync.
+		   Just gobble up the packet and ignore it.  */
+		getpkt (junkbuf, 0);
+		continue;	/* Now, go look for + */
+	      }
+	    default:
+	      if (remote_debug)
+		{
+		  if (!started_error_output)
+		    {
+		      started_error_output = 1;
+		      fprintf_unfiltered (gdb_stdlog, "putpkt: Junk: ");
+		    }
+		  fputc_unfiltered (ch & 0177, gdb_stdlog);
+		}
+	      continue;
+	    }
+	  break;		/* Here to retransmit */
+	}
+
+#if 0
+      /* This is wrong.  If doing a long backtrace, the user should be
+	 able to get out next time we call QUIT, without anything as
+	 violent as interrupt_query.  If we want to provide a way out of
+	 here without getting to the next QUIT, it should be based on
+	 hitting ^C twice as in remote_wait.  */
+      if (quit_flag)
+	{
+	  quit_flag = 0;
+	  interrupt_query ();
+	}
+#endif
+    }
+}
+
+/* Come here after finding the start of the frame.  Collect the rest
+   into BUF, verifying the checksum, length, and handling run-length
+   compression.  Returns 0 on any error, 1 on success.  */
+
+static int
+read_frame (char *buf)
+{
+  unsigned char csum;
+  char *bp;
+  int c;
+
+  csum = 0;
+  bp = buf;
+
+  while (1)
+    {
+      c = readchar (remote_timeout);
+
+      switch (c)
+	{
+	case SERIAL_TIMEOUT:
+	  if (remote_debug)
+	    fputs_filtered ("Timeout in mid-packet, retrying\n", gdb_stdlog);
+	  return 0;
+	case '$':
+	  if (remote_debug)
+	    fputs_filtered ("Saw new packet start in middle of old one\n",
+			    gdb_stdlog);
+	  return 0;		/* Start a new packet, count retries */
+	case '#':
+	  {
+	    unsigned char pktcsum;
+
+	    *bp = '\000';
+
+	    pktcsum = fromhex (readchar (remote_timeout)) << 4;
+	    pktcsum |= fromhex (readchar (remote_timeout));
+
+	    if (csum == pktcsum)
+	      {
+		return 1;
+	      }
+
+	    if (remote_debug)
+	      {
+		fprintf_filtered (gdb_stdlog,
+			      "Bad checksum, sentsum=0x%x, csum=0x%x, buf=",
+				  pktcsum, csum);
+		fputs_filtered (buf, gdb_stdlog);
+		fputs_filtered ("\n", gdb_stdlog);
+	      }
+	    return 0;
+	  }
+	case '*':		/* Run length encoding */
+	  csum += c;
+	  c = readchar (remote_timeout);
+	  csum += c;
+	  c = c - ' ' + 3;	/* Compute repeat count */
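+	  /* e.g. "0* " decodes to "0000": ' ' means three more copies of '0' */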
+
+	  if (c > 0 && c < 255 && bp + c - 1 < buf + PBUFSIZ - 1)
+	    {
+	      memset (bp, *(bp - 1), c);
+	      bp += c;
+	      continue;
+	    }
+
+	  *bp = '\0';
+	  printf_filtered ("Repeat count %d too large for buffer: ", c);
+	  puts_filtered (buf);
+	  puts_filtered ("\n");
+	  return 0;
+	default:
+	  if (bp < buf + PBUFSIZ - 1)
+	    {
+	      *bp++ = c;
+	      csum += c;
+	      continue;
+	    }
+
+	  *bp = '\0';
+	  puts_filtered ("Remote packet too long: ");
+	  puts_filtered (buf);
+	  puts_filtered ("\n");
+
+	  return 0;
+	}
+    }
+}
+
+/* Read a packet from the remote machine, with error checking, and
+   store it in BUF.  BUF is expected to be of size PBUFSIZ.  If
+   FOREVER, wait forever rather than timing out; this is used while
+   the target is executing user code.  */
+
+static void
+getpkt (char *buf, int forever)
+{
+  int c;
+  int tries;
+  int timeout;
+  int val;
+
+  strcpy (buf, "timeout");
+
+  if (forever)
+    {
+      timeout = watchdog > 0 ? watchdog : -1;
+    }
+
+  else
+    timeout = remote_timeout;
+
+#define MAX_TRIES 3
+
+  for (tries = 1; tries <= MAX_TRIES; tries++)
+    {
+      /* This can loop forever if the remote side sends us characters
+	 continuously, but if it pauses, we'll get a zero from readchar
+	 because of timeout.  Then we'll count that as a retry.  */
+
+      /* Note that we will only wait forever prior to the start of a packet.
+	 After that, we expect characters to arrive at a brisk pace.  They
+	 should show up within remote_timeout intervals.  */
+
+      do
+	{
+	  c = readchar (timeout);
+
+	  if (c == SERIAL_TIMEOUT)
+	    {
+	      if (forever)	/* Watchdog went off.  Kill the target. */
+		{
+		  target_mourn_inferior ();
+		  error ("Watchdog has expired.  Target detached.\n");
+		}
+	      if (remote_debug)
+		fputs_filtered ("Timed out.\n", gdb_stdlog);
+	      goto retry;
+	    }
+	}
+      while (c != '$');
+
+      /* We've found the start of a packet, now collect the data.  */
+
+      val = read_frame (buf);
+
+      if (val == 1)
+	{
+	  if (remote_debug)
+	    {
+	      fprintf_unfiltered (gdb_stdlog, "Packet received: ");
+	      fputstr_unfiltered (buf, 0, gdb_stdlog);
+	      fprintf_unfiltered (gdb_stdlog, "\n");
+	    }
+	  SERIAL_WRITE (remote_desc, "+", 1);
+	  return;
+	}
+
+      /* Try the whole thing again.  */
+    retry:
+      SERIAL_WRITE (remote_desc, "-", 1);
+    }
+
+  /* We have tried hard enough, and just can't receive the packet.  Give up. */
+
+  printf_unfiltered ("Ignoring packet error, continuing...\n");
+  SERIAL_WRITE (remote_desc, "+", 1);
+}
diff --git a/tools/u-boot-tools/gdb/remote.h b/tools/u-boot-tools/gdb/remote.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a68402b80f2b1a1c1150a865c0931c51af43d18
--- /dev/null
+++ b/tools/u-boot-tools/gdb/remote.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2000
+ * Murray Jensen <Murray.Jensen@csiro.au>
+ */
+
+extern int remote_desc, remote_timeout;
+
+extern void remote_reset(void);
+extern void remote_continue(void);
+extern int remote_write_bytes(unsigned long, char *, int);
diff --git a/tools/u-boot-tools/gdb/serial.c b/tools/u-boot-tools/gdb/serial.c
new file mode 100644
index 0000000000000000000000000000000000000000..34ac609df13c62a3672c16d2ae7a6a58c14fdb89
--- /dev/null
+++ b/tools/u-boot-tools/gdb/serial.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2000
+ * Murray Jensen <Murray.Jensen@csiro.au>
+ */
+
+#include <unistd.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/time.h>
+#include "serial.h"
+
+#if defined(__sun__)	 || \
+    defined(__OpenBSD__) || \
+    defined(__FreeBSD__) || \
+    defined(__NetBSD__)	 || \
+    defined(__APPLE__)
+static struct termios tios = { BRKINT, 0, B115200|CS8|CREAD, 0, { 0 } };
+#else
+static struct termios tios = { BRKINT, 0, B115200|CS8|CREAD, 0,   0   };
+#endif
+
+static struct speedmap {
+    char *str;
+    speed_t val;
+} speedmap[] = {
+    { "50", B50 },		{ "75", B75 },		{ "110", B110 },
+    { "134", B134 },		{ "150", B150 },	{ "200", B200 },
+    { "300", B300 },		{ "600", B600 },	{ "1200", B1200 },
+    { "1800", B1800 },		{ "2400", B2400 },	{ "4800", B4800 },
+    { "9600", B9600 },		{ "19200", B19200 },	{ "38400", B38400 },
+    { "57600", B57600 },
+#ifdef	B76800
+    { "76800", B76800 },
+#endif
+    { "115200", B115200 },
+#ifdef	B153600
+    { "153600", B153600 },
+#endif
+    { "230400", B230400 },
+#ifdef	B307200
+    { "307200", B307200 },
+#endif
+#ifdef B460800
+    { "460800", B460800 }
+#endif
+};
+static int nspeeds = sizeof speedmap / sizeof speedmap[0];
+
+speed_t
+cvtspeed(char *str)
+{
+    struct speedmap *smp = speedmap, *esmp = &speedmap[nspeeds];
+
+    while (smp < esmp) {
+	if (strcmp(str, smp->str) == 0)
+	    return (smp->val);
+	smp++;
+    }
+    return B0;
+}
+
+int
+serialopen(char *device, speed_t speed)
+{
+    int fd;
+
+    if (cfsetospeed(&tios, speed) < 0)
+	return -1;
+
+    if ((fd = open(device, O_RDWR)) < 0)
+	return -1;
+
+    if (tcsetattr(fd, TCSAFLUSH, &tios) < 0) {
+	(void)close(fd);
+	return -1;
+    }
+
+    return fd;
+}
+
+int
+serialreadchar(int fd, int timeout)
+{
+    fd_set fds;
+    struct timeval tv;
+    int n;
+    char ch;
+
+    tv.tv_sec = timeout;
+    tv.tv_usec = 0;
+
+    FD_ZERO(&fds);
+    FD_SET(fd, &fds);
+
+    /* this is a horrible quick hack - fix this */
+
+    if ((n = select(fd + 1, &fds, 0, 0, &tv)) < 0)
+	return SERIAL_ERROR;
+
+    if (n == 0)
+	return SERIAL_TIMEOUT;
+
+    if ((n = read(fd, &ch, 1)) < 0)
+	return SERIAL_ERROR;
+
+    if (n == 0)
+	return SERIAL_EOF;
+
+    return ch;
+}
+
+int
+serialwrite(int fd, char *buf, int len)
+{
+    int n;
+
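+    /* write() may return a short count on a tty, so loop until all of buf is sent */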
+    do {
+	n = write(fd, buf, len);
+	if (n < 0)
+	    return 1;
+	len -= n;
+	buf += n;
+    } while (len > 0);
+    return 0;
+}
+
+int
+serialclose(int fd)
+{
+    return close(fd);
+}
diff --git a/tools/u-boot-tools/gdb/serial.h b/tools/u-boot-tools/gdb/serial.h
new file mode 100644
index 0000000000000000000000000000000000000000..c45d1dfd1adeb53d030c8b49764a2f523335ebbb
--- /dev/null
+++ b/tools/u-boot-tools/gdb/serial.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2000
+ * Murray Jensen <Murray.Jensen@csiro.au>
+ */
+
+#include <termios.h>
+
+#define SERIAL_ERROR	-1	/* General error, see errno for details */
+#define SERIAL_TIMEOUT	-2
+#define SERIAL_EOF	-3
+
+extern speed_t cvtspeed(char *);
+extern int serialopen(char *, speed_t);
+extern int serialreadchar(int, int);
+extern int serialwrite(int, char *, int);
+extern int serialclose(int);
diff --git a/tools/u-boot-tools/gen_eth_addr b/tools/u-boot-tools/gen_eth_addr
new file mode 100755
index 0000000000000000000000000000000000000000..393f237668a4015581498bb9efea37d91ae576ee
Binary files /dev/null and b/tools/u-boot-tools/gen_eth_addr differ
diff --git a/tools/u-boot-tools/gen_eth_addr.c b/tools/u-boot-tools/gen_eth_addr.c
new file mode 100644
index 0000000000000000000000000000000000000000..ad36f3f0c21d2f17d862b07a2928b0849463a331
--- /dev/null
+++ b/tools/u-boot-tools/gen_eth_addr.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2001
+ * Murray Jensen <Murray.Jensen@cmst.csiro.au>
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <time.h>
+
+int
+main(int argc, char *argv[])
+{
+    unsigned long ethaddr_low, ethaddr_high;
+
+    srand(time(0) + (getpid() << 8));
+
+    /*
+     * setting the 2nd LSB in the most significant byte of
+     * the address makes it a locally administered ethernet
+     * address
+     */
+    ethaddr_high = (rand() & 0xfeff) | 0x0200;
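+    /* the 0xfeff mask above also keeps the multicast (group) bit clear */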
+    ethaddr_low = rand();
+
+    printf("%02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n",
+	ethaddr_high >> 8, ethaddr_high & 0xff,
+	ethaddr_low >> 24, (ethaddr_low >> 16) & 0xff,
+	(ethaddr_low >> 8) & 0xff, ethaddr_low & 0xff);
+
+    return (0);
+}
diff --git a/tools/u-boot-tools/gen_ethaddr_crc b/tools/u-boot-tools/gen_ethaddr_crc
new file mode 100755
index 0000000000000000000000000000000000000000..1e9bb782f63dad5b57a4fa8980a3d40c0581f83a
Binary files /dev/null and b/tools/u-boot-tools/gen_ethaddr_crc differ
diff --git a/tools/u-boot-tools/gen_ethaddr_crc.c b/tools/u-boot-tools/gen_ethaddr_crc.c
new file mode 100644
index 0000000000000000000000000000000000000000..e73d0429a04e828b4440a07c326a420467accb59
--- /dev/null
+++ b/tools/u-boot-tools/gen_ethaddr_crc.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2016
+ * Olliver Schinagl <oliver@schinagl.nl>
+ */
+
+#include <ctype.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <u-boot/crc.h>
+
+#define ARP_HLEN 6 /* Length of hardware address */
+#define ARP_HLEN_ASCII ((ARP_HLEN * 2) + (ARP_HLEN - 1)) /* with separators */
+#define ARP_HLEN_LAZY (ARP_HLEN * 2) /* separatorless hardware address length */
+
+uint8_t nibble_to_hex(const char *nibble, bool lo)
+{
+	return (strtol(nibble, NULL, 16) << (lo ? 0 : 4)) & (lo ? 0x0f : 0xf0);
+}
+
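+/*
+ * Collect the hex digits of a MAC address (separators, if any, are simply
+ * skipped), then print the six address bytes followed by their CRC-8, all
+ * as hex digits on a single line.
+ */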
+int process_mac(const char *mac_address)
+{
+	uint8_t ethaddr[ARP_HLEN + 1] = { 0x00 };
+	uint_fast8_t i = 0;
+
+	while (*mac_address != '\0') {
+		char nibble[2] = { 0x00, '\n' }; /* for strtol */
+
+		nibble[0] = *mac_address++;
+		if (isxdigit(nibble[0])) {
+			if (isupper(nibble[0]))
+				nibble[0] = tolower(nibble[0]);
+			ethaddr[i >> 1] |= nibble_to_hex(nibble, (i % 2) != 0);
+			i++;
+		}
+	}
+
+	for (i = 0; i < ARP_HLEN; i++)
+		printf("%.2x", ethaddr[i]);
+	printf("%.2x\n", crc8(0, ethaddr, ARP_HLEN));
+
+	return 0;
+}
+
+void print_usage(char *cmdname)
+{
+	printf("Usage: %s <mac_address>\n", cmdname);
+	puts("<mac_address> may be with or without separators.");
+	puts("Valid separators are ':' and '-'.");
+	puts("<mac_address> digits are in base 16.\n");
+}
+
+int main(int argc, char *argv[])
+{
+	if (argc < 2) {
+		print_usage(argv[0]);
+		return 1;
+	}
+
+	if (!((strlen(argv[1]) == ARP_HLEN_ASCII) || (strlen(argv[1]) == ARP_HLEN_LAZY))) {
+		puts("The MAC address is not valid.\n");
+		print_usage(argv[0]);
+		return 1;
+	}
+
+	if (process_mac(argv[1])) {
+		puts("Failed to calculate the MAC's checksum.");
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/tools/u-boot-tools/gen_ethaddr_crc.o b/tools/u-boot-tools/gen_ethaddr_crc.o
new file mode 100644
index 0000000000000000000000000000000000000000..f603e643e52d4e838d000dca8b7847b7c908ec29
Binary files /dev/null and b/tools/u-boot-tools/gen_ethaddr_crc.o differ
diff --git a/tools/u-boot-tools/genboardscfg.py b/tools/u-boot-tools/genboardscfg.py
new file mode 100755
index 0000000000000000000000000000000000000000..e9bbd15e15a937686d19f4ba6eb385e37fb5369b
--- /dev/null
+++ b/tools/u-boot-tools/genboardscfg.py
@@ -0,0 +1,447 @@
+#!/usr/bin/env python2
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Author: Masahiro Yamada <yamada.m@jp.panasonic.com>
+#
+
+"""
+Converter from Kconfig and MAINTAINERS to a board database.
+
+Run 'tools/genboardscfg.py' to create a board database.
+
+Run 'tools/genboardscfg.py -h' for available options.
+
+Python 2.6 or later (but not Python 3.x) is required to run this script.
+"""
+
+import errno
+import fnmatch
+import glob
+import multiprocessing
+import optparse
+import os
+import sys
+import tempfile
+import time
+
+sys.path.insert(1, os.path.join(os.path.dirname(__file__), 'buildman'))
+import kconfiglib
+
+### constant variables ###
+OUTPUT_FILE = 'boards.cfg'
+CONFIG_DIR = 'configs'
+SLEEP_TIME = 0.03
+COMMENT_BLOCK = '''#
+# List of boards
+#   Automatically generated by %s: don't edit
+#
+# Status, Arch, CPU, SoC, Vendor, Board, Target, Options, Maintainers
+
+''' % __file__
+
+### helper functions ###
+def try_remove(f):
+    """Remove a file ignoring 'No such file or directory' error."""
+    try:
+        os.remove(f)
+    except OSError as exception:
+        # Ignore 'No such file or directory' error
+        if exception.errno != errno.ENOENT:
+            raise
+
+def check_top_directory():
+    """Exit if we are not at the top of source directory."""
+    for f in ('README', 'Licenses'):
+        if not os.path.exists(f):
+            sys.exit('Please run at the top of source directory.')
+
+def output_is_new(output):
+    """Check if the output file is up to date.
+
+    Returns:
+      True if the given output file exists and is newer than any of
+      *_defconfig, MAINTAINERS and Kconfig*.  False otherwise.
+    """
+    try:
+        ctime = os.path.getctime(output)
+    except OSError as exception:
+        if exception.errno == errno.ENOENT:
+            # return False on 'No such file or directory' error
+            return False
+        else:
+            raise
+
+    for (dirpath, dirnames, filenames) in os.walk(CONFIG_DIR):
+        for filename in fnmatch.filter(filenames, '*_defconfig'):
+            if fnmatch.fnmatch(filename, '.*'):
+                continue
+            filepath = os.path.join(dirpath, filename)
+            if ctime < os.path.getctime(filepath):
+                return False
+
+    for (dirpath, dirnames, filenames) in os.walk('.'):
+        for filename in filenames:
+            if (fnmatch.fnmatch(filename, '*~') or
+                not fnmatch.fnmatch(filename, 'Kconfig*') and
+                not filename == 'MAINTAINERS'):
+                continue
+            filepath = os.path.join(dirpath, filename)
+            if ctime < os.path.getctime(filepath):
+                return False
+
+    # Detect a board that has been removed since the current board database
+    # was generated
+    with open(output) as f:
+        for line in f:
+            if line[0] == '#' or line == '\n':
+                continue
+            defconfig = line.split()[6] + '_defconfig'
+            if not os.path.exists(os.path.join(CONFIG_DIR, defconfig)):
+                return False
+
+    return True
+
+### classes ###
+class KconfigScanner:
+
+    """Kconfig scanner."""
+
+    ### constant variable only used in this class ###
+    _SYMBOL_TABLE = {
+        'arch' : 'SYS_ARCH',
+        'cpu' : 'SYS_CPU',
+        'soc' : 'SYS_SOC',
+        'vendor' : 'SYS_VENDOR',
+        'board' : 'SYS_BOARD',
+        'config' : 'SYS_CONFIG_NAME',
+        'options' : 'SYS_EXTRA_OPTIONS'
+    }
+
+    def __init__(self):
+        """Scan all the Kconfig files and create a Config object."""
+        # Define environment variables referenced from Kconfig
+        os.environ['srctree'] = os.getcwd()
+        os.environ['UBOOTVERSION'] = 'dummy'
+        os.environ['KCONFIG_OBJDIR'] = ''
+        self._conf = kconfiglib.Config(print_warnings=False)
+
+    def __del__(self):
+        """Delete a leftover temporary file before exit.
+
+        The scan() method of this class creates a temporary file and deletes
+        it on success.  If the scan() method raises an exception along the way,
+        the temporary file might be left over.  In that case, it should be
+        deleted in this destructor.
+        """
+        if hasattr(self, '_tmpfile') and self._tmpfile:
+            try_remove(self._tmpfile)
+
+    def scan(self, defconfig):
+        """Load a defconfig file to obtain board parameters.
+
+        Arguments:
+          defconfig: path to the defconfig file to be processed
+
+        Returns:
+          A dictionary of board parameters.  It has a form of:
+          {
+              'arch': <arch_name>,
+              'cpu': <cpu_name>,
+              'soc': <soc_name>,
+              'vendor': <vendor_name>,
+              'board': <board_name>,
+              'target': <target_name>,
+              'config': <config_header_name>,
+              'options': <extra_options>
+          }
+        """
+        # strip special prefixes and save it in a temporary file
+        fd, self._tmpfile = tempfile.mkstemp()
+        with os.fdopen(fd, 'w') as f:
+            for line in open(defconfig):
+                colon = line.find(':CONFIG_')
+                if colon == -1:
+                    f.write(line)
+                else:
+                    f.write(line[colon + 1:])
+
+        warnings = self._conf.load_config(self._tmpfile)
+        if warnings:
+            for warning in warnings:
+                print '%s: %s' % (defconfig, warning)
+
+        try_remove(self._tmpfile)
+        self._tmpfile = None
+
+        params = {}
+
+        # Get the value of CONFIG_SYS_ARCH, CONFIG_SYS_CPU, ... etc.
+        # Set '-' if the value is empty.
+        for key, symbol in self._SYMBOL_TABLE.items():
+            value = self._conf.get_symbol(symbol).get_value()
+            if value:
+                params[key] = value
+            else:
+                params[key] = '-'
+
+        defconfig = os.path.basename(defconfig)
+        params['target'], match, rear = defconfig.partition('_defconfig')
+        assert match and not rear, '%s : invalid defconfig' % defconfig
+
+        # fix-up for aarch64
+        if params['arch'] == 'arm' and params['cpu'] == 'armv8':
+            params['arch'] = 'aarch64'
+
+        # fix-up options field. It should have the form:
+        # <config name>[:comma separated config options]
+        if params['options'] != '-':
+            params['options'] = params['config'] + ':' + \
+                                params['options'].replace(r'\"', '"')
+        elif params['config'] != params['target']:
+            params['options'] = params['config']
+
+        return params
+
+def scan_defconfigs_for_multiprocess(queue, defconfigs):
+    """Scan defconfig files and queue their board parameters
+
+    This function is intended to be passed to
+    multiprocessing.Process() constructor.
+
+    Arguments:
+      queue: An instance of multiprocessing.Queue().
+             The resulting board parameters are written into it.
+      defconfigs: A sequence of defconfig files to be scanned.
+    """
+    kconf_scanner = KconfigScanner()
+    for defconfig in defconfigs:
+        queue.put(kconf_scanner.scan(defconfig))
+
+def read_queues(queues, params_list):
+    """Read the queues and append the data to the paramers list"""
+    for q in queues:
+        while not q.empty():
+            params_list.append(q.get())
+
+def scan_defconfigs(jobs=1):
+    """Collect board parameters for all defconfig files.
+
+    This function invokes multiple processes for faster processing.
+
+    Arguments:
+      jobs: The number of jobs to run simultaneously
+    """
+    all_defconfigs = []
+    for (dirpath, dirnames, filenames) in os.walk(CONFIG_DIR):
+        for filename in fnmatch.filter(filenames, '*_defconfig'):
+            if fnmatch.fnmatch(filename, '.*'):
+                continue
+            all_defconfigs.append(os.path.join(dirpath, filename))
+
+    total_boards = len(all_defconfigs)
+    processes = []
+    queues = []
+    for i in range(jobs):
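+        # give each job a roughly equal contiguous slice of the defconfig list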
+        defconfigs = all_defconfigs[total_boards * i / jobs :
+                                    total_boards * (i + 1) / jobs]
+        q = multiprocessing.Queue(maxsize=-1)
+        p = multiprocessing.Process(target=scan_defconfigs_for_multiprocess,
+                                    args=(q, defconfigs))
+        p.start()
+        processes.append(p)
+        queues.append(q)
+
+    # The resulting data should be accumulated to this list
+    params_list = []
+
+    # Data in the queues should be retrieved periodically.
+    # Otherwise, the queues would become full and subprocesses would get stuck.
+    while any([p.is_alive() for p in processes]):
+        read_queues(queues, params_list)
+        # sleep for a while until the queues are filled
+        time.sleep(SLEEP_TIME)
+
+    # Joining subprocesses just in case
+    # (All subprocesses should already have been finished)
+    for p in processes:
+        p.join()
+
+    # retrieve leftover data
+    read_queues(queues, params_list)
+
+    return params_list
+
+class MaintainersDatabase:
+
+    """The database of board status and maintainers."""
+
+    def __init__(self):
+        """Create an empty database."""
+        self.database = {}
+
+    def get_status(self, target):
+        """Return the status of the given board.
+
+        The board status is generally either 'Active' or 'Orphan'.
+        Display a warning message and return '-' if status information
+        is not found.
+
+        Returns:
+          'Active', 'Orphan' or '-'.
+        """
+        if not target in self.database:
+            print >> sys.stderr, "WARNING: no status info for '%s'" % target
+            return '-'
+
+        tmp = self.database[target][0]
+        if tmp.startswith('Maintained'):
+            return 'Active'
+        elif tmp.startswith('Supported'):
+            return 'Active'
+        elif tmp.startswith('Orphan'):
+            return 'Orphan'
+        else:
+            print >> sys.stderr, ("WARNING: %s: unknown status for '%s'" %
+                                  (tmp, target))
+            return '-'
+
+    def get_maintainers(self, target):
+        """Return the maintainers of the given board.
+
+        Returns:
+          Maintainers of the board.  If the board has two or more maintainers,
+          they are separated with colons.
+        """
+        if not target in self.database:
+            print >> sys.stderr, "WARNING: no maintainers for '%s'" % target
+            return ''
+
+        return ':'.join(self.database[target][1])
+
+    def parse_file(self, file):
+        """Parse a MAINTAINERS file.
+
+        Parse a MAINTAINERS file and accumulate board status and
+        maintainers information.
+
+        Arguments:
+          file: MAINTAINERS file to be parsed
+        """
+        targets = []
+        maintainers = []
+        status = '-'
+        for line in open(file):
+            # Check also commented maintainers
+            if line[:3] == '#M:':
+                line = line[1:]
+            tag, rest = line[:2], line[2:].strip()
+            if tag == 'M:':
+                maintainers.append(rest)
+            elif tag == 'F:':
+                # expand wildcard and filter by 'configs/*_defconfig'
+                for f in glob.glob(rest):
+                    front, match, rear = f.partition('configs/')
+                    if not front and match:
+                        front, match, rear = rear.rpartition('_defconfig')
+                        if match and not rear:
+                            targets.append(front)
+            elif tag == 'S:':
+                status = rest
+            elif line == '\n':
+                for target in targets:
+                    self.database[target] = (status, maintainers)
+                targets = []
+                maintainers = []
+                status = '-'
+        if targets:
+            for target in targets:
+                self.database[target] = (status, maintainers)
+
+def insert_maintainers_info(params_list):
+    """Add Status and Maintainers information to the board parameters list.
+
+    Arguments:
+      params_list: A list of the board parameters
+    """
+    database = MaintainersDatabase()
+    for (dirpath, dirnames, filenames) in os.walk('.'):
+        if 'MAINTAINERS' in filenames:
+            database.parse_file(os.path.join(dirpath, 'MAINTAINERS'))
+
+    for i, params in enumerate(params_list):
+        target = params['target']
+        params['status'] = database.get_status(target)
+        params['maintainers'] = database.get_maintainers(target)
+        params_list[i] = params
+
+def format_and_output(params_list, output):
+    """Write board parameters into a file.
+
+    Columnate the board parameters, sort lines alphabetically,
+    and then write them to a file.
+
+    Arguments:
+      params_list: The list of board parameters
+      output: The path to the output file
+    """
+    FIELDS = ('status', 'arch', 'cpu', 'soc', 'vendor', 'board', 'target',
+              'options', 'maintainers')
+
+    # First, decide the width of each column
+    max_length = dict([ (f, 0) for f in FIELDS])
+    for params in params_list:
+        for f in FIELDS:
+            max_length[f] = max(max_length[f], len(params[f]))
+
+    output_lines = []
+    for params in params_list:
+        line = ''
+        for f in FIELDS:
+            # insert two spaces between fields like column -t would
+            line += '  ' + params[f].ljust(max_length[f])
+        output_lines.append(line.strip())
+
+    # ignore case when sorting
+    output_lines.sort(key=str.lower)
+
+    with open(output, 'w') as f:
+        f.write(COMMENT_BLOCK + '\n'.join(output_lines) + '\n')
+
+def gen_boards_cfg(output, jobs=1, force=False):
+    """Generate a board database file.
+
+    Arguments:
+      output: The name of the output file
+      jobs: The number of jobs to run simultaneously
+      force: Force to generate the output even if it is new
+    """
+    check_top_directory()
+
+    if not force and output_is_new(output):
+        print "%s is up to date. Nothing to do." % output
+        sys.exit(0)
+
+    params_list = scan_defconfigs(jobs)
+    insert_maintainers_info(params_list)
+    format_and_output(params_list, output)
+
+def main():
+    try:
+        cpu_count = multiprocessing.cpu_count()
+    except NotImplementedError:
+        cpu_count = 1
+
+    parser = optparse.OptionParser()
+    # Add options here
+    parser.add_option('-f', '--force', action="store_true", default=False,
+                      help='regenerate the output even if it is new')
+    parser.add_option('-j', '--jobs', type='int', default=cpu_count,
+                      help='the number of jobs to run simultaneously')
+    parser.add_option('-o', '--output', default=OUTPUT_FILE,
+                      help='output file [default=%s]' % OUTPUT_FILE)
+    (options, args) = parser.parse_args()
+
+    gen_boards_cfg(options.output, jobs=options.jobs, force=options.force)
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/u-boot-tools/getline.c b/tools/u-boot-tools/getline.c
new file mode 100644
index 0000000000000000000000000000000000000000..64f1260099ca5990f914fe7ad06f0ab1fd5dc5cf
--- /dev/null
+++ b/tools/u-boot-tools/getline.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* getline.c -- Replacement for GNU C library function getline
+ *
+ * Copyright (C) 1993, 1996, 2001, 2002 Free Software Foundation, Inc.
+ */
+
+/* Written by Jan Brittenson, bson@gnu.ai.mit.edu.  */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>	/* for malloc() and realloc() */
+
+/* Always add at least this many bytes when extending the buffer.  */
+#define MIN_CHUNK 64
+
+/* Read up to (and including) a TERMINATOR from STREAM into *LINEPTR
+   + OFFSET (and null-terminate it). *LINEPTR is a pointer returned from
+   malloc (or NULL), pointing to *N characters of space.  It is realloc'd
+   as necessary.  Return the number of characters read (not including the
+   null terminator), or -1 on error or EOF.
+   NOTE: There is another getstr() function declared in <curses.h>.  */
+static int getstr(char **lineptr, size_t *n, FILE *stream,
+		  char terminator, size_t offset)
+{
+	int nchars_avail;	/* Allocated but unused chars in *LINEPTR.  */
+	char *read_pos;		/* Where we're reading into *LINEPTR. */
+	int ret;
+
+	if (!lineptr || !n || !stream)
+		return -1;
+
+	if (!*lineptr) {
+		*n = MIN_CHUNK;
+		*lineptr = malloc(*n);
+		if (!*lineptr)
+			return -1;
+	}
+
+	nchars_avail = *n - offset;
+	read_pos = *lineptr + offset;
+
+	for (;;) {
+		register int c = getc(stream);
+
+		/* We always want at least one char left in the buffer, since we
+		   always (unless we get an error while reading the first char)
+		   NUL-terminate the line buffer.  */
+
+		assert(*n - nchars_avail == read_pos - *lineptr);
+		if (nchars_avail < 2) {
+			if (*n > MIN_CHUNK)
+				*n *= 2;
+			else
+				*n += MIN_CHUNK;
+
+			nchars_avail = *n + *lineptr - read_pos;
+			*lineptr = realloc(*lineptr, *n);
+			if (!*lineptr)
+				return -1;
+			read_pos = *n - nchars_avail + *lineptr;
+			assert(*n - nchars_avail == read_pos - *lineptr);
+		}
+
+		if (c == EOF || ferror (stream)) {
+			/* Return partial line, if any.  */
+			if (read_pos == *lineptr)
+				return -1;
+			else
+				break;
+		}
+
+		*read_pos++ = c;
+		nchars_avail--;
+
+		if (c == terminator)
+			/* Return the line.  */
+			break;
+	}
+
+	/* Done - NUL terminate and return the number of chars read.  */
+	*read_pos = '\0';
+
+	ret = read_pos - (*lineptr + offset);
+	return ret;
+}
+
+int getline (char **lineptr, size_t *n, FILE *stream)
+{
+	return getstr(lineptr, n, stream, '\n', 0);
+}
diff --git a/tools/u-boot-tools/getline.h b/tools/u-boot-tools/getline.h
new file mode 100644
index 0000000000000000000000000000000000000000..a2f35b92b2cff9cfc1f2cd091cd2355aa291efde
--- /dev/null
+++ b/tools/u-boot-tools/getline.h
@@ -0,0 +1 @@
+int getline(char **lineptr, size_t *n, FILE *stream);
diff --git a/tools/u-boot-tools/gpheader.h b/tools/u-boot-tools/gpheader.h
new file mode 100644
index 0000000000000000000000000000000000000000..d5bf86e5893190e77d6e1b3b6b1dbe4ea3175c1f
--- /dev/null
+++ b/tools/u-boot-tools/gpheader.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2014
+ * Texas Instruments Incorporated
+ * Refactored common functions into gpimage-common.c.  Include this common
+ * header file.
+ *
+ * (C) Copyright 2010
+ * Linaro LTD, www.linaro.org
+ * Author: John Rigby <john.rigby@linaro.org>
+ * Based on TI's signGP.c
+ *
+ * (C) Copyright 2009
+ * Stefano Babic, DENX Software Engineering, sbabic@denx.de.
+ *
+ * (C) Copyright 2008
+ * Marvell Semiconductor <www.marvell.com>
+ * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
+ */
+
+#ifndef _GPIMAGE_H_
+#define _GPIMAGE_H_
+
+/* common headers for gpimage and omapimage formats */
+struct gp_header {
+	uint32_t size;
+	uint32_t load_addr;
+};
+#define GPIMAGE_HDR_SIZE (sizeof(struct gp_header))
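+/*
+ * The header is just these two 32-bit words prepended to the image payload;
+ * gph_set_header() fills them in and converts them to big-endian when the
+ * 'be' flag is set.
+ */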
+
+/* common functions across gpimage and omapimage handlers */
+int valid_gph_size(uint32_t size);
+int valid_gph_load_addr(uint32_t load_addr);
+int gph_verify_header(struct gp_header *gph, int be);
+void gph_print_header(const struct gp_header *gph, int be);
+void gph_set_header(struct gp_header *gph, uint32_t size, uint32_t load_addr,
+			int be);
+int gpimage_check_params(struct image_tool_params *params);
+#endif
diff --git a/tools/u-boot-tools/gpimage-common.c b/tools/u-boot-tools/gpimage-common.c
new file mode 100644
index 0000000000000000000000000000000000000000..fc6406b9133723f60de44b82e3b9f201300f51e4
--- /dev/null
+++ b/tools/u-boot-tools/gpimage-common.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2014
+ * Texas Instruments Incorporated
+ * Refactored common functions into gpimage-common.c.
+ *
+ * (C) Copyright 2010
+ * Linaro LTD, www.linaro.org
+ * Author: John Rigby <john.rigby@linaro.org>
+ * Based on TI's signGP.c
+ *
+ * (C) Copyright 2009
+ * Stefano Babic, DENX Software Engineering, sbabic@denx.de.
+ *
+ * (C) Copyright 2008
+ * Marvell Semiconductor <www.marvell.com>
+ * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
+ */
+
+#include "imagetool.h"
+#include <compiler.h>
+#include <image.h>
+#include "gpheader.h"
+
+/* Helper to convert size and load_addr to big endian */
+void to_be32(uint32_t *gph_size, uint32_t *gph_load_addr)
+{
+	*gph_size = cpu_to_be32(*gph_size);
+	*gph_load_addr = cpu_to_be32(*gph_load_addr);
+}
+
+int gph_verify_header(struct gp_header *gph, int be)
+{
+	uint32_t gph_size = gph->size;
+	uint32_t gph_load_addr = gph->load_addr;
+
+	if (be)
+		to_be32(&gph_size, &gph_load_addr);
+
+	if (!gph_size || !gph_load_addr)
+		return -1;
+
+	return 0;
+}
+
+void gph_print_header(const struct gp_header *gph, int be)
+{
+	uint32_t gph_size = gph->size, gph_load_addr = gph->load_addr;
+
+	if (be)
+		to_be32(&gph_size, &gph_load_addr);
+
+	if (!gph_size) {
+		fprintf(stderr, "Error: invalid image size %x\n", gph_size);
+		exit(EXIT_FAILURE);
+	}
+
+	if (!gph_load_addr) {
+		fprintf(stderr, "Error: invalid image load address %x\n",
+			gph_load_addr);
+		exit(EXIT_FAILURE);
+	}
+	printf("GP Header: Size %x LoadAddr %x\n", gph_size, gph_load_addr);
+}
+
+void gph_set_header(struct gp_header *gph, uint32_t size, uint32_t load_addr,
+	int be)
+{
+	gph->size = size;
+	gph->load_addr = load_addr;
+	if (be)
+		to_be32(&gph->size, &gph->load_addr);
+}
+
+int gpimage_check_params(struct image_tool_params *params)
+{
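+	/* it is an error to combine more than one of the d, f and l modes */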
+	return	(params->dflag && (params->fflag || params->lflag)) ||
+		(params->fflag && (params->dflag || params->lflag)) ||
+		(params->lflag && (params->dflag || params->fflag));
+}
diff --git a/tools/u-boot-tools/gpimage-common.o b/tools/u-boot-tools/gpimage-common.o
new file mode 100644
index 0000000000000000000000000000000000000000..709b0fbe1f4e5f6fb287dd4cabc1320c14fef0f8
Binary files /dev/null and b/tools/u-boot-tools/gpimage-common.o differ
diff --git a/tools/u-boot-tools/gpimage.c b/tools/u-boot-tools/gpimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..27de4cfaed77e2db0f852688c3e52fffe960e780
--- /dev/null
+++ b/tools/u-boot-tools/gpimage.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2014
+ * Texas Instruments Incorporated
+ * Add gpimage format for KeyStone devices to format the SPL image.  This is
+ * based on omapimage.c.
+ *
+ * (C) Copyright 2010
+ * Linaro LTD, www.linaro.org
+ * Author: John Rigby <john.rigby@linaro.org>
+ * Based on TI's signGP.c
+ *
+ * (C) Copyright 2009
+ * Stefano Babic, DENX Software Engineering, sbabic@denx.de.
+ *
+ * (C) Copyright 2008
+ * Marvell Semiconductor <www.marvell.com>
+ * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
+ */
+
+#include "imagetool.h"
+#include <compiler.h>
+#include <image.h>
+#include "gpheader.h"
+
+static uint8_t gpimage_header[GPIMAGE_HDR_SIZE];
+
+/* to be in keystone gpimage */
+static int gpimage_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_GPIMAGE)
+		return EXIT_SUCCESS;
+	return EXIT_FAILURE;
+}
+
+static int gpimage_verify_header(unsigned char *ptr, int image_size,
+			struct image_tool_params *params)
+{
+	struct gp_header *gph = (struct gp_header *)ptr;
+
+	return gph_verify_header(gph, 1);
+}
+
+static void gpimage_print_header(const void *ptr)
+{
+	const struct gp_header *gph = (struct gp_header *)ptr;
+
+	gph_print_header(gph, 1);
+}
+
+static void gpimage_set_header(void *ptr, struct stat *sbuf, int ifd,
+				struct image_tool_params *params)
+{
+	struct gp_header *gph = (struct gp_header *)ptr;
+
+	gph_set_header(gph, sbuf->st_size - GPIMAGE_HDR_SIZE, params->addr, 1);
+}
+
+/*
+ * gpimage parameters
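+ *
+ * A gpimage is typically produced with an mkimage invocation along the
+ * lines of (addresses and file names illustrative):
+ *
+ *	mkimage -A arm -T gpimage -C none -a 0x0c080000 -e 0x0c080000 \
+ *		-d u-boot-spl.bin u-boot-spl.gph
+ *
+ * The output is the input binary prefixed with the GP header (size and
+ * load address) written big-endian.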
+ */
+U_BOOT_IMAGE_TYPE(
+	gpimage,
+	"TI KeyStone GP Image support",
+	GPIMAGE_HDR_SIZE,
+	(void *)&gpimage_header,
+	gpimage_check_params,
+	gpimage_verify_header,
+	gpimage_print_header,
+	gpimage_set_header,
+	NULL,
+	gpimage_check_image_types,
+	NULL,
+	NULL
+);
diff --git a/tools/u-boot-tools/gpimage.o b/tools/u-boot-tools/gpimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..6fd9defc342ddfda4e0869fa0ee257cd31f35db7
Binary files /dev/null and b/tools/u-boot-tools/gpimage.o differ
diff --git a/tools/u-boot-tools/ifdtool.c b/tools/u-boot-tools/ifdtool.c
new file mode 100644
index 0000000000000000000000000000000000000000..3a39b7bc701194e6bf1e9b0cb767c2feb7abca39
--- /dev/null
+++ b/tools/u-boot-tools/ifdtool.c
@@ -0,0 +1,1109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ifdtool - Manage Intel Firmware Descriptor information
+ *
+ * Copyright 2014 Google, Inc
+ *
+ * From Coreboot project, but it got a serious code clean-up
+ * and a few new features
+ */
+
+#include <assert.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <linux/libfdt.h>
+#include "ifdtool.h"
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define debug(fmt, args...)	printf(fmt, ##args)
+#else
+#define debug(fmt, args...)
+#endif
+
+#define FD_SIGNATURE		0x0FF0A55A
+#define FLREG_BASE(reg)		((reg & 0x00000fff) << 12)
+#define FLREG_LIMIT(reg)	(((reg & 0x0fff0000) >> 4) | 0xfff)
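+/*
+ * Base and limit are encoded in 4 KiB units; e.g. a flash region register
+ * value of 0x07ff0500 describes a region from base 0x500000 to limit
+ * 0x7fffff (3 MiB).
+ */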
+
+struct input_file {
+	char *fname;
+	unsigned int addr;
+};
+
+/**
+ * find_fd() - Find the flash description in the ROM image
+ *
+ * @image:	Pointer to image
+ * @size:	Size of image in bytes
+ * @return pointer to structure, or NULL if not found
+ */
+static struct fdbar_t *find_fd(char *image, int size)
+{
+	uint32_t *ptr, *end;
+
+	/* Scan for FD signature */
+	for (ptr = (uint32_t *)image, end = ptr + size / 4; ptr < end; ptr++) {
+		if (*ptr == FD_SIGNATURE)
+			break;
+	}
+
+	if (ptr == end) {
+		printf("No Flash Descriptor found in this image\n");
+		return NULL;
+	}
+
+	debug("Found Flash Descriptor signature at 0x%08lx\n",
+	      (char *)ptr - image);
+
+	return (struct fdbar_t *)ptr;
+}
+
+/**
+ * get_region() - Get information about the selected region
+ *
+ * @frba:		Flash region list
+ * @region_type:	Type of region (0..MAX_REGIONS-1)
+ * @region:		Region information is written here
+ * @return 0 if OK, else -ve
+ */
+static int get_region(struct frba_t *frba, int region_type,
+		      struct region_t *region)
+{
+	if (region_type >= MAX_REGIONS) {
+		fprintf(stderr, "Invalid region type.\n");
+		return -1;
+	}
+
+	region->base = FLREG_BASE(frba->flreg[region_type]);
+	region->limit = FLREG_LIMIT(frba->flreg[region_type]);
+	region->size = region->limit - region->base + 1;
+
+	return 0;
+}
+
+static const char *region_name(int region_type)
+{
+	static const char *const regions[] = {
+		"Flash Descriptor",
+		"BIOS",
+		"Intel ME",
+		"GbE",
+		"Platform Data"
+	};
+
+	assert(region_type < MAX_REGIONS);
+
+	return regions[region_type];
+}
+
+static const char *region_filename(int region_type)
+{
+	static const char *const region_filenames[] = {
+		"flashregion_0_flashdescriptor.bin",
+		"flashregion_1_bios.bin",
+		"flashregion_2_intel_me.bin",
+		"flashregion_3_gbe.bin",
+		"flashregion_4_platform_data.bin"
+	};
+
+	assert(region_type < MAX_REGIONS);
+
+	return region_filenames[region_type];
+}
+
+static int dump_region(int num, struct frba_t *frba)
+{
+	struct region_t region;
+	int ret;
+
+	ret = get_region(frba, num, &region);
+	if (ret)
+		return ret;
+
+	printf("  Flash Region %d (%s): %08x - %08x %s\n",
+	       num, region_name(num), region.base, region.limit,
+	       region.size < 1 ? "(unused)" : "");
+
+	return ret;
+}
+
+static void dump_frba(struct frba_t *frba)
+{
+	int i;
+
+	printf("Found Region Section\n");
+	for (i = 0; i < MAX_REGIONS; i++) {
+		printf("FLREG%d:    0x%08x\n", i, frba->flreg[i]);
+		dump_region(i, frba);
+	}
+}
+
+static void decode_spi_frequency(unsigned int freq)
+{
+	switch (freq) {
+	case SPI_FREQUENCY_20MHZ:
+		printf("20MHz");
+		break;
+	case SPI_FREQUENCY_33MHZ:
+		printf("33MHz");
+		break;
+	case SPI_FREQUENCY_50MHZ:
+		printf("50MHz");
+		break;
+	default:
+		printf("unknown<%x>MHz", freq);
+	}
+}
+
+static void decode_component_density(unsigned int density)
+{
+	switch (density) {
+	case COMPONENT_DENSITY_512KB:
+		printf("512KiB");
+		break;
+	case COMPONENT_DENSITY_1MB:
+		printf("1MiB");
+		break;
+	case COMPONENT_DENSITY_2MB:
+		printf("2MiB");
+		break;
+	case COMPONENT_DENSITY_4MB:
+		printf("4MiB");
+		break;
+	case COMPONENT_DENSITY_8MB:
+		printf("8MiB");
+		break;
+	case COMPONENT_DENSITY_16MB:
+		printf("16MiB");
+		break;
+	default:
+		printf("unknown<%x>MiB", density);
+	}
+}
+
+static void dump_fcba(struct fcba_t *fcba)
+{
+	printf("\nFound Component Section\n");
+	printf("FLCOMP     0x%08x\n", fcba->flcomp);
+	printf("  Dual Output Fast Read Support:       %ssupported\n",
+	       (fcba->flcomp & (1 << 30)) ? "" : "not ");
+	printf("  Read ID/Read Status Clock Frequency: ");
+	decode_spi_frequency((fcba->flcomp >> 27) & 7);
+	printf("\n  Write/Erase Clock Frequency:         ");
+	decode_spi_frequency((fcba->flcomp >> 24) & 7);
+	printf("\n  Fast Read Clock Frequency:           ");
+	decode_spi_frequency((fcba->flcomp >> 21) & 7);
+	printf("\n  Fast Read Support:                   %ssupported",
+	       (fcba->flcomp & (1 << 20)) ? "" : "not ");
+	printf("\n  Read Clock Frequency:                ");
+	decode_spi_frequency((fcba->flcomp >> 17) & 7);
+	printf("\n  Component 2 Density:                 ");
+	decode_component_density((fcba->flcomp >> 3) & 7);
+	printf("\n  Component 1 Density:                 ");
+	decode_component_density(fcba->flcomp & 7);
+	printf("\n");
+	printf("FLILL      0x%08x\n", fcba->flill);
+	printf("  Invalid Instruction 3: 0x%02x\n",
+	       (fcba->flill >> 24) & 0xff);
+	printf("  Invalid Instruction 2: 0x%02x\n",
+	       (fcba->flill >> 16) & 0xff);
+	printf("  Invalid Instruction 1: 0x%02x\n",
+	       (fcba->flill >> 8) & 0xff);
+	printf("  Invalid Instruction 0: 0x%02x\n",
+	       fcba->flill & 0xff);
+	printf("FLPB       0x%08x\n", fcba->flpb);
+	printf("  Flash Partition Boundary Address: 0x%06x\n\n",
+	       (fcba->flpb & 0xfff) << 12);
+}
+
+static void dump_fpsba(struct fpsba_t *fpsba)
+{
+	int i;
+
+	printf("Found PCH Strap Section\n");
+	for (i = 0; i < MAX_STRAPS; i++)
+		printf("PCHSTRP%-2d:  0x%08x\n", i, fpsba->pchstrp[i]);
+}
+
+static const char *get_enabled(int flag)
+{
+	return flag ? "enabled" : "disabled";
+}
+
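+/*
+ * FLMSTR bit layout as decoded below: bits 24-28 are the per-region write
+ * permissions, bits 16-20 the per-region read permissions and bits 0-15 the
+ * requester ID. The 0xffff0000 value used by unlock_descriptor(), for
+ * example, grants a master full read/write access to every region.
+ */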
+static void decode_flmstr(uint32_t flmstr)
+{
+	printf("  Platform Data Region Write Access: %s\n",
+	       get_enabled(flmstr & (1 << 28)));
+	printf("  GbE Region Write Access:           %s\n",
+	       get_enabled(flmstr & (1 << 27)));
+	printf("  Intel ME Region Write Access:      %s\n",
+	       get_enabled(flmstr & (1 << 26)));
+	printf("  Host CPU/BIOS Region Write Access: %s\n",
+	       get_enabled(flmstr & (1 << 25)));
+	printf("  Flash Descriptor Write Access:     %s\n",
+	       get_enabled(flmstr & (1 << 24)));
+
+	printf("  Platform Data Region Read Access:  %s\n",
+	       get_enabled(flmstr & (1 << 20)));
+	printf("  GbE Region Read Access:            %s\n",
+	       get_enabled(flmstr & (1 << 19)));
+	printf("  Intel ME Region Read Access:       %s\n",
+	       get_enabled(flmstr & (1 << 18)));
+	printf("  Host CPU/BIOS Region Read Access:  %s\n",
+	       get_enabled(flmstr & (1 << 17)));
+	printf("  Flash Descriptor Read Access:      %s\n",
+	       get_enabled(flmstr & (1 << 16)));
+
+	printf("  Requester ID:                      0x%04x\n\n",
+	       flmstr & 0xffff);
+}
+
+static void dump_fmba(struct fmba_t *fmba)
+{
+	printf("Found Master Section\n");
+	printf("FLMSTR1:   0x%08x (Host CPU/BIOS)\n", fmba->flmstr1);
+	decode_flmstr(fmba->flmstr1);
+	printf("FLMSTR2:   0x%08x (Intel ME)\n", fmba->flmstr2);
+	decode_flmstr(fmba->flmstr2);
+	printf("FLMSTR3:   0x%08x (GbE)\n", fmba->flmstr3);
+	decode_flmstr(fmba->flmstr3);
+}
+
+static void dump_fmsba(struct fmsba_t *fmsba)
+{
+	int i;
+
+	printf("Found Processor Strap Section\n");
+	for (i = 0; i < 4; i++)
+		printf("????:      0x%08x\n", fmsba->data[i]);
+}
+
+static void dump_jid(uint32_t jid)
+{
+	printf("    SPI Component Device ID 1:          0x%02x\n",
+	       (jid >> 16) & 0xff);
+	printf("    SPI Component Device ID 0:          0x%02x\n",
+	       (jid >> 8) & 0xff);
+	printf("    SPI Component Vendor ID:            0x%02x\n",
+	       jid & 0xff);
+}
+
+static void dump_vscc(uint32_t vscc)
+{
+	printf("    Lower Erase Opcode:                 0x%02x\n",
+	       vscc >> 24);
+	printf("    Lower Write Enable on Write Status: 0x%02x\n",
+	       vscc & (1 << 20) ? 0x06 : 0x50);
+	printf("    Lower Write Status Required:        %s\n",
+	       vscc & (1 << 19) ? "Yes" : "No");
+	printf("    Lower Write Granularity:            %d bytes\n",
+	       vscc & (1 << 18) ? 64 : 1);
+	printf("    Lower Block / Sector Erase Size:    ");
+	switch ((vscc >> 16) & 0x3) {
+	case 0:
+		printf("256 Byte\n");
+		break;
+	case 1:
+		printf("4KB\n");
+		break;
+	case 2:
+		printf("8KB\n");
+		break;
+	case 3:
+		printf("64KB\n");
+		break;
+	}
+
+	printf("    Upper Erase Opcode:                 0x%02x\n",
+	       (vscc >> 8) & 0xff);
+	printf("    Upper Write Enable on Write Status: 0x%02x\n",
+	       vscc & (1 << 4) ? 0x06 : 0x50);
+	printf("    Upper Write Status Required:        %s\n",
+	       vscc & (1 << 3) ? "Yes" : "No");
+	printf("    Upper Write Granularity:            %d bytes\n",
+	       vscc & (1 << 2) ? 64 : 1);
+	printf("    Upper Block / Sector Erase Size:    ");
+	switch (vscc & 0x3) {
+	case 0:
+		printf("256 Byte\n");
+		break;
+	case 1:
+		printf("4KB\n");
+		break;
+	case 2:
+		printf("8KB\n");
+		break;
+	case 3:
+		printf("64KB\n");
+		break;
+	}
+}
+
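+/*
+ * dump_vtba() halves the VTL value to get the number of JID/VSCC pairs
+ * (each pair is two 32-bit words) and caps it at the 8 entries that fit in
+ * struct vtba_t; e.g. vtl = 6 prints three entries.
+ */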
+static void dump_vtba(struct vtba_t *vtba, int vtl)
+{
+	int i;
+	int num = (vtl >> 1) < 8 ? (vtl >> 1) : 8;
+
+	printf("ME VSCC table:\n");
+	for (i = 0; i < num; i++) {
+		printf("  JID%d:  0x%08x\n", i, vtba->entry[i].jid);
+		dump_jid(vtba->entry[i].jid);
+		printf("  VSCC%d: 0x%08x\n", i, vtba->entry[i].vscc);
+		dump_vscc(vtba->entry[i].vscc);
+	}
+	printf("\n");
+}
+
+static void dump_oem(uint8_t *oem)
+{
+	int i, j;
+	printf("OEM Section:\n");
+	for (i = 0; i < 4; i++) {
+		printf("%02x:", i << 4);
+		for (j = 0; j < 16; j++)
+			printf(" %02x", oem[(i<<4)+j]);
+		printf("\n");
+	}
+	printf("\n");
+}
+
+/**
+ * dump_fd() - Display a dump of the full flash description
+ *
+ * @image:	Pointer to image
+ * @size:	Size of image in bytes
+ * @return 0 if OK, -1 on error
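+ *
+ * The section pointers in FLMAP0-2 are stored in 16-byte units; for example
+ * a flmap0 value of 0x00040003 places the region table (FRBA) at byte
+ * offset 0x40 and the component section (FCBA) at 0x30.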
+ */
+static int dump_fd(char *image, int size)
+{
+	struct fdbar_t *fdb = find_fd(image, size);
+
+	if (!fdb)
+		return -1;
+
+	printf("FLMAP0:    0x%08x\n", fdb->flmap0);
+	printf("  NR:      %d\n", (fdb->flmap0 >> 24) & 7);
+	printf("  FRBA:    0x%x\n", ((fdb->flmap0 >> 16) & 0xff) << 4);
+	printf("  NC:      %d\n", ((fdb->flmap0 >> 8) & 3) + 1);
+	printf("  FCBA:    0x%x\n", ((fdb->flmap0) & 0xff) << 4);
+
+	printf("FLMAP1:    0x%08x\n", fdb->flmap1);
+	printf("  ISL:     0x%02x\n", (fdb->flmap1 >> 24) & 0xff);
+	printf("  FPSBA:   0x%x\n", ((fdb->flmap1 >> 16) & 0xff) << 4);
+	printf("  NM:      %d\n", (fdb->flmap1 >> 8) & 3);
+	printf("  FMBA:    0x%x\n", ((fdb->flmap1) & 0xff) << 4);
+
+	printf("FLMAP2:    0x%08x\n", fdb->flmap2);
+	printf("  PSL:     0x%04x\n", (fdb->flmap2 >> 8) & 0xffff);
+	printf("  FMSBA:   0x%x\n", ((fdb->flmap2) & 0xff) << 4);
+
+	printf("FLUMAP1:   0x%08x\n", fdb->flumap1);
+	printf("  Intel ME VSCC Table Length (VTL):        %d\n",
+	       (fdb->flumap1 >> 8) & 0xff);
+	printf("  Intel ME VSCC Table Base Address (VTBA): 0x%06x\n\n",
+	       (fdb->flumap1 & 0xff) << 4);
+	dump_vtba((struct vtba_t *)
+			(image + ((fdb->flumap1 & 0xff) << 4)),
+			(fdb->flumap1 >> 8) & 0xff);
+	dump_oem((uint8_t *)image + 0xf00);
+	dump_frba((struct frba_t *)(image + (((fdb->flmap0 >> 16) & 0xff)
+			<< 4)));
+	dump_fcba((struct fcba_t *)(image + (((fdb->flmap0) & 0xff) << 4)));
+	dump_fpsba((struct fpsba_t *)
+			(image + (((fdb->flmap1 >> 16) & 0xff) << 4)));
+	dump_fmba((struct fmba_t *)(image + (((fdb->flmap1) & 0xff) << 4)));
+	dump_fmsba((struct fmsba_t *)(image + (((fdb->flmap2) & 0xff) << 4)));
+
+	return 0;
+}
+
+/**
+ * write_regions() - Write each region from an image to its own file
+ *
+ * The filename to use in each case is fixed - see region_filename()
+ *
+ * @image:	Pointer to image
+ * @size:	Size of image in bytes
+ * @return 0 if OK, -ve on error
+ */
+static int write_regions(char *image, int size)
+{
+	struct fdbar_t *fdb;
+	struct frba_t *frba;
+	int ret = 0;
+	int i;
+
+	fdb =  find_fd(image, size);
+	if (!fdb)
+		return -1;
+
+	frba = (struct frba_t *)(image + (((fdb->flmap0 >> 16) & 0xff) << 4));
+
+	for (i = 0; i < MAX_REGIONS; i++) {
+		struct region_t region;
+		int region_fd;
+
+		ret = get_region(frba, i, &region);
+		if (ret)
+			return ret;
+		dump_region(i, frba);
+		if (region.size <= 0)
+			continue;
+		region_fd = open(region_filename(i),
+				 O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR |
+				 S_IWUSR | S_IRGRP | S_IROTH);
+		if (write(region_fd, image + region.base, region.size) !=
+				region.size) {
+			perror("Error while writing");
+			ret = -1;
+		}
+		close(region_fd);
+	}
+
+	return ret;
+}
+
+static int perror_fname(const char *fmt, const char *fname)
+{
+	char msg[strlen(fmt) + strlen(fname) + 1];
+
+	sprintf(msg, fmt, fname);
+	perror(msg);
+
+	return -1;
+}
+
+/**
+ * write_image() - Write the image to a file
+ *
+ * @filename:	Filename to use for the image
+ * @image:	Pointer to image
+ * @size:	Size of image in bytes
+ * @return 0 if OK, -ve on error
+ */
+static int write_image(char *filename, char *image, int size)
+{
+	int new_fd;
+
+	debug("Writing new image to %s\n", filename);
+
+	new_fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR |
+		      S_IWUSR | S_IRGRP | S_IROTH);
+	if (new_fd < 0)
+		return perror_fname("Could not open file '%s'", filename);
+	if (write(new_fd, image, size) != size)
+		return perror_fname("Could not write file '%s'", filename);
+	close(new_fd);
+
+	return 0;
+}
+
+/**
+ * set_spi_frequency() - Set the SPI frequency to use when booting
+ *
+ * Several frequencies are supported, some of which work with fast devices.
+ * For SPI emulators, the slowest (SPI_FREQUENCY_20MHZ) is often used. The
+ * Intel boot system uses this information somehow on boot.
+ *
+ * The image is updated with the supplied value
+ *
+ * @image:	Pointer to image
+ * @size:	Size of image in bytes
+ * @freq:	SPI frequency to use
+ */
+static void set_spi_frequency(char *image, int size, enum spi_frequency freq)
+{
+	struct fdbar_t *fdb = find_fd(image, size);
+	struct fcba_t *fcba;
+
+	fcba = (struct fcba_t *)(image + (((fdb->flmap0) & 0xff) << 4));
+
+	/* clear bits 21-29 */
+	fcba->flcomp &= ~0x3fe00000;
+	/* Read ID and Read Status Clock Frequency */
+	fcba->flcomp |= freq << 27;
+	/* Write and Erase Clock Frequency */
+	fcba->flcomp |= freq << 24;
+	/* Fast Read Clock Frequency */
+	fcba->flcomp |= freq << 21;
+}
+
+/**
+ * set_em100_mode() - Set a SPI frequency that will work with Dediprog EM100
+ *
+ * @image:	Pointer to image
+ * @size:	Size of image in bytes
+ */
+static void set_em100_mode(char *image, int size)
+{
+	struct fdbar_t *fdb = find_fd(image, size);
+	struct fcba_t *fcba;
+
+	fcba = (struct fcba_t *)(image + (((fdb->flmap0) & 0xff) << 4));
+	fcba->flcomp &= ~(1 << 30);
+	set_spi_frequency(image, size, SPI_FREQUENCY_20MHZ);
+}
+
+/**
+ * lock_descriptor() - Lock the flash descriptor so it cannot be updated
+ *
+ * @image:	Pointer to image
+ * @size:	Size of image in bytes
+ */
+static void lock_descriptor(char *image, int size)
+{
+	struct fdbar_t *fdb = find_fd(image, size);
+	struct fmba_t *fmba;
+
+	/*
+	 * TODO: Dynamically take Platform Data Region and GbE Region into
+	 * account.
+	 */
+	fmba = (struct fmba_t *)(image + (((fdb->flmap1) & 0xff) << 4));
+	fmba->flmstr1 = 0x0a0b0000;
+	fmba->flmstr2 = 0x0c0d0000;
+	fmba->flmstr3 = 0x08080118;
+}
+
+/**
+ * unlock_descriptor() - Unlock the flash descriptor so it can be updated
+ *
+ * @image:	Pointer to image
+ * @size:	Size of image in bytes
+ */
+static void unlock_descriptor(char *image, int size)
+{
+	struct fdbar_t *fdb = find_fd(image, size);
+	struct fmba_t *fmba;
+
+	fmba = (struct fmba_t *)(image + (((fdb->flmap1) & 0xff) << 4));
+	fmba->flmstr1 = 0xffff0000;
+	fmba->flmstr2 = 0xffff0000;
+	fmba->flmstr3 = 0x08080118;
+}
+
+/**
+ * open_for_read() - Open a file for reading
+ *
+ * @fname:	Filename to open
+ * @sizep:	Returns file size in bytes
+ * @return 0 if OK, -1 on error
+ */
+int open_for_read(const char *fname, int *sizep)
+{
+	int fd = open(fname, O_RDONLY);
+	struct stat buf;
+
+	if (fd == -1)
+		return perror_fname("Could not open file '%s'", fname);
+	if (fstat(fd, &buf) == -1)
+		return perror_fname("Could not stat file '%s'", fname);
+	*sizep = buf.st_size;
+	debug("File %s is %d bytes\n", fname, *sizep);
+
+	return fd;
+}
+
+/**
+ * inject_region() - Add a file to an image region
+ *
+ * This puts a file into a particular region of the flash. Several pre-defined
+ * regions are used.
+ *
+ * @image:		Pointer to image
+ * @size:		Size of image in bytes
+ * @region_type:	Region where the file should be added
+ * @region_fname:	Filename to add to the image
+ * @return 0 if OK, -ve on error
+ */
+int inject_region(char *image, int size, int region_type, char *region_fname)
+{
+	struct fdbar_t *fdb = find_fd(image, size);
+	struct region_t region;
+	struct frba_t *frba;
+	int region_size;
+	int offset = 0;
+	int region_fd;
+	int ret;
+
+	if (!fdb)
+		exit(EXIT_FAILURE);
+	frba = (struct frba_t *)(image + (((fdb->flmap0 >> 16) & 0xff) << 4));
+
+	ret = get_region(frba, region_type, &region);
+	if (ret)
+		return -1;
+	if (region.size <= 0xfff) {
+		fprintf(stderr, "Region %s is disabled in target. Not injecting.\n",
+			region_name(region_type));
+		return -1;
+	}
+
+	region_fd = open_for_read(region_fname, &region_size);
+	if (region_fd < 0)
+		return region_fd;
+
+	if ((region_size > region.size) ||
+	    ((region_type != 1) && (region_size > region.size))) {
+		fprintf(stderr, "Region %s is %d(0x%x) bytes. File is %d(0x%x)  bytes. Not injecting.\n",
+			region_name(region_type), region.size,
+			region.size, region_size, region_size);
+		return -1;
+	}
+
+	if ((region_type == 1) && (region_size < region.size)) {
+		fprintf(stderr, "Region %s is %d(0x%x) bytes. File is %d(0x%x) bytes. Padding before injecting.\n",
+			region_name(region_type), region.size,
+			region.size, region_size, region_size);
+		offset = region.size - region_size;
+		memset(image + region.base, 0xff, offset);
+	}
+
+	if (size < region.base + offset + region_size) {
+		fprintf(stderr, "Output file is too small. (%d < %d)\n",
+			size, region.base + offset + region_size);
+		return -1;
+	}
+
+	if (read(region_fd, image + region.base + offset, region_size)
+							!= region_size) {
+		perror("Could not read file");
+		return -1;
+	}
+
+	close(region_fd);
+
+	debug("Adding %s as the %s section\n", region_fname,
+	      region_name(region_type));
+
+	return 0;
+}
+
+/**
+ * write_data() - Write some raw data into a region
+ *
+ * This puts a file into a particular place in the flash, ignoring the
+ * regions. Be careful not to overwrite something important.
+ *
+ * @image:		Pointer to image
+ * @size:		Size of image in bytes
+ * @addr:		x86 ROM address to put file. The ROM ends at
+ *			0xffffffff so use an address relative to that. For an
+ *			8MB ROM the start address is 0xfff80000.
+ * @write_fname:	Filename to add to the image
+ * @offset_uboot_top:	Offset of the top of U-Boot
+ * @offset_uboot_start:	Offset of the start of U-Boot
+ * @return number of bytes written if OK, -ve on error
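+ *
+ * For example, with an 8MB (0x800000 byte) image an @addr of 0xffff0000
+ * maps to file offset 0x7f0000, since the offset is computed as
+ * (addr + size) truncated to 32 bits.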
+ */
+static int write_data(char *image, int size, unsigned int addr,
+		      const char *write_fname, int offset_uboot_top,
+		      int offset_uboot_start)
+{
+	int write_fd, write_size;
+	int offset;
+
+	write_fd = open_for_read(write_fname, &write_size);
+	if (write_fd < 0)
+		return write_fd;
+
+	offset = (uint32_t)(addr + size);
+	if (offset_uboot_top) {
+		if (offset_uboot_start < offset &&
+		    offset_uboot_top >= offset) {
+			fprintf(stderr, "U-Boot image overlaps with region '%s'\n",
+				write_fname);
+			fprintf(stderr,
+				"U-Boot finishes at offset %x, file starts at %x\n",
+				offset_uboot_top, offset);
+			return -EXDEV;
+		}
+		if (offset_uboot_start > offset &&
+		    offset_uboot_start <= offset + write_size) {
+			fprintf(stderr, "U-Boot image overlaps with region '%s'\n",
+				write_fname);
+			fprintf(stderr,
+				"U-Boot starts at offset %x, file finishes at %x\n",
+				offset_uboot_start, offset + write_size);
+			return -EXDEV;
+		}
+	}
+	debug("Writing %s to offset %#x\n", write_fname, offset);
+
+	if (offset < 0 || offset + write_size > size) {
+		fprintf(stderr, "Output file is too small. (%d < %d)\n",
+			size, offset + write_size);
+		return -1;
+	}
+
+	if (read(write_fd, image + offset, write_size) != write_size) {
+		perror("Could not read file");
+		return -1;
+	}
+
+	close(write_fd);
+
+	return write_size;
+}
+
+static void print_version(void)
+{
+	printf("ifdtool v%s -- ", IFDTOOL_VERSION);
+	printf("Copyright (C) 2014 Google Inc.\n\n");
+	printf("SPDX-License-Identifier: GPL-2.0+\n");
+}
+
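+/*
+ * Typical invocations (file names illustrative):
+ *
+ *   ifdtool -d image.rom                       dump the descriptor contents
+ *   ifdtool -x image.rom                       extract each region to a file
+ *   ifdtool -i BIOS:u-boot.rom image.rom new.rom
+ *                                              inject u-boot.rom into the
+ *                                              BIOS region, writing new.rom
+ */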
+static void print_usage(const char *name)
+{
+	printf("usage: %s [-vhdix?] <filename> [<outfile>]\n", name);
+	printf("\n"
+	       "   -d | --dump:                      dump intel firmware descriptor\n"
+	       "   -x | --extract:                   extract intel fd modules\n"
+	       "   -i | --inject <region>:<module>   inject file <module> into region <region>\n"
+	       "   -w | --write <addr>:<file>        write file to appear at memory address <addr>\n"
+	       "                                     multiple files can be written simultaneously\n"
+	       "   -s | --spifreq <20|33|50>         set the SPI frequency\n"
+	       "   -e | --em100                      set SPI frequency to 20MHz and disable\n"
+	       "                                     Dual Output Fast Read Support\n"
+	       "   -l | --lock                       Lock firmware descriptor and ME region\n"
+	       "   -u | --unlock                     Unlock firmware descriptor and ME region\n"
+	       "   -r | --romsize                    Specify ROM size\n"
+	       "   -D | --write-descriptor <file>    Write descriptor at base\n"
+	       "   -c | --create                     Create a new empty image\n"
+	       "   -v | --version:                   print the version\n"
+	       "   -h | --help:                      print this help\n\n"
+	       "<region> is one of Descriptor, BIOS, ME, GbE, Platform\n"
+	       "\n");
+}
+
+/**
+ * get_two_words() - Convert a string into two words separated by :
+ *
+ * The supplied string is split at ':', two substrings are allocated and
+ * returned.
+ *
+ * @str:	String to split
+ * @firstp:	Returns first string
+ * @secondp:	Returns second string
+ * @return 0 if OK, -ve if @str does not have a :
+ */
+static int get_two_words(const char *str, char **firstp, char **secondp)
+{
+	const char *p;
+
+	p = strchr(str, ':');
+	if (!p)
+		return -1;
+	*firstp = strdup(str);
+	(*firstp)[p - str] = '\0';
+	*secondp = strdup(p + 1);
+
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	int opt, option_index = 0;
+	int mode_dump = 0, mode_extract = 0, mode_inject = 0;
+	int mode_spifreq = 0, mode_em100 = 0, mode_locked = 0;
+	int mode_unlocked = 0, mode_write = 0, mode_write_descriptor = 0;
+	int create = 0;
+	char *region_type_string = NULL, *inject_fname = NULL;
+	char *desc_fname = NULL, *addr_str = NULL;
+	int region_type = -1, inputfreq = 0;
+	enum spi_frequency spifreq = SPI_FREQUENCY_20MHZ;
+	struct input_file input_file[WRITE_MAX], *ifile, *fdt = NULL;
+	unsigned char wr_idx, wr_num = 0;
+	int rom_size = -1;
+	bool write_it;
+	char *filename;
+	char *outfile = NULL;
+	struct stat buf;
+	int size = 0;
+	bool have_uboot = false;
+	int bios_fd;
+	char *image;
+	int ret;
+	static struct option long_options[] = {
+		{"create", 0, NULL, 'c'},
+		{"dump", 0, NULL, 'd'},
+		{"descriptor", 1, NULL, 'D'},
+		{"em100", 0, NULL, 'e'},
+		{"extract", 0, NULL, 'x'},
+		{"fdt", 1, NULL, 'f'},
+		{"inject", 1, NULL, 'i'},
+		{"lock", 0, NULL, 'l'},
+		{"romsize", 1, NULL, 'r'},
+		{"spifreq", 1, NULL, 's'},
+		{"unlock", 0, NULL, 'u'},
+		{"uboot", 1, NULL, 'U'},
+		{"write", 1, NULL, 'w'},
+		{"version", 0, NULL, 'v'},
+		{"help", 0, NULL, 'h'},
+		{0, 0, 0, 0}
+	};
+
+	while ((opt = getopt_long(argc, argv, "cdD:ef:hi:lr:s:uU:vw:x?",
+				  long_options, &option_index)) != EOF) {
+		switch (opt) {
+		case 'c':
+			create = 1;
+			break;
+		case 'd':
+			mode_dump = 1;
+			break;
+		case 'D':
+			mode_write_descriptor = 1;
+			desc_fname = optarg;
+			break;
+		case 'e':
+			mode_em100 = 1;
+			break;
+		case 'i':
+			if (get_two_words(optarg, &region_type_string,
+					  &inject_fname)) {
+				print_usage(argv[0]);
+				exit(EXIT_FAILURE);
+			}
+			if (!strcasecmp("Descriptor", region_type_string))
+				region_type = 0;
+			else if (!strcasecmp("BIOS", region_type_string))
+				region_type = 1;
+			else if (!strcasecmp("ME", region_type_string))
+				region_type = 2;
+			else if (!strcasecmp("GbE", region_type_string))
+				region_type = 3;
+			else if (!strcasecmp("Platform", region_type_string))
+				region_type = 4;
+			if (region_type == -1) {
+				fprintf(stderr, "No such region type: '%s'\n\n",
+					region_type_string);
+				print_usage(argv[0]);
+				exit(EXIT_FAILURE);
+			}
+			mode_inject = 1;
+			break;
+		case 'l':
+			mode_locked = 1;
+			break;
+		case 'r':
+			rom_size = strtol(optarg, NULL, 0);
+			debug("ROM size %d\n", rom_size);
+			break;
+		case 's':
+			/* Parse the requested SPI frequency */
+			inputfreq = strtol(optarg, NULL, 0);
+			switch (inputfreq) {
+			case 20:
+				spifreq = SPI_FREQUENCY_20MHZ;
+				break;
+			case 33:
+				spifreq = SPI_FREQUENCY_33MHZ;
+				break;
+			case 50:
+				spifreq = SPI_FREQUENCY_50MHZ;
+				break;
+			default:
+				fprintf(stderr, "Invalid SPI Frequency: %d\n",
+					inputfreq);
+				print_usage(argv[0]);
+				exit(EXIT_FAILURE);
+			}
+			mode_spifreq = 1;
+			break;
+		case 'u':
+			mode_unlocked = 1;
+			break;
+		case 'v':
+			print_version();
+			exit(EXIT_SUCCESS);
+			break;
+		case 'w':
+		case 'U':
+		case 'f':
+			ifile = &input_file[wr_num];
+			mode_write = 1;
+			if (wr_num < WRITE_MAX) {
+				if (get_two_words(optarg, &addr_str,
+						  &ifile->fname)) {
+					print_usage(argv[0]);
+					exit(EXIT_FAILURE);
+				}
+				ifile->addr = strtoll(optarg, NULL, 0);
+				wr_num++;
+			} else {
+				fprintf(stderr,
+					"The number of files to write simultaneously exceeds the limit (%d)\n",
+					WRITE_MAX);
+			}
+			break;
+		case 'x':
+			mode_extract = 1;
+			break;
+		case 'h':
+		case '?':
+		default:
+			print_usage(argv[0]);
+			exit(EXIT_SUCCESS);
+			break;
+		}
+	}
+
+	if (mode_locked == 1 && mode_unlocked == 1) {
+		fprintf(stderr, "Locking/Unlocking FD and ME are mutually exclusive\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (mode_inject == 1 && mode_write == 1) {
+		fprintf(stderr, "Inject/Write are mutually exclusive\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if ((mode_dump + mode_extract + mode_inject +
+		(mode_spifreq | mode_em100 | mode_unlocked |
+		 mode_locked)) > 1) {
+		fprintf(stderr, "You may not specify more than one mode.\n\n");
+		print_usage(argv[0]);
+		exit(EXIT_FAILURE);
+	}
+
+	if ((mode_dump + mode_extract + mode_inject + mode_spifreq +
+	     mode_em100 + mode_locked + mode_unlocked + mode_write +
+	     mode_write_descriptor) == 0 && !create) {
+		fprintf(stderr, "You need to specify a mode.\n\n");
+		print_usage(argv[0]);
+		exit(EXIT_FAILURE);
+	}
+
+	if (create && rom_size == -1) {
+		fprintf(stderr, "You need to specify a rom size when creating.\n\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (optind + 1 != argc) {
+		fprintf(stderr, "You need to specify a file.\n\n");
+		print_usage(argv[0]);
+		exit(EXIT_FAILURE);
+	}
+
+	if (have_uboot && !fdt) {
+		fprintf(stderr,
+			"You must supply a device tree file for U-Boot\n\n");
+		print_usage(argv[0]);
+		exit(EXIT_FAILURE);
+	}
+
+	filename = argv[optind];
+	if (optind + 2 != argc)
+		outfile = argv[optind + 1];
+
+	if (create)
+		bios_fd = open(filename, O_WRONLY | O_CREAT, 0666);
+	else
+		bios_fd = open(filename, outfile ? O_RDONLY : O_RDWR);
+
+	if (bios_fd == -1) {
+		perror("Could not open file");
+		exit(EXIT_FAILURE);
+	}
+
+	if (!create) {
+		if (fstat(bios_fd, &buf) == -1) {
+			perror("Could not stat file");
+			exit(EXIT_FAILURE);
+		}
+		size = buf.st_size;
+	}
+
+	debug("File %s is %d bytes\n", filename, size);
+
+	if (rom_size == -1)
+		rom_size = size;
+
+	image = malloc(rom_size);
+	if (!image) {
+		printf("Out of memory.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	memset(image, '\xff', rom_size);
+	if (!create && read(bios_fd, image, size) != size) {
+		perror("Could not read file");
+		exit(EXIT_FAILURE);
+	}
+	if (size != rom_size) {
+		debug("ROM size changed to %d bytes\n", rom_size);
+		size = rom_size;
+	}
+
+	write_it = true;
+	ret = 0;
+	if (mode_dump) {
+		ret = dump_fd(image, size);
+		write_it = false;
+	}
+
+	if (mode_extract) {
+		ret = write_regions(image, size);
+		write_it = false;
+	}
+
+	if (mode_write_descriptor)
+		ret = write_data(image, size, -size, desc_fname, 0, 0);
+
+	if (mode_inject)
+		ret = inject_region(image, size, region_type, inject_fname);
+
+	if (mode_write) {
+		int offset_uboot_top = 0;
+		int offset_uboot_start = 0;
+
+		for (wr_idx = 0; wr_idx < wr_num; wr_idx++) {
+			ifile = &input_file[wr_idx];
+			ret = write_data(image, size, ifile->addr,
+					 ifile->fname, offset_uboot_top,
+					 offset_uboot_start);
+			if (ret < 0)
+				break;
+		}
+	}
+
+	if (mode_spifreq)
+		set_spi_frequency(image, size, spifreq);
+
+	if (mode_em100)
+		set_em100_mode(image, size);
+
+	if (mode_locked)
+		lock_descriptor(image, size);
+
+	if (mode_unlocked)
+		unlock_descriptor(image, size);
+
+	if (write_it) {
+		if (outfile) {
+			ret = write_image(outfile, image, size);
+		} else {
+			if (lseek(bios_fd, 0, SEEK_SET)) {
+				perror("Error while seeking");
+				ret = -1;
+			}
+			if (write(bios_fd, image, size) != size) {
+				perror("Error while writing");
+				ret = -1;
+			}
+		}
+	}
+
+	free(image);
+	close(bios_fd);
+
+	return ret < 0 ? 1 : 0;
+}
diff --git a/tools/u-boot-tools/ifdtool.h b/tools/u-boot-tools/ifdtool.h
new file mode 100644
index 0000000000000000000000000000000000000000..bb70b2f710dc3f044b7c9ab728f63f6789293010
--- /dev/null
+++ b/tools/u-boot-tools/ifdtool.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ifdtool - Manage Intel Firmware Descriptor information
+ *
+ * Copyright (C) 2011 The ChromiumOS Authors.
+ *
+ * From Coreboot project
+ */
+
+#include <stdint.h>
+
+#define __packed	__attribute__((packed))
+
+#define IFDTOOL_VERSION "1.1-U-Boot"
+
+#define WRITE_MAX	16
+
+enum spi_frequency {
+	SPI_FREQUENCY_20MHZ = 0,
+	SPI_FREQUENCY_33MHZ = 1,
+	SPI_FREQUENCY_50MHZ = 4,
+};
+
+enum component_density {
+	COMPONENT_DENSITY_512KB = 0,
+	COMPONENT_DENSITY_1MB   = 1,
+	COMPONENT_DENSITY_2MB   = 2,
+	COMPONENT_DENSITY_4MB   = 3,
+	COMPONENT_DENSITY_8MB   = 4,
+	COMPONENT_DENSITY_16MB  = 5,
+};
+
+/* flash descriptor */
+struct __packed fdbar_t {
+	uint32_t flvalsig;
+	uint32_t flmap0;
+	uint32_t flmap1;
+	uint32_t flmap2;
+	uint8_t  reserved[0xefc - 0x20];
+	uint32_t flumap1;
+};
+
+#define MAX_REGIONS	5
+
+/* regions */
+struct __packed frba_t {
+	uint32_t flreg[MAX_REGIONS];
+};
+
+/* component section */
+struct __packed fcba_t {
+	uint32_t flcomp;
+	uint32_t flill;
+	uint32_t flpb;
+};
+
+#define MAX_STRAPS	18
+
+/* pch strap */
+struct __packed fpsba_t {
+	uint32_t pchstrp[MAX_STRAPS];
+};
+
+/* master */
+struct __packed fmba_t {
+	uint32_t flmstr1;
+	uint32_t flmstr2;
+	uint32_t flmstr3;
+};
+
+/* processor strap */
+struct __packed fmsba_t {
+	uint32_t data[8];
+};
+
+/* ME VSCC */
+struct vscc_t {
+	uint32_t jid;
+	uint32_t vscc;
+};
+
+struct vtba_t {
+	/* Actual number of entries specified in vtl */
+	struct vscc_t entry[8];
+};
+
+struct region_t {
+	int base, limit, size;
+};
diff --git a/tools/u-boot-tools/image-host.c b/tools/u-boot-tools/image-host.c
new file mode 100644
index 0000000000000000000000000000000000000000..88b329502ca3a56e6a1349f3312cfab2e4117143
--- /dev/null
+++ b/tools/u-boot-tools/image-host.c
@@ -0,0 +1,754 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2013, Google Inc.
+ *
+ * (C) Copyright 2008 Semihalf
+ *
+ * (C) Copyright 2000-2006
+ * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ */
+
+#include "mkimage.h"
+#include <bootm.h>
+#include <image.h>
+#include <version.h>
+
+/**
+ * fit_set_hash_value - set hash value in requested hash node
+ * @fit: pointer to the FIT format image header
+ * @noffset: hash node offset
+ * @value: hash value to be set
+ * @value_len: hash value length
+ *
+ * fit_set_hash_value() attempts to set hash value in a node at offset
+ * given and returns operation status to the caller.
+ *
+ * returns
+ *     0, on success
+ *     -1, on failure
+ */
+static int fit_set_hash_value(void *fit, int noffset, uint8_t *value,
+				int value_len)
+{
+	int ret;
+
+	ret = fdt_setprop(fit, noffset, FIT_VALUE_PROP, value, value_len);
+	if (ret) {
+		printf("Can't set hash '%s' property for '%s' node(%s)\n",
+		       FIT_VALUE_PROP, fit_get_name(fit, noffset, NULL),
+		       fdt_strerror(ret));
+		return ret == -FDT_ERR_NOSPACE ? -ENOSPC : -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * fit_image_process_hash - Process a single subnode of the images/ node
+ *
+ * Check each subnode and process accordingly. For hash nodes we generate
+ * a hash of the supplied data and store it in the node.
+ *
+ * @fit:	pointer to the FIT format image header
+ * @image_name:	name of image being processed (used to display errors)
+ * @noffset:	subnode offset
+ * @data:	data to process
+ * @size:	size of data in bytes
+ * @return 0 if ok, -1 on error
+ */
+static int fit_image_process_hash(void *fit, const char *image_name,
+		int noffset, const void *data, size_t size)
+{
+	uint8_t value[FIT_MAX_HASH_LEN];
+	const char *node_name;
+	int value_len;
+	char *algo;
+	int ret;
+
+	node_name = fit_get_name(fit, noffset, NULL);
+
+	if (fit_image_hash_get_algo(fit, noffset, &algo)) {
+		printf("Can't get hash algo property for '%s' hash node in '%s' image node\n",
+		       node_name, image_name);
+		return -ENOENT;
+	}
+
+	if (calculate_hash(data, size, algo, value, &value_len)) {
+		printf("Unsupported hash algorithm (%s) for '%s' hash node in '%s' image node\n",
+		       algo, node_name, image_name);
+		return -EPROTONOSUPPORT;
+	}
+
+	ret = fit_set_hash_value(fit, noffset, value, value_len);
+	if (ret) {
+		printf("Can't set hash value for '%s' hash node in '%s' image node\n",
+		       node_name, image_name);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * fit_image_write_sig() - write the signature to a FIT
+ *
+ * This writes the signature and signer data to the FIT.
+ *
+ * @fit: pointer to the FIT format image header
+ * @noffset: hash node offset
+ * @value: signature value to be set
+ * @value_len: signature value length
+ * @comment: Text comment to write (NULL for none)
+ *
+ * returns
+ *     0, on success
+ *     -FDT_ERR_..., on failure
+ */
+static int fit_image_write_sig(void *fit, int noffset, uint8_t *value,
+		int value_len, const char *comment, const char *region_prop,
+		int region_proplen, const char *cmdname)
+{
+	int string_size;
+	int ret;
+
+	/*
+	 * Get the current string size, before we update the FIT and add
+	 * more
+	 */
+	string_size = fdt_size_dt_strings(fit);
+
+	ret = fdt_setprop(fit, noffset, FIT_VALUE_PROP, value, value_len);
+	if (!ret) {
+		ret = fdt_setprop_string(fit, noffset, "signer-name",
+					 "mkimage");
+	}
+	if (!ret) {
+		ret = fdt_setprop_string(fit, noffset, "signer-version",
+				  PLAIN_VERSION);
+	}
+	if (comment && !ret)
+		ret = fdt_setprop_string(fit, noffset, "comment", comment);
+	if (!ret) {
+		time_t timestamp = imagetool_get_source_date(cmdname,
+							     time(NULL));
+
+		ret = fit_set_timestamp(fit, noffset, timestamp);
+	}
+	if (region_prop && !ret) {
+		uint32_t strdata[2];
+
+		ret = fdt_setprop(fit, noffset, "hashed-nodes",
+				   region_prop, region_proplen);
+		/* This is a legacy offset, it is unused, and must remain 0. */
+		strdata[0] = 0;
+		strdata[1] = cpu_to_fdt32(string_size);
+		if (!ret) {
+			ret = fdt_setprop(fit, noffset, "hashed-strings",
+					  strdata, sizeof(strdata));
+		}
+	}
+
+	return ret;
+}
+
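+/*
+ * The signature node's "algo" property names both halves of the scheme,
+ * e.g. algo = "sha256,rsa2048"; image_get_checksum_algo() and
+ * image_get_crypto_algo() split it into the checksum and crypto
+ * implementations used below.
+ */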
+static int fit_image_setup_sig(struct image_sign_info *info,
+		const char *keydir, void *fit, const char *image_name,
+		int noffset, const char *require_keys, const char *engine_id)
+{
+	const char *node_name;
+	char *algo_name;
+	const char *padding_name;
+
+	node_name = fit_get_name(fit, noffset, NULL);
+	if (fit_image_hash_get_algo(fit, noffset, &algo_name)) {
+		printf("Can't get algo property for '%s' signature node in '%s' image node\n",
+		       node_name, image_name);
+		return -1;
+	}
+
+	padding_name = fdt_getprop(fit, noffset, "padding", NULL);
+
+	memset(info, '\0', sizeof(*info));
+	info->keydir = keydir;
+	info->keyname = fdt_getprop(fit, noffset, "key-name-hint", NULL);
+	info->fit = fit;
+	info->node_offset = noffset;
+	info->name = strdup(algo_name);
+	info->checksum = image_get_checksum_algo(algo_name);
+	info->crypto = image_get_crypto_algo(algo_name);
+	info->padding = image_get_padding_algo(padding_name);
+	info->require_keys = require_keys;
+	info->engine_id = engine_id;
+	if (!info->checksum || !info->crypto) {
+		printf("Unsupported signature algorithm (%s) for '%s' signature node in '%s' image node\n",
+		       algo_name, node_name, image_name);
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * fit_image_process_sig- Process a single subnode of the images/ node
+ *
+ * Check each subnode and process accordingly. For signature nodes we
+ * generate a signed hash of the supplied data and store it in the node.
+ *
+ * @keydir:	Directory containing keys to use for signing
+ * @keydest:	Destination FDT blob to write public keys into
+ * @fit:	pointer to the FIT format image header
+ * @image_name:	name of image being processed (used to display errors)
+ * @noffset:	subnode offset
+ * @data:	data to process
+ * @size:	size of data in bytes
+ * @comment:	Comment to add to signature nodes
+ * @require_keys: Mark all keys as 'required'
+ * @engine_id:	Engine to use for signing
+ * @return 0 if ok, -1 on error
+ */
+static int fit_image_process_sig(const char *keydir, void *keydest,
+		void *fit, const char *image_name,
+		int noffset, const void *data, size_t size,
+		const char *comment, int require_keys, const char *engine_id,
+		const char *cmdname)
+{
+	struct image_sign_info info;
+	struct image_region region;
+	const char *node_name;
+	uint8_t *value;
+	uint value_len;
+	int ret;
+
+	if (fit_image_setup_sig(&info, keydir, fit, image_name, noffset,
+				require_keys ? "image" : NULL, engine_id))
+		return -1;
+
+	node_name = fit_get_name(fit, noffset, NULL);
+	region.data = data;
+	region.size = size;
+	ret = info.crypto->sign(&info, &region, 1, &value, &value_len);
+	if (ret) {
+		printf("Failed to sign '%s' signature node in '%s' image node: %d\n",
+		       node_name, image_name, ret);
+
+		/* We allow keys to be missing */
+		if (ret == -ENOENT)
+			return 0;
+		return -1;
+	}
+
+	ret = fit_image_write_sig(fit, noffset, value, value_len, comment,
+			NULL, 0, cmdname);
+	if (ret) {
+		if (ret == -FDT_ERR_NOSPACE)
+			return -ENOSPC;
+		printf("Can't write signature for '%s' signature node in '%s' conf node: %s\n",
+		       node_name, image_name, fdt_strerror(ret));
+		return -1;
+	}
+	free(value);
+
+	/* Get keyname again, as FDT has changed and invalidated our pointer */
+	info.keyname = fdt_getprop(fit, noffset, "key-name-hint", NULL);
+
+	/*
+	 * Write the public key into the supplied FDT file; this might fail
+	 * several times, since we try signing with successively increasing
+	 * size values
+	 */
+	if (keydest) {
+		ret = info.crypto->add_verify_data(&info, keydest);
+		if (ret) {
+			printf("Failed to add verification data for '%s' signature node in '%s' image node\n",
+			       node_name, image_name);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * fit_image_add_verification_data() - calculate/set verification data for an image node
+ *
+ * This adds hash and signature values for a component image node.
+ *
+ * All existing hash subnodes are checked, if algorithm property is set to
+ * one of the supported hash algorithms, hash value is computed and
+ * corresponding hash node property is set, for example:
+ *
+ * Input component image node structure:
+ *
+ * o image-1 (at image_noffset)
+ *   | - data = [binary data]
+ *   o hash-1
+ *     |- algo = "sha1"
+ *
+ * Output component image node structure:
+ *
+ * o image-1 (at image_noffset)
+ *   | - data = [binary data]
+ *   o hash-1
+ *     |- algo = "sha1"
+ *     |- value = sha1(data)
+ *
+ * For signature details, please see doc/uImage.FIT/signature.txt
+ *
+ * @keydir	Directory containing *.key and *.crt files (or NULL)
+ * @keydest	FDT Blob to write public keys into (NULL if none)
+ * @fit:	Pointer to the FIT format image header
+ * @image_noffset: Requested component image node
+ * @comment:	Comment to add to signature nodes
+ * @require_keys: Mark all keys as 'required'
+ * @engine_id:	Engine to use for signing
+ * @return: 0 on success, <0 on failure
+ */
+int fit_image_add_verification_data(const char *keydir, void *keydest,
+		void *fit, int image_noffset, const char *comment,
+		int require_keys, const char *engine_id, const char *cmdname)
+{
+	const char *image_name;
+	const void *data;
+	size_t size;
+	int noffset;
+
+	/* Get image data and data length */
+	if (fit_image_get_data(fit, image_noffset, &data, &size)) {
+		printf("Can't get image data/size\n");
+		return -1;
+	}
+
+	image_name = fit_get_name(fit, image_noffset, NULL);
+
+	/* Process all hash subnodes of the component image node */
+	for (noffset = fdt_first_subnode(fit, image_noffset);
+	     noffset >= 0;
+	     noffset = fdt_next_subnode(fit, noffset)) {
+		const char *node_name;
+		int ret = 0;
+
+		/*
+		 * Check subnode name, must be equal to "hash" or "signature".
+		 * Multiple hash nodes require unique unit node
+		 * names, e.g. hash-1, hash-2, signature-1, etc.
+		 */
+		node_name = fit_get_name(fit, noffset, NULL);
+		if (!strncmp(node_name, FIT_HASH_NODENAME,
+			     strlen(FIT_HASH_NODENAME))) {
+			ret = fit_image_process_hash(fit, image_name, noffset,
+						data, size);
+		} else if (IMAGE_ENABLE_SIGN && keydir &&
+			   !strncmp(node_name, FIT_SIG_NODENAME,
+				strlen(FIT_SIG_NODENAME))) {
+			ret = fit_image_process_sig(keydir, keydest,
+				fit, image_name, noffset, data, size,
+				comment, require_keys, engine_id, cmdname);
+		}
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+struct strlist {
+	int count;
+	char **strings;
+};
+
+static void strlist_init(struct strlist *list)
+{
+	memset(list, '\0', sizeof(*list));
+}
+
+static void strlist_free(struct strlist *list)
+{
+	int i;
+
+	for (i = 0; i < list->count; i++)
+		free(list->strings[i]);
+	free(list->strings);
+}
+
+static int strlist_add(struct strlist *list, const char *str)
+{
+	char *dup;
+
+	dup = strdup(str);
+	list->strings = realloc(list->strings,
+				(list->count + 1) * sizeof(char *));
+	if (!dup || !list->strings)
+		return -1;
+	list->strings[list->count++] = dup;
+
+	return 0;
+}
+
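+/*
+ * A configuration signature node can limit which images are covered with a
+ * "sign-images" property, for example (node and key names illustrative):
+ *
+ *	signature-1 {
+ *		algo = "sha256,rsa2048";
+ *		key-name-hint = "dev";
+ *		sign-images = "kernel", "fdt";
+ *	};
+ *
+ * When the property is absent, the configuration's kernel and fdt images
+ * are signed.
+ */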
+static const char *fit_config_get_image_list(void *fit, int noffset,
+		int *lenp, int *allow_missingp)
+{
+	static const char default_list[] = FIT_KERNEL_PROP "\0"
+			FIT_FDT_PROP;
+	const char *prop;
+
+	/* If there is a "sign-images" property, use that */
+	prop = fdt_getprop(fit, noffset, "sign-images", lenp);
+	if (prop) {
+		*allow_missingp = 0;
+		return *lenp ? prop : NULL;
+	}
+
+	/* Default image list */
+	*allow_missingp = 1;
+	*lenp = sizeof(default_list);
+
+	return default_list;
+}
+
+static int fit_config_get_hash_list(void *fit, int conf_noffset,
+				    int sig_offset, struct strlist *node_inc)
+{
+	int allow_missing;
+	const char *prop, *iname, *end;
+	const char *conf_name, *sig_name;
+	char name[200], path[200];
+	int image_count;
+	int ret, len;
+
+	conf_name = fit_get_name(fit, conf_noffset, NULL);
+	sig_name = fit_get_name(fit, sig_offset, NULL);
+
+	/*
+	 * Build a list of nodes we need to hash. We always need the root
+	 * node and the configuration.
+	 */
+	strlist_init(node_inc);
+	snprintf(name, sizeof(name), "%s/%s", FIT_CONFS_PATH, conf_name);
+	if (strlist_add(node_inc, "/") ||
+	    strlist_add(node_inc, name))
+		goto err_mem;
+
+	/* Get a list of images that we intend to sign */
+	prop = fit_config_get_image_list(fit, sig_offset, &len,
+					&allow_missing);
+	if (!prop)
+		return 0;
+
+	/* Locate the images */
+	end = prop + len;
+	image_count = 0;
+	for (iname = prop; iname < end; iname += strlen(iname) + 1) {
+		int noffset;
+		int image_noffset;
+		int hash_count;
+
+		image_noffset = fit_conf_get_prop_node(fit, conf_noffset,
+						       iname);
+		if (image_noffset < 0) {
+			printf("Failed to find image '%s' in configuration '%s/%s'\n",
+			       iname, conf_name, sig_name);
+			if (allow_missing)
+				continue;
+
+			return -ENOENT;
+		}
+
+		ret = fdt_get_path(fit, image_noffset, path, sizeof(path));
+		if (ret < 0)
+			goto err_path;
+		if (strlist_add(node_inc, path))
+			goto err_mem;
+
+		snprintf(name, sizeof(name), "%s/%s", FIT_CONFS_PATH,
+			 conf_name);
+
+		/* Add all this image's hashes */
+		hash_count = 0;
+		for (noffset = fdt_first_subnode(fit, image_noffset);
+		     noffset >= 0;
+		     noffset = fdt_next_subnode(fit, noffset)) {
+			const char *name = fit_get_name(fit, noffset, NULL);
+
+			if (strncmp(name, FIT_HASH_NODENAME,
+				    strlen(FIT_HASH_NODENAME)))
+				continue;
+			ret = fdt_get_path(fit, noffset, path, sizeof(path));
+			if (ret < 0)
+				goto err_path;
+			if (strlist_add(node_inc, path))
+				goto err_mem;
+			hash_count++;
+		}
+
+		if (!hash_count) {
+			printf("Failed to find any hash nodes in configuration '%s/%s' image '%s' - without these it is not possible to verify this image\n",
+			       conf_name, sig_name, iname);
+			return -ENOMSG;
+		}
+
+		image_count++;
+	}
+
+	if (!image_count) {
+		printf("Failed to find any images for configuration '%s/%s'\n",
+		       conf_name, sig_name);
+		return -ENOMSG;
+	}
+
+	return 0;
+
+err_mem:
+	printf("Out of memory processing configuration '%s/%s'\n", conf_name,
+	       sig_name);
+	return -ENOMEM;
+
+err_path:
+	printf("Failed to get path for image '%s' in configuration '%s/%s': %s\n",
+	       iname, conf_name, sig_name, fdt_strerror(ret));
+	return -ENOENT;
+}
+
+static int fit_config_get_data(void *fit, int conf_noffset, int noffset,
+		struct image_region **regionp, int *region_countp,
+		char **region_propp, int *region_proplen)
+{
+	char * const exc_prop[] = {"data"};
+	struct strlist node_inc;
+	struct image_region *region;
+	struct fdt_region fdt_regions[100];
+	const char *conf_name, *sig_name;
+	char path[200];
+	int count, i;
+	char *region_prop;
+	int ret, len;
+
+	conf_name = fit_get_name(fit, conf_noffset, NULL);
+	sig_name = fit_get_name(fit, noffset, NULL);
+	debug("%s: conf='%s', sig='%s'\n", __func__, conf_name, sig_name);
+
+	/* Get a list of nodes we want to hash */
+	ret = fit_config_get_hash_list(fit, conf_noffset, noffset, &node_inc);
+	if (ret)
+		return ret;
+
+	/* Get a list of regions to hash */
+	count = fdt_find_regions(fit, node_inc.strings, node_inc.count,
+			exc_prop, ARRAY_SIZE(exc_prop),
+			fdt_regions, ARRAY_SIZE(fdt_regions),
+			path, sizeof(path), 1);
+	if (count < 0) {
+		printf("Failed to hash configuration '%s/%s': %s\n", conf_name,
+		       sig_name, fdt_strerror(count));
+		return -EIO;
+	}
+	if (count == 0) {
+		printf("No data to hash for configuration '%s/%s'\n",
+		       conf_name, sig_name);
+		return -EINVAL;
+	}
+
+	/* Build our list of data blocks */
+	region = fit_region_make_list(fit, fdt_regions, count, NULL);
+	if (!region) {
+		printf("Out of memory hashing configuration '%s/%s'\n",
+		       conf_name, sig_name);
+		return -ENOMEM;
+	}
+
+	/* Create a list of all hashed properties */
+	debug("Hash nodes:\n");
+	for (i = len = 0; i < node_inc.count; i++) {
+		debug("   %s\n", node_inc.strings[i]);
+		len += strlen(node_inc.strings[i]) + 1;
+	}
+	region_prop = malloc(len);
+	if (!region_prop) {
+		printf("Out of memory setting up regions for configuration '%s/%s'\n",
+		       conf_name, sig_name);
+		return -ENOMEM;
+	}
+	for (i = len = 0; i < node_inc.count;
+	     len += strlen(node_inc.strings[i]) + 1, i++)
+		strcpy(region_prop + len, node_inc.strings[i]);
+	strlist_free(&node_inc);
+
+	*region_countp = count;
+	*regionp = region;
+	*region_propp = region_prop;
+	*region_proplen = len;
+
+	return 0;
+}
+
+static int fit_config_process_sig(const char *keydir, void *keydest,
+		void *fit, const char *conf_name, int conf_noffset,
+		int noffset, const char *comment, int require_keys,
+		const char *engine_id, const char *cmdname)
+{
+	struct image_sign_info info;
+	const char *node_name;
+	struct image_region *region;
+	char *region_prop;
+	int region_proplen;
+	int region_count;
+	uint8_t *value;
+	uint value_len;
+	int ret;
+
+	node_name = fit_get_name(fit, noffset, NULL);
+	if (fit_config_get_data(fit, conf_noffset, noffset, &region,
+				&region_count, &region_prop, &region_proplen))
+		return -1;
+
+	if (fit_image_setup_sig(&info, keydir, fit, conf_name, noffset,
+				require_keys ? "conf" : NULL, engine_id))
+		return -1;
+
+	ret = info.crypto->sign(&info, region, region_count, &value,
+				&value_len);
+	free(region);
+	if (ret) {
+		printf("Failed to sign '%s' signature node in '%s' conf node\n",
+		       node_name, conf_name);
+
+		/* We allow keys to be missing */
+		if (ret == -ENOENT)
+			return 0;
+		return -1;
+	}
+
+	ret = fit_image_write_sig(fit, noffset, value, value_len, comment,
+				region_prop, region_proplen, cmdname);
+	if (ret) {
+		if (ret == -FDT_ERR_NOSPACE)
+			return -ENOSPC;
+		printf("Can't write signature for '%s' signature node in '%s' conf node: %s\n",
+		       node_name, conf_name, fdt_strerror(ret));
+		return -1;
+	}
+	free(value);
+	free(region_prop);
+
+	/* Get keyname again, as FDT has changed and invalidated our pointer */
+	info.keyname = fdt_getprop(fit, noffset, "key-name-hint", NULL);
+
+	/* Write the public key into the supplied FDT file */
+	if (keydest) {
+		ret = info.crypto->add_verify_data(&info, keydest);
+		if (ret) {
+			printf("Failed to add verification data for '%s' signature node in '%s' configuration node\n",
+			       node_name, conf_name);
+		}
+		return ret;
+	}
+
+	return 0;
+}
+
+static int fit_config_add_verification_data(const char *keydir, void *keydest,
+		void *fit, int conf_noffset, const char *comment,
+		int require_keys, const char *engine_id, const char *cmdname)
+{
+	const char *conf_name;
+	int noffset;
+
+	conf_name = fit_get_name(fit, conf_noffset, NULL);
+
+	/* Process all hash subnodes of the configuration node */
+	for (noffset = fdt_first_subnode(fit, conf_noffset);
+	     noffset >= 0;
+	     noffset = fdt_next_subnode(fit, noffset)) {
+		const char *node_name;
+		int ret = 0;
+
+		node_name = fit_get_name(fit, noffset, NULL);
+		if (!strncmp(node_name, FIT_SIG_NODENAME,
+			     strlen(FIT_SIG_NODENAME))) {
+			ret = fit_config_process_sig(keydir, keydest,
+				fit, conf_name, conf_noffset, noffset, comment,
+				require_keys, engine_id, cmdname);
+		}
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int fit_add_verification_data(const char *keydir, void *keydest, void *fit,
+			      const char *comment, int require_keys,
+			      const char *engine_id, const char *cmdname)
+{
+	int images_noffset, confs_noffset;
+	int noffset;
+	int ret;
+
+	/* Find images parent node offset */
+	images_noffset = fdt_path_offset(fit, FIT_IMAGES_PATH);
+	if (images_noffset < 0) {
+		printf("Can't find images parent node '%s' (%s)\n",
+		       FIT_IMAGES_PATH, fdt_strerror(images_noffset));
+		return images_noffset;
+	}
+
+	/* Process its subnodes, adding verification data to each image */
+	for (noffset = fdt_first_subnode(fit, images_noffset);
+	     noffset >= 0;
+	     noffset = fdt_next_subnode(fit, noffset)) {
+		/*
+		 * Direct child node of the images parent node,
+		 * i.e. component image node.
+		 */
+		ret = fit_image_add_verification_data(keydir, keydest,
+				fit, noffset, comment, require_keys, engine_id,
+				cmdname);
+		if (ret)
+			return ret;
+	}
+
+	/* If there are no keys, we can't sign configurations */
+	if (!IMAGE_ENABLE_SIGN || !keydir)
+		return 0;
+
+	/* Find configurations parent node offset */
+	confs_noffset = fdt_path_offset(fit, FIT_CONFS_PATH);
+	if (confs_noffset < 0) {
+		printf("Can't find configurations parent node '%s' (%s)\n",
+		       FIT_CONFS_PATH, fdt_strerror(confs_noffset));
+		return -ENOENT;
+	}
+
+	/* Process its subnodes, adding verification data to each configuration */
+	for (noffset = fdt_first_subnode(fit, confs_noffset);
+	     noffset >= 0;
+	     noffset = fdt_next_subnode(fit, noffset)) {
+		ret = fit_config_add_verification_data(keydir, keydest,
+						       fit, noffset, comment,
+						       require_keys,
+						       engine_id, cmdname);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_FIT_SIGNATURE
+int fit_check_sign(const void *fit, const void *key)
+{
+	int cfg_noffset;
+	int ret;
+
+	cfg_noffset = fit_conf_get_node(fit, NULL);
+	if (cfg_noffset < 0)
+		return -1;
+
+	printf("Verifying Hash Integrity ... ");
+	ret = fit_config_verify(fit, cfg_noffset);
+	if (ret)
+		return ret;
+	ret = bootm_host_load_images(fit, cfg_noffset);
+
+	return ret;
+}
+#endif
diff --git a/tools/u-boot-tools/image-host.o b/tools/u-boot-tools/image-host.o
new file mode 100644
index 0000000000000000000000000000000000000000..f0516461273a93b368a636d977634a0e2bf38a49
Binary files /dev/null and b/tools/u-boot-tools/image-host.o differ
diff --git a/tools/u-boot-tools/imagetool.c b/tools/u-boot-tools/imagetool.c
new file mode 100644
index 0000000000000000000000000000000000000000..b3e628f612f527f7a3ed11afb7c57395ae6a6bab
--- /dev/null
+++ b/tools/u-boot-tools/imagetool.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2013
+ *
+ * Written by Guilherme Maciel Ferreira <guilherme.maciel.ferreira@gmail.com>
+ */
+
+#include "imagetool.h"
+
+#include <image.h>
+
+struct image_type_params *imagetool_get_type(int type)
+{
+	struct image_type_params **curr;
+	INIT_SECTION(image_type);
+
+	struct image_type_params **start = __start_image_type;
+	struct image_type_params **end = __stop_image_type;
+
+	for (curr = start; curr != end; curr++) {
+		if ((*curr)->check_image_type) {
+			if (!(*curr)->check_image_type(type))
+				return *curr;
+		}
+	}
+	return NULL;
+}
+
+int imagetool_verify_print_header(
+	void *ptr,
+	struct stat *sbuf,
+	struct image_type_params *tparams,
+	struct image_tool_params *params)
+{
+	int retval = -1;
+	struct image_type_params **curr;
+	INIT_SECTION(image_type);
+
+	struct image_type_params **start = __start_image_type;
+	struct image_type_params **end = __stop_image_type;
+
+	for (curr = start; curr != end; curr++) {
+		if ((*curr)->verify_header) {
+			retval = (*curr)->verify_header((unsigned char *)ptr,
+						     sbuf->st_size, params);
+
+			if (retval == 0) {
+				/*
+				 * Print the image information if verification
+				 * was successful
+				 */
+				if ((*curr)->print_header) {
+					if (!params->quiet)
+						(*curr)->print_header(ptr);
+				} else {
+					fprintf(stderr,
+						"%s: print_header undefined for %s\n",
+						params->cmdname, (*curr)->name);
+				}
+				break;
+			}
+		}
+	}
+
+	return retval;
+}
+
+int imagetool_save_subimage(
+	const char *file_name,
+	ulong file_data,
+	ulong file_len)
+{
+	int dfd;
+
+	dfd = open(file_name, O_RDWR | O_CREAT | O_TRUNC | O_BINARY,
+		   S_IRUSR | S_IWUSR);
+	if (dfd < 0) {
+		fprintf(stderr, "Can't open \"%s\": %s\n",
+			file_name, strerror(errno));
+		return -1;
+	}
+
+	if (write(dfd, (void *)file_data, file_len) != (ssize_t)file_len) {
+		fprintf(stderr, "Write error on \"%s\": %s\n",
+			file_name, strerror(errno));
+		close(dfd);
+		return -1;
+	}
+
+	close(dfd);
+
+	return 0;
+}
+
+int imagetool_get_filesize(struct image_tool_params *params, const char *fname)
+{
+	struct stat sbuf;
+	int fd;
+
+	fd = open(fname, O_RDONLY | O_BINARY);
+	if (fd < 0) {
+		fprintf(stderr, "%s: Can't open %s: %s\n",
+			params->cmdname, fname, strerror(errno));
+		return -1;
+	}
+
+	if (fstat(fd, &sbuf) < 0) {
+		fprintf(stderr, "%s: Can't stat %s: %s\n",
+			params->cmdname, fname, strerror(errno));
+		close(fd);
+		return -1;
+	}
+	close(fd);
+
+	return sbuf.st_size;
+}
+
+time_t imagetool_get_source_date(
+	 const char *cmdname,
+	 time_t fallback)
+{
+	char *source_date_epoch = getenv("SOURCE_DATE_EPOCH");
+
+	if (source_date_epoch == NULL)
+		return fallback;
+
+	time_t time = (time_t) strtol(source_date_epoch, NULL, 10);
+
+	if (gmtime(&time) == NULL) {
+		fprintf(stderr, "%s: SOURCE_DATE_EPOCH is not valid\n",
+			cmdname);
+		time = 0;
+	}
+
+	return time;
+}
diff --git a/tools/u-boot-tools/imagetool.h b/tools/u-boot-tools/imagetool.h
new file mode 100644
index 0000000000000000000000000000000000000000..71471420f9c4e218337ff0ee51514607274c816f
--- /dev/null
+++ b/tools/u-boot-tools/imagetool.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2013
+ *
+ * Written by Guilherme Maciel Ferreira <guilherme.maciel.ferreira@gmail.com>
+ */
+
+#ifndef _IMAGETOOL_H_
+#define _IMAGETOOL_H_
+
+#include "os_support.h"
+#include <errno.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+#include <u-boot/sha1.h>
+
+#include "fdt_host.h"
+
+#define ARRAY_SIZE(x)		(sizeof(x) / sizeof((x)[0]))
+
+#define IH_ARCH_DEFAULT		IH_ARCH_INVALID
+
+/* Information about a file that needs to be placed into the FIT */
+struct content_info {
+	struct content_info *next;
+	int type;		/* File type (IH_TYPE_...) */
+	const char *fname;
+};
+
+/*
+ * This structure holds the variables that are initialized by the mkimage
+ * and dumpimage cores and that need to be referenced by the image-type
+ * specific functions
+ */
+struct image_tool_params {
+	int dflag;
+	int eflag;
+	int fflag;
+	int iflag;
+	int lflag;
+	int pflag;
+	int vflag;
+	int xflag;
+	int skipcpy;
+	int os;
+	int arch;
+	int type;
+	int comp;
+	char *dtc;
+	unsigned int addr;
+	unsigned int ep;
+	char *imagename;
+	char *imagename2;
+	char *datafile;
+	char *imagefile;
+	char *cmdname;
+	const char *outfile;	/* Output filename */
+	const char *keydir;	/* Directory holding private keys */
+	const char *keydest;	/* Destination .dtb for public key */
+	const char *comment;	/* Comment to add to signature node */
+	int require_keys;	/* 1 to mark signing keys as 'required' */
+	int file_size;		/* Total size of output file */
+	int orig_file_size;	/* Original size for file before padding */
+	bool auto_its;		/* Automatically create the .its file */
+	int fit_image_type;	/* Image type to put into the FIT */
+	char *fit_ramdisk;	/* Ramdisk file to include */
+	struct content_info *content_head;	/* List of files to include */
+	struct content_info *content_tail;
+	bool external_data;	/* Store data outside the FIT */
+	bool quiet;		/* Don't output text in normal operation */
+	unsigned int external_offset;	/* Add padding to external data */
+	const char *engine_id;	/* Engine to use for signing */
+};
+
+/*
+ * image type specific variables and callback functions
+ */
+struct image_type_params {
+	/* name is an identification tag string for added support */
+	char *name;
+	/*
+	 * header size is specific to the image type being supported;
+	 * the mkimage core treats it as a number of bytes
+	 */
+	uint32_t header_size;
+	/* Image type header pointer */
+	void *hdr;
+	/*
+	 * Several arguments passed on the command line are registered as
+	 * flags in the image_tool_params structure. This callback checks
+	 * that the passed arguments are consistent with the image type
+	 * being supported
+	 *
+	 * Returns 1 if parameter check is successful
+	 */
+	int (*check_params) (struct image_tool_params *);
+	/*
+	 * This function is used by the list command (i.e. mkimage -l
+	 * <filename>); the image type verification code must be put here
+	 *
+	 * Returns 0 if image header verification is successful;
+	 * otherwise, returns the respective negative error code
+	 */
+	int (*verify_header) (unsigned char *, int, struct image_tool_params *);
+	/* Prints image information abstracting from image header */
+	void (*print_header) (const void *);
+	/*
+	 * This callback sets the header or image contents as required by
+	 * the image type being generated. Further output file post
+	 * processing (e.g. checksum calculation, padding bytes, etc.) can
+	 * also be done in this callback function.
+	 */
+	void (*set_header) (void *, struct stat *, int,
+					struct image_tool_params *);
+	/*
+	 * This function is used by the command to retrieve a component
+	 * (sub-image) from the image (i.e. dumpimage -i <image> -p <position>
+	 * <sub-image-name>).
+	 * Thus the code to extract a file from an image must be put here.
+	 *
+	 * Returns 0 if the file was successfully retrieved from the image,
+	 * or a negative value on error.
+	 */
+	int (*extract_subimage)(void *, struct image_tool_params *);
+	/*
+	 * Some image generation back ends (e.g. the default image type)
+	 * support more than one type id; this callback checks whether the
+	 * input (-T <image_type>) is supported by the registered image
+	 * generation/list low-level code
+	 */
+	int (*check_image_type) (uint8_t);
+	/* This callback function will be executed if fflag is defined */
+	int (*fflag_handle) (struct image_tool_params *);
+	/*
+	 * This callback function will be executed for variable-size records.
+	 * It is expected to build the header in memory and return its length
+	 * and a pointer to it by using image_type_params.header_size and
+	 * image_type_params.hdr. The return value shall indicate if an
+	 * additional padding should be used when copying the data image
+	 * by returning the padding length.
+	 */
+	int (*vrec_header) (struct image_tool_params *,
+		struct image_type_params *);
+};
+
+/**
+ * imagetool_get_type() - find the image type params for a given image type
+ *
+ * It scans all registered image type handlers and checks the input type
+ * against each supported image type.
+ *
+ * Returns the respective image_type_params pointer on success, or NULL if
+ * the input type id is not supported by any registered handler.
+ */
+struct image_type_params *imagetool_get_type(int type);
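+
+/*
+ * Usage sketch (illustrative only): pick the handler for the -T type given
+ * on the command line and bail out if no registered back end claims it.
+ *
+ *	struct image_type_params *tparams = imagetool_get_type(params->type);
+ *
+ *	if (!tparams) {
+ *		fprintf(stderr, "%s: unsupported image type %d\n",
+ *			params->cmdname, params->type);
+ *		exit(EXIT_FAILURE);
+ *	}
+ */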
+
+/*
+ * imagetool_verify_print_header() - verifies the image header
+ *
+ * Scan registered image types and verify the image_header for each
+ * supported image type. If verification is successful, this prints
+ * the respective header.
+ *
+ * @return 0 on success, negative if the input image format does not match
+ * any of the supported image types
+ */
+int imagetool_verify_print_header(
+	void *ptr,
+	struct stat *sbuf,
+	struct image_type_params *tparams,
+	struct image_tool_params *params);
+
+/**
+ * imagetool_save_subimage - store data into a file
+ * @file_name: name of the destination file
+ * @file_data: data to be written
+ * @file_len: the amount of data to store
+ *
+ * imagetool_save_subimage() stores file_len bytes of data pointed to by
+ * file_data into the file named by file_name.
+ *
+ * returns:
+ *     zero in case of success or a negative value if fail.
+ */
+int imagetool_save_subimage(
+	const char *file_name,
+	ulong file_data,
+	ulong file_len);
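+
+/*
+ * Usage sketch (illustrative only, not taken from this patch): write a
+ * sub-image that was located inside a loaded image out to the file given
+ * with -o on the command line.
+ *
+ *	if (imagetool_save_subimage(params->outfile, (ulong)data, len))
+ *		return -1;
+ */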
+
+/**
+ * imagetool_get_filesize() - Utility function to obtain the size of a file
+ *
+ * This function prints a message if an error occurs, showing the error that
+ * was obtained.
+ *
+ * @params:	mkimage parameters
+ * @fname:	filename to check
+ * @return size of file, or -ve value on error
+ */
+int imagetool_get_filesize(struct image_tool_params *params, const char *fname);
+
+/**
+ * imagetool_get_source_date() - Get timestamp for build output.
+ *
+ * Gets a timestamp for embedding in a build output. If SOURCE_DATE_EPOCH is
+ * set, it is used; otherwise the given fallback value is returned. If
+ * SOURCE_DATE_EPOCH contains an invalid value, an error message is printed
+ * and 0 is returned.
+ *
+ * @cmdname:	command name
+ * @fallback:	timestamp to use if SOURCE_DATE_EPOCH isn't set
+ * @return timestamp based on SOURCE_DATE_EPOCH
+ */
+time_t imagetool_get_source_date(
+	const char *cmdname,
+	time_t fallback);
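+
+/*
+ * Usage sketch (illustrative; image_set_time() stands in for whatever
+ * header setter the image type actually uses): prefer a reproducible
+ * timestamp over the file's mtime when filling an image header.
+ *
+ *	time_t t = imagetool_get_source_date(params->cmdname, sbuf->st_mtime);
+ *	image_set_time(hdr, t);
+ *
+ * Setting SOURCE_DATE_EPOCH in the environment then pins the build date.
+ */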
+
+/*
+ * Each supported image type has an associated C file with its low-level
+ * code, e.g. default_image.c, fit_image.c
+ */
+
+
+void pbl_load_uboot(int fd, struct image_tool_params *mparams);
+int zynqmpbif_copy_image(int fd, struct image_tool_params *mparams);
+int imx8image_copy_image(int fd, struct image_tool_params *mparams);
+int imx8mimage_copy_image(int fd, struct image_tool_params *mparams);
+
+#define ___cat(a, b) a ## b
+#define __cat(a, b) ___cat(a, b)
+
+/* We need some special handling for this host tool when it runs on Darwin,
+ * since Mach-O section handling is a bit different from ELF section
+ * handling. The differences in detail are:
+ *  a) we have segments which contain sections
+ *  b) we need an API call to get the respective section symbols */
+#if defined(__MACH__)
+#include <mach-o/getsect.h>
+
+#define INIT_SECTION(name)  do {					\
+		unsigned long name ## _len;				\
+		char *__cat(pstart_, name) = getsectdata("__TEXT",	\
+			#name, &__cat(name, _len));			\
+		char *__cat(pstop_, name) = __cat(pstart_, name) +	\
+			__cat(name, _len);				\
+		__cat(__start_, name) = (void *)__cat(pstart_, name);	\
+		__cat(__stop_, name) = (void *)__cat(pstop_, name);	\
+	} while (0)
+#define SECTION(name)   __attribute__((section("__TEXT, " #name)))
+
+struct image_type_params **__start_image_type, **__stop_image_type;
+#else
+#define INIT_SECTION(name) /* no-op for ELF */
+#define SECTION(name)   __attribute__((section(#name)))
+
+/* We construct a table of pointers in an ELF section (pointers generally
+ * go unpadded by gcc).  ld creates boundary syms for us. */
+extern struct image_type_params *__start_image_type[], *__stop_image_type[];
+#endif /* __MACH__ */
+
+#if !defined(__used)
+# if __GNUC__ == 3 && __GNUC_MINOR__ < 3
+#  define __used			__attribute__((__unused__))
+# else
+#  define __used			__attribute__((__used__))
+# endif
+#endif
+
+#define U_BOOT_IMAGE_TYPE( \
+		_id, \
+		_name, \
+		_header_size, \
+		_header, \
+		_check_params, \
+		_verify_header, \
+		_print_header, \
+		_set_header, \
+		_extract_subimage, \
+		_check_image_type, \
+		_fflag_handle, \
+		_vrec_header \
+	) \
+	static struct image_type_params __cat(image_type_, _id) = \
+	{ \
+		.name = _name, \
+		.header_size = _header_size, \
+		.hdr = _header, \
+		.check_params = _check_params, \
+		.verify_header = _verify_header, \
+		.print_header = _print_header, \
+		.set_header = _set_header, \
+		.extract_subimage = _extract_subimage, \
+		.check_image_type = _check_image_type, \
+		.fflag_handle = _fflag_handle, \
+		.vrec_header = _vrec_header \
+	}; \
+	static struct image_type_params *SECTION(image_type) __used \
+		__cat(image_type_ptr_, _id) = &__cat(image_type_, _id)
+
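+/*
+ * Registration sketch for a hypothetical "foo" back end (not part of this
+ * patch): one of these per image type drops a pointer into the image_type
+ * section so the lookup loops in imagetool.c can find it.
+ *
+ *	U_BOOT_IMAGE_TYPE(
+ *		fooimage,
+ *		"Foo Boot Image",
+ *		sizeof(struct foo_header),
+ *		(void *)&foo_header,
+ *		foo_check_params,
+ *		foo_verify_header,
+ *		foo_print_header,
+ *		foo_set_header,
+ *		NULL,
+ *		foo_check_image_types,
+ *		NULL,
+ *		NULL
+ *	);
+ */
+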
+#endif /* _IMAGETOOL_H_ */
diff --git a/tools/u-boot-tools/imagetool.o b/tools/u-boot-tools/imagetool.o
new file mode 100644
index 0000000000000000000000000000000000000000..e0f874ea0d0516653eac14e7b18f1f94f0d7a6dd
Binary files /dev/null and b/tools/u-boot-tools/imagetool.o differ
diff --git a/tools/u-boot-tools/img2brec.sh b/tools/u-boot-tools/img2brec.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0fcdba27d46892aeb801f86558504bdd49e56a68
--- /dev/null
+++ b/tools/u-boot-tools/img2brec.sh
@@ -0,0 +1,388 @@
+#!/bin/sh
+
+# This script converts binary files (u-boot.bin) into so-called
+# bootstrap records that are accepted by Motorola's MC9328MX1/L
+# (a.k.a. DragonBall i.MX) in "Bootstrap Mode"
+#
+# The code for the SynchFlash programming routines is taken from
+# Bootloader\Bin\SyncFlash\programBoot_b.txt contained in
+# Motorola's LINUX_BSP_0_3_8.tar.gz
+#
+# The script could easily be extended for AMD flash routines.
+#
+# 2004-06-23	-	steven.scholz@imc-berlin.de
+
+#################################################################################
+# From the posting to the U-Boot-Users mailing list, 23 Jun 2004:
+# ===============================================================
+# I just hacked a simple script that converts u-boot.bin into a text file
+# containing processor init code, SynchFlash programming code and U-Boot data
+# in the form of so-called b-records.
+#
+# This can be used to program U-Boot into (Synch)Flash using the Bootstrap
+# Mode of the MC9328MX1/L
+# 
+# 0AFE1F3410202E2E2E000000002073756363656564/
+# 0AFE1F44102E0A0000206661696C656420210A0000/
+# 0AFE100000
+# ...
+# MX1ADS Sync-flash Programming Utility v0.5 2002/08/21
+# 
+# Source address (stored in 0x0AFE0000): 0x0A000000
+# Target address (stored in 0x0AFE0004): 0x0C000000
+# Size           (stored in 0x0AFE0008): 0x0001A320
+# 
+# Press any key to start programming ...
+# Erasing ...
+# Blank checking ...
+# Programming ...
+# Verifying flash ... succeed.
+# 
+# Programming finished.
+# 
+# So no need for a BDI2000 anymore... ;-)
+# 
+# This is working on my MX1ADS eval board. Hope this could be useful for 
+# someone.
+#################################################################################
+
+if [ "$#" -lt 1 -o "$#" -gt 2 ] ; then
+    echo "Usage: $0 infile [outfile]" >&2
+    echo "       $0 u-boot.bin [u-boot.brec]" >&2
+    exit 1
+fi
+
+if [ "$#" -ge 1 ] ; then
+    INFILE=$1
+fi
+
+if [ ! -f $INFILE ] ; then
+    echo "Error: file '$INFILE' does not exist." >&2
+    exit 1
+fi
+
+FILESIZE=`filesize $INFILE`
+
+output_init()
+{
+echo "\
+********************************************
+* Initialize I/O Pad Driving Strength      *
+********************************************
+0021B80CC4000003AB
+********************************************
+* Initialize SDRAM                         *
+********************************************
+00221000C492120200   ; pre-charge command
+08200000E4   ; special read
+
+00221000C4A2120200   ; auto-refresh command
+08000000E4   ; 8 special read
+08000000E4   ; 8 special read
+08000000E4   ; 8 special read
+08000000E4   ; 8 special read
+08000000E4   ; 8 special read
+08000000E4   ; 8 special read
+08000000E4   ; 8 special read
+08000000E4   ; 8 special read
+
+00221000C4B2120200   ; set mode register
+08111800E4   ; special read
+
+00221000C482124200   ; set normal mode
+
"
+}
+
+output_uboot()
+{
+echo "\
+********************************************
+* U-Boot image as bootstrap records        *
+*   will be stored in SDRAM at 0x0A000000  *
+********************************************
+
"
+
+cat $INFILE | \
+hexdump -v -e "\"0A0%05.5_ax10\" 16/1 \"%02x\"\"\r\n\"" | \
+tr [:lower:] [:upper:]
+}
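+
+# Quick sanity check of the record layout produced above (illustrative): the
+# first 16 input bytes reappear behind the "0A00000010" prefix, i.e. the
+# destination address 0x0A000000 followed by the byte count 0x10:
+#
+#   printf 'ABCDEFGHIJKLMNOP' | \
+#       hexdump -v -e "\"0A0%05.5_ax10\" 16/1 \"%02x\"\"\r\n\"" | \
+#       tr [:lower:] [:upper:]
+#
+#   which prints: 0A000000104142434445464748494A4B4C4D4E4F50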
+
+output_flashprog()
+{
+echo "\
+********************************************
+* Address of arguments to flashProg        *
+* ---------------------------------------- *
+* Source      : 0x0A000000                 *
+* Destination : 0x0C000000                 *
"
+
+# get the real size of the U-Boot image
+printf "* Size        : 0x%08X                 *\r\n" $FILESIZE
+printf "********************************************\r\n"
+printf "0AFE0000CC0A0000000C000000%08X\r\n" $FILESIZE
+
+#;0AFE0000CC0A0000000C00000000006000
+
+echo "\
+********************************************
+* Flash Program                            *
+********************************************
+0AFE10001008D09FE5AC0000EA00F0A0E1A42DFE0A
+0AFE1010100080FE0A0DC0A0E100D82DE904B04CE2
+0AFE1020109820A0E318309FE5003093E5033082E0
+0AFE103010003093E5013003E2FF3003E20300A0E1
+0AFE10401000A81BE9A01DFE0A0DC0A0E100D82DE9
+0AFE10501004B04CE204D04DE20030A0E10D304BE5
+0AFE1060109820A0E330309FE5003093E5033082E0
+0AFE107010003093E5013903E2000053E3F7FFFF0A
+0AFE1080104020A0E310309FE5003093E5032082E0
+0AFE1090100D305BE5003082E500A81BE9A01DFE0A
+0AFE10A0100DC0A0E100D82DE904B04CE20000A0E1
+0AFE10B010D7FFFFEB0030A0E1FF3003E2000053E3
+0AFE10C010FAFFFF0A10309FE5003093E5003093E5
+0AFE10D010FF3003E20300A0E100A81BE9A01DFE0A
+0AFE10E0100DC0A0E100D82DE904B04CE204D04DE2
+0AFE10F0100030A0E10D304BE50D305BE52332A0E1
+0AFE1100100E304BE50E305BE5090053E30300009A
+0AFE1110100E305BE5373083E20E304BE5020000EA
+0AFE1120100E305BE5303083E20E304BE50E305BE5
+0AFE1130100300A0E1C3FFFFEB0D305BE50F3003E2
+0AFE1140100E304BE50E305BE5090053E30300009A
+0AFE1150100E305BE5373083E20E304BE5020000EA
+0AFE1160100E305BE5303083E20E304BE50E305BE5
+0AFE1170100300A0E1B3FFFFEB00A81BE90DC0A0E1
+0AFE11801000D82DE904B04CE21CD04DE210000BE5
+0AFE11901014100BE518200BE588009FE5E50200EB
+0AFE11A01010301BE51C300BE514301BE520300BE5
+0AFE11B0100030A0E324300BE524201BE518301BE5
+0AFE11C010030052E10000003A120000EA1C004BE2
+0AFE11D010002090E520104BE2003091E500C093E5
+0AFE11E010043083E2003081E5003092E5042082E2
+0AFE11F010002080E50C0053E10200000A0030A0E3
+0AFE12001028300BE5050000EA24301BE5043083E2
+0AFE12101024300BE5E7FFFFEA0130A0E328300BE5
+0AFE12201028001BE500A81BE9E81EFE0A0DC0A0E1
+0AFE12301000D82DE904B04CE214D04DE210000BE5
+0AFE12401014100BE56C009FE5BA0200EB10301BE5
+0AFE12501018300BE50030A0E31C300BE51C201BE5
+0AFE12601014301BE5030052E10000003A0D0000EA
+0AFE12701018304BE2002093E5001092E5042082E2
+0AFE128010002083E5010071E30200000A0030A0E3
+0AFE12901020300BE5050000EA1C301BE5043083E2
+0AFE12A0101C300BE5ECFFFFEA0130A0E320300BE5
+0AFE12B01020001BE500A81BE9001FFE0A0DC0A0E1
+0AFE12C01000D82DE904B04CE224D04DE20130A0E3
+0AFE12D01024300BE5A4229FE58139A0E3023A83E2
+0AFE12E010003082E59820A0E390329FE5003093E5
+0AFE12F010033082E0003093E5023903E2000053E3
+0AFE1300100300001A74229FE58139A0E3033A83E2
+0AFE131010003082E568029FE5860200EBAF36A0E3
+0AFE1320100E3883E2003093E510300BE554029FE5
+0AFE133010800200EB10301BE5233CA0E1FF3003E2
+0AFE1340100300A0E165FFFFEB10301BE52338A0E1
+0AFE135010FF3003E20300A0E160FFFFEB10301BE5
+0AFE1360102334A0E1FF3003E20300A0E15BFFFFEB
+0AFE13701010305BE50300A0E158FFFFEB0A00A0E3
+0AFE13801030FFFFEB0D00A0E32EFFFFEBAF36A0E3
+0AFE1390100E3883E2043083E2003093E514300BE5
+0AFE13A010E4019FE5630200EB14301BE5233CA0E1
+0AFE13B010FF3003E20300A0E148FFFFEB14301BE5
+0AFE13C0102338A0E1FF3003E20300A0E143FFFFEB
+0AFE13D01014301BE52334A0E1FF3003E20300A0E1
+0AFE13E0103EFFFFEB14305BE50300A0E13BFFFFEB
+0AFE13F0100A00A0E313FFFFEB0D00A0E311FFFFEB
+0AFE140010AF36A0E30E3883E2083083E2003093E5
+0AFE14101018300BE574019FE5460200EB18301BE5
+0AFE142010233CA0E1FF3003E20300A0E12BFFFFEB
+0AFE14301018301BE52338A0E1FF3003E20300A0E1
+0AFE14401026FFFFEB18301BE52334A0E1FF3003E2
+0AFE1450100300A0E121FFFFEB18305BE50300A0E1
+0AFE1460101EFFFFEB0A00A0E3F6FEFFEB0D00A0E3
+0AFE147010F4FEFFEBE6FEFFEB0030A0E1FF3003E2
+0AFE148010000053E30000001A020000EA03FFFFEB
+0AFE1490102D004BE5F6FFFFEAF4009FE5250200EB
+0AFE14A010FEFEFFEB2D004BE5CD0000EBC00000EB
+0AFE14B010E0009FE51F0200EB18301BE528300BE5
+0AFE14C01014301BE52C300BE52C001BE5100100EB
+0AFE14D01028301BE5013643E228300BE52C301BE5
+0AFE14E010013683E22C300BE528301BE5000053E3
+0AFE14F010F4FFFFCAAE0000EB14001BE518101BE5
+0AFE15001049FFFFEB0030A0E1FF3003E2000053E3
+0AFE151010E6FFFF0A80009FE5060200EB10001BE5
+0AFE15201014101BE518201BE5D00000EB10001BE5
+0AFE15301014101BE518201BE50FFFFFEB0030A0E1
+0AFE154010FF3003E2000053E30200000A4C009FE5
+0AFE155010F80100EB010000EA44009FE5F50100EB
+0AFE156010930000EB3C009FE5F20100EB0000A0E3
+0AFE157010A4FEFFEB0030A0E30300A0E100A81BE9
+0AFE158010A01DFE0AA41DFE0AE01DFE0A0C1EFE0A
+0AFE159010381EFE0A641EFE0A181FFE0A281FFE0A
+0AFE15A0103C1FFE0A481FFE0AB41EFE0A0DC0A0E1
+0AFE15B01000D82DE904B04CE204D04DE210000BE5
+0AFE15C01010301BE5013043E210300BE5010073E3
+0AFE15D010FAFFFF1A00A81BE90DC0A0E100D82DE9
+0AFE15E01004B04CE208D04DE210000BE510301BE5
+0AFE15F01014300BE514301BE50300A0E100A81BE9
+0AFE1600100DC0A0E100D82DE904B04CE204D04DE2
+0AFE1610102228A0E3012A82E2042082E2E134A0E3
+0AFE162010023883E2033C83E2003082E50333A0E3
+0AFE163010053983E2003093E510300BE500A81BE9
+0AFE1640100DC0A0E100D82DE904B04CE204D04DE2
+0AFE1650102228A0E3012A82E2042082E29134A0E3
+0AFE166010023883E2033C83E2003082E5C136A0E3
+0AFE167010003093E510300BE52228A0E3012A82E2
+0AFE168010042082E2E134A0E3023883E2033C83E2
+0AFE169010003082E50333A0E3073983E20020A0E3
+0AFE16A010002083E52228A0E3012A82E2042082E2
+0AFE16B0108134A0E3023883E2033C83E2003082E5
+0AFE16C0100333A0E3003093E510300BE5CBFFFFEB
+0AFE16D01010301BE50300A0E100A81BE90DC0A0E1
+0AFE16E01000D82DE904B04CE208D04DE2D3FFFFEB
+0AFE16F0100030A0E110300BE510301BE5023503E2
+0AFE170010000053E30500000A10301BE5073703E2
+0AFE171010000053E30100000A10001BE5ADFFFFEB
+0AFE17201010301BE5803003E2000053E30500000A
+0AFE17301010301BE51C3003E2000053E30100000A
+0AFE17401010001BE5A3FFFFEB10201BE50235A0E3
+0AFE175010803083E2030052E10200001A0130A0E3
+0AFE17601014300BE5010000EA0030A0E314300BE5
+0AFE17701014001BE500A81BE90DC0A0E100D82DE9
+0AFE17801004B04CE204D04DE22228A0E3012A82E2
+0AFE179010042082E29134A0E3023883E2033C83E2
+0AFE17A010003082E5C136A0E3003093E510300BE5
+0AFE17B01000A81BE90DC0A0E100D82DE904B04CE2
+0AFE17C010ECFFFFEB2228A0E3012A82E2042082E2
+0AFE17D0108134A0E3023883E2033C83E2003082E5
+0AFE17E01000A81BE90DC0A0E100D82DE904B04CE2
+0AFE17F01004D04DE22228A0E3012A82E2042082E2
+0AFE1800102238A0E3013A83E2043083E2003093E5
+0AFE181010023183E3003082E52228A0E3012A82E2
+0AFE1820102238A0E3013A83E2003093E5023183E3
+0AFE183010003082E5FA0FA0E35BFFFFEB2228A0E3
+0AFE184010012A82E2042082E2B134A0E3023883E2
+0AFE185010033C83E2003082E50333A0E3233983E2
+0AFE186010033B83E2003093E510300BE500A81BE9
+0AFE1870100DC0A0E100D82DE904B04CE21CD04DE2
+0AFE18801010000BE514100BE518200BE50030A0E3
+0AFE1890101C300BE51C201BE518301BE5030052E1
+0AFE18A0100000003A190000EAB2FFFFEB2228A0E3
+0AFE18B010012A82E2042082E2F134A0E3023883E2
+0AFE18C010033C83E2003082E514201BE51C301BE5
+0AFE18D010031082E010201BE51C301BE5033082E0
+0AFE18E010003093E5003081E57BFFFFEB0030A0E1
+0AFE18F010FF3003E2000053E3FAFFFF0AACFFFFEB
+0AFE1900101C301BE5043083E21C300BE5E0FFFFEA
+0AFE19101000A81BE90DC0A0E100D82DE904B04CE2
+0AFE1920100CD04DE210000BE52228A0E3012A82E2
+0AFE193010042082E28134A0E3023883E2033C83E2
+0AFE194010003082E510301BE5003093E514300BE5
+0AFE1950102228A0E3012A82E2042082E29134A0E3
+0AFE196010023883E2033C83E2003082E510301BE5
+0AFE197010003093E518300BE52228A0E3012A82E2
+0AFE198010042082E2E134A0E3023883E2033C83E2
+0AFE199010003082E50229A0E310301BE5032082E0
+0AFE19A0100030A0E3003082E52228A0E3012A82E2
+0AFE19B010042082E28134A0E3023883E2033C83E2
+0AFE19C010003082E510201BE50D3AA0E3D03083E2
+0AFE19D010033883E1003082E53FFFFFEB0030A0E1
+0AFE19E010FF3003E2000053E3FAFFFF0A70FFFFEB
+0AFE19F01000A81BE90DC0A0E100D82DE904B04CE2
+0AFE1A00105CFFFFEB2228A0E3012A82E2042082E2
+0AFE1A1010E134A0E3023883E2033C83E2003082E5
+0AFE1A20100333A0E3033983E20020A0E3002083E5
+0AFE1A30102228A0E3012A82E2042082E28134A0E3
+0AFE1A4010023883E2033C83E2003082E50323A0E3
+0AFE1A5010032982E20339A0E3C03083E2033883E1
+0AFE1A6010003082E500A81BE90DC0A0E100D82DE9
+0AFE1A701004B04CE23FFFFFEB2228A0E3012A82E2
+0AFE1A8010042082E2E134A0E3023883E2033C83E2
+0AFE1A9010003082E50333A0E30A3983E20020A0E3
+0AFE1AA010002083E52228A0E3012A82E2042082E2
+0AFE1AB0108134A0E3023883E2033C83E2003082E5
+0AFE1AC0100323A0E30A2982E20339A0E3C03083E2
+0AFE1AD010033883E1003082E500A81BE90DC0A0E1
+0AFE1AE01000D82DE904B04CE28729A0E3222E82E2
+0AFE1AF0108739A0E3223E83E2003093E51E3CC3E3
+0AFE1B0010003082E58729A0E38E2F82E28739A0E3
+0AFE1B10108E3F83E2003093E51E3CC3E3003082E5
+0AFE1B20108139A0E3823D83E20520A0E3002083E5
+0AFE1B30108129A0E3822D82E2042082E20139A0E3
+0AFE1B4010273083E2003082E58139A0E3823D83E2
+0AFE1B50100C3083E20120A0E3002083E58129A0E3
+0AFE1B6010822D82E2102082E22A3DA0E3013083E2
+0AFE1B7010003082E58139A0E3823D83E2243083E2
+0AFE1B80100F20A0E3002083E58139A0E3823D83E2
+0AFE1B9010283083E28A20A0E3002083E58139A0E3
+0AFE1BA010823D83E22C3083E20820A0E3002083E5
+0AFE1BB01000A81BE90DC0A0E100D82DE904B04CE2
+0AFE1BC0108139A0E3823D83E2183083E2003093E5
+0AFE1BD010013003E2FF3003E20300A0E100A81BE9
+0AFE1BE0100DC0A0E100D82DE904B04CE204D04DE2
+0AFE1BF0100030A0E10D304BE58139A0E3823D83E2
+0AFE1C0010183083E2003093E5013903E2000053E3
+0AFE1C1010F8FFFF0A8139A0E3813D83E20D205BE5
+0AFE1C2010002083E50D305BE50A0053E30A00001A
+0AFE1C30108139A0E3823D83E2183083E2003093E5
+0AFE1C4010013903E2000053E3F8FFFF0A8139A0E3
+0AFE1C5010813D83E20D20A0E3002083E500A81BE9
+0AFE1C60100DC0A0E100D82DE904B04CE20000A0E1
+0AFE1C7010CFFFFFEB0030A0E1FF3003E2000053E3
+0AFE1C8010FAFFFF0A8139A0E3023A83E2003093E5
+0AFE1C9010FF3003E20300A0E100A81BE90DC0A0E1
+0AFE1CA01000D82DE904B04CE204D04DE20030A0E1
+0AFE1CB0100D304BE50D305BE52332A0E10E304BE5
+0AFE1CC0100E305BE5090053E30300009A0E305BE5
+0AFE1CD010373083E20E304BE5020000EA0E305BE5
+0AFE1CE010303083E20E304BE50E305BE50300A0E1
+0AFE1CF010BAFFFFEB0D305BE50F3003E20E304BE5
+0AFE1D00100E305BE5090053E30300009A0E305BE5
+0AFE1D1010373083E20E304BE5020000EA0E305BE5
+0AFE1D2010303083E20E304BE50E305BE50300A0E1
+0AFE1D3010AAFFFFEB00A81BE90DC0A0E100D82DE9
+0AFE1D401004B04CE204D04DE210000BE510301BE5
+0AFE1D50100030D3E5000053E30000001A080000EA
+0AFE1D601010104BE2003091E50320A0E10020D2E5
+0AFE1D7010013083E2003081E50200A0E197FFFFEB
+0AFE1D8008F1FFFFEA00A81BE9
+0AFE1DA4100A0D4D58314144532053796E632D666C
+0AFE1DB4106173682050726F6772616D6D696E6720
+0AFE1DC4105574696C6974792076302E3520323030
+0AFE1DD410322F30382F32310A0D000000536F7572
+0AFE1DE41063652061646472657373202873746F72
+0AFE1DF410656420696E2030783041464530303030
+0AFE1E0410293A2030780000005461726765742061
+0AFE1E1410646472657373202873746F7265642069
+0AFE1E24106E2030783041464530303034293A2030
+0AFE1E34107800000053697A652020202020202020
+0AFE1E44102020202873746F72656420696E203078
+0AFE1E54103041464530303038293A203078000000
+0AFE1E6410507265737320616E79206B657920746F
+0AFE1E74102073746172742070726F6772616D6D69
+0AFE1E84106E67202E2E2E00000A0D45726173696E
+0AFE1E94106720666C617368202E2E2E000A0D5072
+0AFE1EA4106F6772616D6D696E67202E2E2E000000
+0AFE1EB4100A0D50726F6772616D6D696E67206669
+0AFE1EC4106E69736865642E0A0D50726573732027
+0AFE1ED410612720746F20636F6E74696E7565202E
+0AFE1EE4102E2E2E000A0D566572696679696E6720
+0AFE1EF410666C617368202E2E2E0000000A0D426C
+0AFE1F0410616E6B20636865636B696E67202E2E2E
+0AFE1F1410000000000A45726173696E67202E2E2E
+0AFE1F2410000000000A50726F6772616D6D696E67
+0AFE1F3410202E2E2E000000002073756363656564
+0AFE1F44102E0A0000206661696C656420210A0000
+0AFE100000
+
"
+}
+
+#########################################################
+
+if [ "$#" -eq 2 ] ; then
+    output_init > $2
+    output_uboot >> $2
+    output_flashprog >> $2
+else
+    output_init;
+    output_uboot;
+    output_flashprog;
+fi
diff --git a/tools/u-boot-tools/img2srec b/tools/u-boot-tools/img2srec
new file mode 100755
index 0000000000000000000000000000000000000000..c5b3dbf1f3ab3770fd5ca4d8a17251e8dffbced2
Binary files /dev/null and b/tools/u-boot-tools/img2srec differ
diff --git a/tools/u-boot-tools/img2srec.c b/tools/u-boot-tools/img2srec.c
new file mode 100644
index 0000000000000000000000000000000000000000..75efd76e0e3fe9758b83fc1946729fd2c122ab05
--- /dev/null
+++ b/tools/u-boot-tools/img2srec.c
@@ -0,0 +1,372 @@
+/*************************************************************************
+|  COPYRIGHT (c) 2000 BY ABATRON AG
+|*************************************************************************
+|
+|  PROJECT NAME: Linux Image to S-record Conversion Utility
+|  FILENAME    : img2srec.c
+|
+|  COMPILER    : GCC
+|
+|  TARGET OS   : LINUX / UNIX
+|  TARGET HW   : -
+|
+|  PROGRAMMER  : Abatron / RD
+|  CREATION    : 07.07.00
+|
+|*************************************************************************
+|
+|  DESCRIPTION :
+|
+|  Utility to convert a Linux Boot Image to S-record:
+|  ==================================================
+|
+|  This command line utility can be used to convert a Linux boot image
+|  (zimage.initrd) to S-Record format used for flash programming.
+|  This conversion takes care of the special sections "IMAGE" and "INITRD".
+|
+|  img2srec [-o offset] image > image.srec
+|
+|
+|  Build the utility:
+|  ==================
+|
+|  To build the utility use GCC as follows:
+|
+|  gcc img2srec.c -o img2srec
+|
+|
+|*************************************************************************
+|
+|
+|  UPDATES     :
+|
+|  DATE      NAME  CHANGES
+|  -----------------------------------------------------------
+|  Latest update
+|
+|  07.07.00  aba   Initial release
+|
+|*************************************************************************/
+
+/*************************************************************************
+|  INCLUDES
+|*************************************************************************/
+
+#include "os_support.h"
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <elf.h>
+#include <unistd.h>
+#include <errno.h>
+
+/*************************************************************************
+|  FUNCTIONS
+|*************************************************************************/
+
+static char* ExtractHex (uint32_t* value,  char* getPtr)
+{
+  uint32_t num;
+  uint32_t digit;
+  uint8_t  c;
+
+  while (*getPtr == ' ') getPtr++;
+  num = 0;
+  for (;;) {
+    c = *getPtr;
+    if      ((c >= '0') && (c <= '9')) digit = (uint32_t)(c - '0');
+    else if ((c >= 'A') && (c <= 'F')) digit = (uint32_t)(c - 'A' + 10);
+    else if ((c >= 'a') && (c <= 'f')) digit = (uint32_t)(c - 'a' + 10);
+    else break;
+    num <<= 4;
+    num += digit;
+    getPtr++;
+  } /* for */
+  *value = num;
+  return getPtr;
+} /* ExtractHex */
+
+static char* ExtractDecimal (uint32_t* value,  char* getPtr)
+{
+  uint32_t num;
+  uint32_t digit;
+  uint8_t  c;
+
+  while (*getPtr == ' ') getPtr++;
+  num = 0;
+  for (;;) {
+    c = *getPtr;
+    if      ((c >= '0') && (c <= '9')) digit = (uint32_t)(c - '0');
+    else break;
+    num *= 10;
+    num += digit;
+    getPtr++;
+  } /* for */
+  *value = num;
+  return getPtr;
+} /* ExtractDecimal */
+
+
+static void ExtractNumber (uint32_t* value,  char* getPtr)
+{
+  bool  neg = false;
+
+  while (*getPtr == ' ') getPtr++;
+  if (*getPtr == '-') {
+    neg = true;
+    getPtr++;
+  } /* if */
+  if ((*getPtr == '0') && ((*(getPtr+1) == 'x') || (*(getPtr+1) == 'X'))) {
+    getPtr +=2;
+    (void)ExtractHex(value, getPtr);
+  } /* if */
+  else {
+    (void)ExtractDecimal(value, getPtr);
+  } /* else */
+  if (neg) *value = -(*value);
+} /* ExtractNumber */
+
+
+static uint8_t* ExtractWord(uint16_t* value, uint8_t* buffer)
+{
+  uint16_t x;
+  x = (uint16_t)*buffer++;
+  x = (x<<8) + (uint16_t)*buffer++;
+  *value = x;
+  return buffer;
+} /* ExtractWord */
+
+
+static uint8_t* ExtractLong(uint32_t* value, uint8_t* buffer)
+{
+  uint32_t x;
+  x = (uint32_t)*buffer++;
+  x = (x<<8) + (uint32_t)*buffer++;
+  x = (x<<8) + (uint32_t)*buffer++;
+  x = (x<<8) + (uint32_t)*buffer++;
+  *value = x;
+  return buffer;
+} /* ExtractLong */
+
+
+static uint8_t* ExtractBlock(uint16_t count, uint8_t* data, uint8_t* buffer)
+{
+  while (count--) *data++ = *buffer++;
+  return buffer;
+} /* ExtractBlock */
+
+
+static char* WriteHex(char* pa, uint8_t value, uint16_t* pCheckSum)
+{
+  uint16_t  temp;
+
+  static  char ByteToHex[] = "0123456789ABCDEF";
+
+  *pCheckSum += value;
+  temp  = value / 16;
+  *pa++ = ByteToHex[temp];
+  temp  = value % 16;
+  *pa++ = ByteToHex[temp];
+  return pa;
+}
+
+
+static char* BuildSRecord(char* pa, uint16_t sType, uint32_t addr,
+			  const uint8_t* data, int nCount)
+{
+  uint16_t  addrLen;
+  uint16_t  sRLen;
+  uint16_t  checkSum;
+  uint16_t  i;
+
+  switch (sType) {
+  case 0:
+  case 1:
+  case 9:
+    addrLen = 2;
+    break;
+  case 2:
+  case 8:
+    addrLen = 3;
+    break;
+  case 3:
+  case 7:
+    addrLen = 4;
+    break;
+  default:
+    return pa;
+  } /* switch */
+
+  *pa++ = 'S';
+  *pa++ = (char)(sType + '0');
+  sRLen = addrLen + nCount + 1;
+  checkSum = 0;
+  pa = WriteHex(pa, (uint8_t)sRLen, &checkSum);
+
+  /* Write address field */
+  for (i = 1; i <= addrLen; i++) {
+    pa = WriteHex(pa, (uint8_t)(addr >> (8 * (addrLen - i))), &checkSum);
+  } /* for */
+
+  /* Write code/data fields */
+  for (i = 0; i < nCount; i++) {
+    pa = WriteHex(pa, *data++, &checkSum);
+  } /* for */
+
+  /* Write checksum field */
+  checkSum = ~checkSum;
+  pa = WriteHex(pa, (uint8_t)checkSum, &checkSum);
+  *pa++ = '\0';
+  return pa;
+}
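+
+/*
+ * Record layout reminder (illustrative, spaces added for readability; the
+ * code above emits the record unspaced): an S3 record is "S3", a byte
+ * count, a 32-bit address, the data and a checksum, where the checksum is
+ * the one's complement of the low byte of the sum of the count, address
+ * and data bytes. Four bytes DE AD BE EF at address 0x00100000 give:
+ *
+ *   S3 09 00100000 DEADBEEF AE
+ */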
+
+
+static void ConvertELF(char* fileName, uint32_t loadOffset)
+{
+  FILE*         file;
+  int           i;
+  int           rxCount;
+  uint8_t          rxBlock[1024];
+  uint32_t         loadSize;
+  uint32_t         firstAddr;
+  uint32_t         loadAddr;
+  uint32_t         loadDiff = 0;
+  Elf32_Ehdr    elfHeader;
+  Elf32_Shdr    sectHeader[32];
+  uint8_t*         getPtr;
+  char          srecLine[128];
+  char		*hdr_name;
+
+
+  /* open file */
+  if ((file = fopen(fileName,"rb")) == NULL) {
+    fprintf (stderr, "Can't open %s: %s\n", fileName, strerror(errno));
+    return;
+  } /* if */
+
+  /* read ELF header */
+  rxCount = fread(rxBlock, 1, sizeof elfHeader, file);
+  getPtr = ExtractBlock(sizeof elfHeader.e_ident, elfHeader.e_ident, rxBlock);
+  getPtr = ExtractWord(&elfHeader.e_type, getPtr);
+  getPtr = ExtractWord(&elfHeader.e_machine, getPtr);
+  getPtr = ExtractLong((uint32_t *)&elfHeader.e_version, getPtr);
+  getPtr = ExtractLong((uint32_t *)&elfHeader.e_entry, getPtr);
+  getPtr = ExtractLong((uint32_t *)&elfHeader.e_phoff, getPtr);
+  getPtr = ExtractLong((uint32_t *)&elfHeader.e_shoff, getPtr);
+  getPtr = ExtractLong((uint32_t *)&elfHeader.e_flags, getPtr);
+  getPtr = ExtractWord(&elfHeader.e_ehsize, getPtr);
+  getPtr = ExtractWord(&elfHeader.e_phentsize, getPtr);
+  getPtr = ExtractWord(&elfHeader.e_phnum, getPtr);
+  getPtr = ExtractWord(&elfHeader.e_shentsize, getPtr);
+  getPtr = ExtractWord(&elfHeader.e_shnum, getPtr);
+  getPtr = ExtractWord(&elfHeader.e_shstrndx, getPtr);
+  if (    (rxCount              != sizeof elfHeader)
+       || (elfHeader.e_ident[0] != ELFMAG0)
+       || (elfHeader.e_ident[1] != ELFMAG1)
+       || (elfHeader.e_ident[2] != ELFMAG2)
+       || (elfHeader.e_ident[3] != ELFMAG3)
+       || (elfHeader.e_type     != ET_EXEC)
+     ) {
+    fclose(file);
+    fprintf (stderr, "*** illegal file format\n");
+    return;
+  } /* if */
+
+  /* read all section headers */
+  fseek(file, elfHeader.e_shoff, SEEK_SET);
+  for (i = 0; i < elfHeader.e_shnum; i++) {
+    rxCount = fread(rxBlock, 1, sizeof sectHeader[0], file);
+    getPtr = ExtractLong((uint32_t *)&sectHeader[i].sh_name, rxBlock);
+    getPtr = ExtractLong((uint32_t *)&sectHeader[i].sh_type, getPtr);
+    getPtr = ExtractLong((uint32_t *)&sectHeader[i].sh_flags, getPtr);
+    getPtr = ExtractLong((uint32_t *)&sectHeader[i].sh_addr, getPtr);
+    getPtr = ExtractLong((uint32_t *)&sectHeader[i].sh_offset, getPtr);
+    getPtr = ExtractLong((uint32_t *)&sectHeader[i].sh_size, getPtr);
+    getPtr = ExtractLong((uint32_t *)&sectHeader[i].sh_link, getPtr);
+    getPtr = ExtractLong((uint32_t *)&sectHeader[i].sh_info, getPtr);
+    getPtr = ExtractLong((uint32_t *)&sectHeader[i].sh_addralign, getPtr);
+    getPtr = ExtractLong((uint32_t *)&sectHeader[i].sh_entsize, getPtr);
+    if (rxCount != sizeof sectHeader[0]) {
+      fclose(file);
+      fprintf (stderr, "*** illegal file format\n");
+      return;
+    } /* if */
+  } /* for */
+
+  if ((hdr_name = strrchr(fileName, '/')) == NULL) {
+    hdr_name = fileName;
+  } else {
+    ++hdr_name;
+  }
+  /* write start record */
+  (void)BuildSRecord(srecLine, 0, 0, (uint8_t *)hdr_name, strlen(hdr_name));
+  printf("%s\r\n",srecLine);
+
+  /* write data records */
+  firstAddr = ~0;
+  loadAddr  =  0;
+  for (i = 0; i < elfHeader.e_shnum; i++) {
+    if (    (sectHeader[i].sh_type == SHT_PROGBITS)
+	 && (sectHeader[i].sh_size != 0)
+	 ) {
+      loadSize = sectHeader[i].sh_size;
+      if (sectHeader[i].sh_flags != 0) {
+	loadAddr = sectHeader[i].sh_addr;
+	loadDiff = loadAddr - sectHeader[i].sh_offset;
+      } /* if */
+      else {
+	loadAddr = sectHeader[i].sh_offset + loadDiff;
+      } /* else */
+
+      if (loadAddr < firstAddr)
+	firstAddr = loadAddr;
+
+      /* build s-records */
+      loadSize = sectHeader[i].sh_size;
+      fseek(file, sectHeader[i].sh_offset, SEEK_SET);
+      while (loadSize) {
+	rxCount = fread(rxBlock, 1, (loadSize > 32) ? 32 : loadSize, file);
+	if (rxCount < 0) {
+	  fclose(file);
+	  fprintf (stderr, "*** illegal file format\n");
+	  return;
+	} /* if */
+	(void)BuildSRecord(srecLine, 3, loadAddr + loadOffset, rxBlock, rxCount);
+	loadSize -= rxCount;
+	loadAddr += rxCount;
+	printf("%s\r\n",srecLine);
+      } /* while */
+    } /* if */
+  } /* for */
+
+  /* add end record */
+  (void)BuildSRecord(srecLine, 7, firstAddr + loadOffset, 0, 0);
+  printf("%s\r\n",srecLine);
+  fclose(file);
+} /* ConvertELF */
+
+
+/*************************************************************************
+|  MAIN
+|*************************************************************************/
+
+int main( int argc, char *argv[ ])
+{
+  uint32_t offset;
+
+  if (argc == 2) {
+    ConvertELF(argv[1], 0);
+  } /* if */
+  else if ((argc == 4) && (strcmp(argv[1], "-o") == 0)) {
+    ExtractNumber(&offset, argv[2]);
+    ConvertELF(argv[3], offset);
+  } /* if */
+  else {
+    fprintf (stderr, "Usage: img2srec [-o offset] <image>\n");
+  } /* if */
+
+  return 0;
+} /* main */
diff --git a/tools/u-boot-tools/imx8image.c b/tools/u-boot-tools/imx8image.c
new file mode 100644
index 0000000000000000000000000000000000000000..0d856b9d94f967d8549589d21302e13ad221497f
--- /dev/null
+++ b/tools/u-boot-tools/imx8image.c
@@ -0,0 +1,992 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ */
+
+#include "imx8image.h"
+
+static int p_idx;
+static int sector_size;
+static soc_type_t soc;
+static int container = -1;
+static int32_t core_type = CFG_CORE_INVALID;
+static bool emmc_fastboot;
+static image_t param_stack[IMG_STACK_SIZE];
+static uint8_t fuse_version;
+static uint16_t sw_version;
+static uint32_t custom_partition;
+static uint32_t scfw_flags;
+
+int imx8image_check_params(struct image_tool_params *params)
+{
+	return 0;
+}
+
+static void imx8image_set_header(void *ptr, struct stat *sbuf, int ifd,
+				 struct image_tool_params *params)
+{
+}
+
+static void imx8image_print_header(const void *ptr)
+{
+}
+
+static int imx8image_check_image_types(uint8_t type)
+{
+	return (type == IH_TYPE_IMX8IMAGE) ? EXIT_SUCCESS : EXIT_FAILURE;
+}
+
+static table_entry_t imx8image_cmds[] = {
+	{CMD_BOOT_FROM,         "BOOT_FROM",            "boot command",	      },
+	{CMD_FUSE_VERSION,      "FUSE_VERSION",         "fuse version",	      },
+	{CMD_SW_VERSION,        "SW_VERSION",           "sw version",	      },
+	{CMD_MSG_BLOCK,         "MSG_BLOCK",            "msg block",	      },
+	{CMD_FILEOFF,           "FILEOFF",              "fileoff",	      },
+	{CMD_FLAG,              "FLAG",                 "flag",	      },
+	{CMD_APPEND,            "APPEND",               "append a container", },
+	{CMD_PARTITION,         "PARTITION",            "new partition",      },
+	{CMD_SOC_TYPE,          "SOC_TYPE",             "soc type",           },
+	{CMD_CONTAINER,         "CONTAINER",            "new container",      },
+	{CMD_IMAGE,             "IMAGE",                "new image",          },
+	{CMD_DATA,              "DATA",                 "new data",           },
+	{-1,                    "",                     "",	              },
+};
+
+static table_entry_t imx8image_core_entries[] = {
+	{CFG_SCU,	"SCU",			"scu core",	},
+	{CFG_M40,	"M40",			"M4 core 0",	},
+	{CFG_M41,	"M41",			"M4 core 1",	},
+	{CFG_A35,	"A35",			"A35 core",	},
+	{CFG_A53,	"A53",			"A53 core",	},
+	{CFG_A72,	"A72",			"A72 core",	},
+	{-1,		"",			"",		},
+};
+
+static table_entry_t imx8image_sector_size[] = {
+	{0x400,		"sd",			"sd/emmc",},
+	{0x400,		"emmc_fastboot",	"emmc fastboot",},
+	{0x400,		"fspi",			"flexspi",	},
+	{0x1000,	"nand_4k",		"nand 4K",	},
+	{0x2000,	"nand_8k",		"nand 8K",	},
+	{0x4000,	"nand_16k",		"nand 16K",	},
+	{-1,		"",			"Invalid",	},
+};
+
+static void parse_cfg_cmd(image_t *param_stack, int32_t cmd, char *token,
+			  char *name, int lineno)
+{
+	switch (cmd) {
+	case CMD_BOOT_FROM:
+		sector_size = get_table_entry_id(imx8image_sector_size,
+						 "imximage boot option",
+						 token);
+		if (!strncmp("emmc_fastboot", token, 13))
+			emmc_fastboot = true;
+		break;
+	case CMD_FUSE_VERSION:
+		fuse_version = (uint8_t)(strtoll(token, NULL, 0) & 0xFF);
+		break;
+	case CMD_SW_VERSION:
+		sw_version = (uint16_t)(strtoll(token, NULL, 0) & 0xFFFF);
+		break;
+	case CMD_FILEOFF:
+		param_stack[p_idx].option = FILEOFF;
+		param_stack[p_idx++].dst = (uint32_t)strtoll(token, NULL, 0);
+		break;
+	case CMD_MSG_BLOCK:
+		param_stack[p_idx].option = MSG_BLOCK;
+		param_stack[p_idx].filename = token;
+		break;
+	case CMD_FLAG:
+		param_stack[p_idx].option = FLAG;
+		param_stack[p_idx++].entry = (uint32_t)strtoll(token, NULL, 0);
+		break;
+	case CMD_APPEND:
+		param_stack[p_idx].option = APPEND;
+		param_stack[p_idx++].filename = token;
+		break;
+	case CMD_PARTITION:
+		param_stack[p_idx].option = PARTITION;
+		param_stack[p_idx++].entry = (uint32_t)strtoll(token, NULL, 0);
+		break;
+	case CMD_SOC_TYPE:
+		if (!strncmp(token, "IMX8QX", 6)) {
+			soc = QX;
+		} else if (!strncmp(token, "IMX8QM", 6)) {
+			soc = QM;
+		} else {
+			fprintf(stderr, "Unknown CMD_SOC_TYPE\n");
+			exit(EXIT_FAILURE);
+		}
+		break;
+	case CMD_IMAGE:
+	case CMD_DATA:
+		core_type = get_table_entry_id(imx8image_core_entries,
+					       "imx8image core entries",
+					       token);
+		if (core_type < 0) {
+			fprintf(stderr, "Wrong IMAGE core_type %s\n", token);
+			exit(EXIT_FAILURE);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static void parse_cfg_fld(image_t *param_stack, int32_t *cmd, char *token,
+			  char *name, int lineno, int fld)
+{
+	switch (fld) {
+	case CFG_COMMAND:
+		*cmd = get_table_entry_id(imx8image_cmds, "imx8image cmds",
+					  token);
+		if (*cmd < 0) {
+			fprintf(stderr, "Error: %s[%d] - Invalid command (%s)\n", name, lineno, token);
+			exit(EXIT_FAILURE);
+		}
+
+		if (*cmd == CMD_CONTAINER) {
+			fprintf(stdout, "New Container: \t%d\n", ++container);
+			param_stack[p_idx++].option = NEW_CONTAINER;
+		}
+		break;
+	case CFG_CORE_TYPE:
+		parse_cfg_cmd(param_stack, *cmd, token, name, lineno);
+		break;
+	case CFG_IMAGE_NAME:
+		if (*cmd == CMD_MSG_BLOCK) {
+			if (!strncmp(token, "fuse", 4)) {
+				param_stack[p_idx].ext = SC_R_OTP;
+			} else if (!strncmp(token, "debug", 5)) {
+				param_stack[p_idx].ext = SC_R_DEBUG;
+			} else if (!strncmp(token, "field", 5)) {
+				param_stack[p_idx].ext = SC_R_ROM_0;
+			} else {
+				fprintf(stderr, "MSG type not found %s\n", token);
+				exit(EXIT_FAILURE);
+			}
+			break;
+		}
+		switch (core_type) {
+		case CFG_SCU:
+			param_stack[p_idx].option = SCFW;
+			param_stack[p_idx++].filename = token;
+			break;
+		case CFG_M40:
+			param_stack[p_idx].option = M40;
+			param_stack[p_idx].ext = 0;
+			param_stack[p_idx].filename = token;
+			break;
+		case CFG_M41:
+			param_stack[p_idx].option = M41;
+			param_stack[p_idx].ext = 1;
+			param_stack[p_idx].filename = token;
+			break;
+		case CFG_A35:
+			param_stack[p_idx].ext = CORE_CA35;
+			param_stack[p_idx].option =
+				(*cmd == CMD_DATA) ? DATA : AP;
+			param_stack[p_idx].filename = token;
+			break;
+		case CFG_A53:
+			param_stack[p_idx].ext = CORE_CA53;
+			param_stack[p_idx].option =
+				(*cmd == CMD_DATA) ? DATA : AP;
+			param_stack[p_idx].filename = token;
+			break;
+		case CFG_A72:
+			param_stack[p_idx].ext = CORE_CA72;
+			param_stack[p_idx].option =
+				(*cmd == CMD_DATA) ? DATA : AP;
+			param_stack[p_idx].filename = token;
+			break;
+		}
+		break;
+	case CFG_LOAD_ADDR:
+		if (*cmd == CMD_MSG_BLOCK) {
+			param_stack[p_idx++].entry =
+				(uint32_t)strtoll(token, NULL, 0);
+			break;
+		}
+		switch (core_type) {
+		case CFG_SCU:
+			break;
+		case CFG_M40:
+		case CFG_M41:
+		case CFG_A35:
+		case CFG_A53:
+		case CFG_A72:
+			param_stack[p_idx++].entry =
+				(uint32_t)strtoll(token, NULL, 0);
+			break;
+		}
+	default:
+		break;
+	}
+}
+
+static uint32_t parse_cfg_file(image_t *param_stack, char *name)
+{
+	FILE *fd = NULL;
+	char *line = NULL;
+	char *token, *saveptr1, *saveptr2;
+	int lineno = 0;
+	int fld;
+	size_t len;
+	int32_t cmd;
+
+	fd = fopen(name, "r");
+	if (fd == 0) {
+		fprintf(stderr, "Error: %s - Can't open cfg file\n", name);
+		exit(EXIT_FAILURE);
+	}
+
+	/*
+	 * Very simple parsing, line starting with # are comments
+	 * and are dropped
+	 */
+	while ((getline(&line, &len, fd)) > 0) {
+		lineno++;
+
+		token = strtok_r(line, "\r\n", &saveptr1);
+		if (!token)
+			continue;
+
+		/* Check inside the single line */
+		for (fld = CFG_COMMAND, cmd = CFG_INVALID,
+		     line = token; ; line = NULL, fld++) {
+			token = strtok_r(line, " \t", &saveptr2);
+			if (!token)
+				break;
+
+			/* Drop all text starting with '#' as comments */
+			if (token[0] == '#')
+				break;
+
+			parse_cfg_fld(param_stack, &cmd, token, name, lineno,
+				      fld);
+		}
+	}
+
+	return 0;
+}
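+
+/*
+ * Example of the cfg syntax accepted above (file names and addresses are
+ * placeholders, not part of this patch):
+ *
+ *	BOOT_FROM sd
+ *	SOC_TYPE  IMX8QM
+ *	APPEND    ahab-container.img
+ *	CONTAINER
+ *	IMAGE     SCU scfw_tcm.bin
+ *	IMAGE     A53 bl31.bin   0x80000000
+ *	DATA      A53 u-boot.bin 0x80020000
+ *
+ * Each line is "COMMAND <arg> [<file>] [<load address>]"; '#' starts a
+ * comment and blank lines are ignored.
+ */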
+
+static void check_file(struct stat *sbuf, char *filename)
+{
+	int tmp_fd  = open(filename, O_RDONLY | O_BINARY);
+
+	if (tmp_fd < 0) {
+		fprintf(stderr, "%s: Can't open: %s\n",
+			filename, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (fstat(tmp_fd, sbuf) < 0) {
+		fprintf(stderr, "%s: Can't stat: %s\n",
+			filename, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	close(tmp_fd);
+}
+
+static void copy_file_aligned(int ifd, const char *datafile, int offset,
+			      int align)
+{
+	int dfd;
+	struct stat sbuf;
+	unsigned char *ptr;
+	uint8_t zeros[0x4000];
+	int size;
+	int ret;
+
+	if (align > 0x4000) {
+		fprintf(stderr, "Wrong alignment requested %d\n", align);
+		exit(EXIT_FAILURE);
+	}
+
+	memset(zeros, 0, sizeof(zeros));
+
+	dfd = open(datafile, O_RDONLY | O_BINARY);
+	if (dfd < 0) {
+		fprintf(stderr, "Can't open %s: %s\n",
+			datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (fstat(dfd, &sbuf) < 0) {
+		fprintf(stderr, "Can't stat %s: %s\n",
+			datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (sbuf.st_size == 0)
+		goto close;
+
+	ptr = mmap(0, sbuf.st_size, PROT_READ, MAP_SHARED, dfd, 0);
+	if (ptr == MAP_FAILED) {
+		fprintf(stderr, "Can't read %s: %s\n",
+			datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	size = sbuf.st_size;
+	ret = lseek(ifd, offset, SEEK_SET);
+	if (ret < 0) {
+		fprintf(stderr, "%s: lseek error %s\n",
+			__func__, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (write(ifd, ptr, size) != size) {
+		fprintf(stderr, "Write error %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	align = ALIGN(size, align) - size;
+
+	if (write(ifd, (char *)&zeros, align) != align) {
+		fprintf(stderr, "Write error: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	munmap((void *)ptr, sbuf.st_size);
+close:
+	close(dfd);
+}
+
+static void copy_file (int ifd, const char *datafile, int pad, int offset)
+{
+	int dfd;
+	struct stat sbuf;
+	unsigned char *ptr;
+	int tail;
+	int zero = 0;
+	uint8_t zeros[4096];
+	int size, ret;
+
+	memset(zeros, 0, sizeof(zeros));
+
+	dfd = open(datafile, O_RDONLY | O_BINARY);
+	if (dfd < 0) {
+		fprintf(stderr, "Can't open %s: %s\n",
+			datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (fstat(dfd, &sbuf) < 0) {
+		fprintf(stderr, "Can't stat %s: %s\n",
+			datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (sbuf.st_size == 0)
+		goto close;
+
+	ptr = mmap(0, sbuf.st_size, PROT_READ, MAP_SHARED, dfd, 0);
+	if (ptr == MAP_FAILED) {
+		fprintf(stderr, "Can't read %s: %s\n",
+			datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	size = sbuf.st_size;
+	ret = lseek(ifd, offset, SEEK_SET);
+	if (ret < 0) {
+		fprintf(stderr, "%s: lseek error %s\n",
+			__func__, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (write(ifd, ptr, size) != size) {
+		fprintf(stderr, "Write error %s\n",
+			strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	tail = size % 4;
+	pad = pad - size;
+	if (pad == 1 && tail != 0) {
+		if (write(ifd, (char *)&zero, 4 - tail) != 4 - tail) {
+			fprintf(stderr, "Write error on %s\n",
+				strerror(errno));
+			exit(EXIT_FAILURE);
+		}
+	} else if (pad > 1) {
+		while (pad > 0) {
+			int todo = sizeof(zeros);
+
+			if (todo > pad)
+				todo = pad;
+			if (write(ifd, (char *)&zeros, todo) != todo) {
+				fprintf(stderr, "Write error: %s\n",
+					strerror(errno));
+				exit(EXIT_FAILURE);
+			}
+			pad -= todo;
+		}
+	}
+
+	munmap((void *)ptr, sbuf.st_size);
+close:
+	close(dfd);
+}
+
+uint64_t read_dcd_offset(char *filename)
+{
+	int dfd;
+	struct stat sbuf;
+	uint8_t *ptr;
+	uint64_t offset = 0;
+
+	dfd = open(filename, O_RDONLY | O_BINARY);
+	if (dfd < 0) {
+		fprintf(stderr, "Can't open %s: %s\n", filename, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (fstat(dfd, &sbuf) < 0) {
+		fprintf(stderr, "Can't stat %s: %s\n", filename, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	ptr = mmap(0, sbuf.st_size, PROT_READ, MAP_SHARED, dfd, 0);
+	if (ptr == MAP_FAILED) {
+		fprintf(stderr, "Can't read %s: %s\n", filename, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	offset = *(uint32_t *)(ptr + DCD_ENTRY_ADDR_IN_SCFW);
+
+	munmap((void *)ptr, sbuf.st_size);
+	close(dfd);
+
+	return offset;
+}
+
+static void set_image_hash(boot_img_t *img, char *filename, uint32_t hash_type)
+{
+	FILE *fp = NULL;
+	char sha_command[512];
+	char hash[2 * HASH_MAX_LEN + 1];
+	int i, ret;
+
+	if (img->size == 0)
+		sprintf(sha_command, "sha%dsum /dev/null", hash_type);
+	else
+		sprintf(sha_command, "dd if=/dev/zero of=tmp_pad bs=%d count=1;\
+			dd if=\'%s\' of=tmp_pad conv=notrunc;\
+			sha%dsum tmp_pad; rm -f tmp_pad",
+			img->size, filename, hash_type);
+
+	switch (hash_type) {
+	case HASH_TYPE_SHA_256:
+		img->hab_flags |= IMG_FLAG_HASH_SHA256;
+		break;
+	case HASH_TYPE_SHA_384:
+		img->hab_flags |= IMG_FLAG_HASH_SHA384;
+		break;
+	case HASH_TYPE_SHA_512:
+		img->hab_flags |= IMG_FLAG_HASH_SHA512;
+		break;
+	default:
+		fprintf(stderr, "Wrong hash type selected (%d) !!!\n\n",
+			hash_type);
+		exit(EXIT_FAILURE);
+		break;
+	}
+	memset(img->hash, 0, HASH_MAX_LEN);
+
+	fp = popen(sha_command, "r");
+	if (!fp) {
+		fprintf(stderr, "Failed to run hash command\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (!fgets(hash, hash_type / 4 + 1, fp)) {
+		fprintf(stderr, "Failed to hash file: %s\n", filename);
+		exit(EXIT_FAILURE);
+	}
+
+	for (i = 0; i < strlen(hash) / 2; i++) {
+		ret = sscanf(hash + 2 * i, "%02hhx", &img->hash[i]);
+		if (ret < 0) {
+			fprintf(stderr, "Failed sscanf hash: %d\n", ret);
+			exit(EXIT_FAILURE);
+		}
+	}
+
+	pclose(fp);
+}
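+
+/*
+ * For e.g. img->size == 4096 and hash_type == 384, the command built above
+ * is roughly (the file name is a placeholder):
+ *
+ *   dd if=/dev/zero of=tmp_pad bs=4096 count=1;
+ *   dd if='u-boot.bin' of=tmp_pad conv=notrunc;
+ *   sha384sum tmp_pad; rm -f tmp_pad
+ *
+ * i.e. the digest is taken over the file zero-padded to the (aligned)
+ * image size, and is then parsed back into img->hash.
+ */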
+
+static void set_image_array_entry(flash_header_v3_t *container,
+				  soc_type_t soc, const image_t *image_stack,
+				  uint32_t offset, uint32_t size,
+				  char *tmp_filename, bool dcd_skip)
+{
+	uint64_t entry = image_stack->entry;
+	uint64_t core = image_stack->ext;
+	uint32_t meta;
+	char *tmp_name = "";
+	option_type_t type = image_stack->option;
+	boot_img_t *img = &container->img[container->num_images];
+
+	img->offset = offset;  /* Is re-adjusted later */
+	img->size = size;
+
+	set_image_hash(img, tmp_filename, IMAGE_HASH_ALGO_DEFAULT);
+
+	switch (type) {
+	case SECO:
+		img->hab_flags |= IMG_TYPE_SECO;
+		img->hab_flags |= CORE_SECO << BOOT_IMG_FLAGS_CORE_SHIFT;
+		tmp_name = "SECO";
+		img->dst = 0x20C00000;
+		img->entry = 0x20000000;
+		break;
+	case AP:
+		if (soc == QX && core == CORE_CA35) {
+			meta = IMAGE_A35_DEFAULT_META(custom_partition);
+		} else if (soc == QM && core == CORE_CA53) {
+			meta = IMAGE_A53_DEFAULT_META(custom_partition);
+		} else if (soc == QM && core == CORE_CA72) {
+			meta = IMAGE_A72_DEFAULT_META(custom_partition);
+		} else {
+			fprintf(stderr, "Error: invalid AP core id: %lu\n",
+				core);
+			exit(EXIT_FAILURE);
+		}
+		img->hab_flags |= IMG_TYPE_EXEC;
+		/* On B0, only core id = 4 is valid */
+		img->hab_flags |= CORE_CA53 << BOOT_IMG_FLAGS_CORE_SHIFT;
+		tmp_name = "AP";
+		img->dst = entry;
+		img->entry = entry;
+		img->meta = meta;
+		custom_partition = 0;
+		break;
+	case M40:
+	case M41:
+		if (core == 0) {
+			core = CORE_CM4_0;
+			meta = IMAGE_M4_0_DEFAULT_META(custom_partition);
+		} else if (core == 1) {
+			core = CORE_CM4_1;
+			meta = IMAGE_M4_1_DEFAULT_META(custom_partition);
+		} else {
+			fprintf(stderr, "Error: invalid m4 core id: %lu\n", core);
+			exit(EXIT_FAILURE);
+		}
+		img->hab_flags |= IMG_TYPE_EXEC;
+		img->hab_flags |= core << BOOT_IMG_FLAGS_CORE_SHIFT;
+		tmp_name = "M4";
+		if ((entry & 0x7) != 0) {
+			fprintf(stderr, "\n\nError: M4 destination address is not 8-byte aligned\n\n");
+			exit(EXIT_FAILURE);
+		}
+		img->dst = entry;
+		img->entry = entry;
+		img->meta = meta;
+		custom_partition = 0;
+		break;
+	case DATA:
+		img->hab_flags |= IMG_TYPE_DATA;
+		img->hab_flags |= CORE_CA35 << BOOT_IMG_FLAGS_CORE_SHIFT;
+		tmp_name = "DATA";
+		img->dst = entry;
+		break;
+	case MSG_BLOCK:
+		img->hab_flags |= IMG_TYPE_DATA;
+		img->hab_flags |= CORE_CA35 << BOOT_IMG_FLAGS_CORE_SHIFT;
+		img->meta = core << BOOT_IMG_META_MU_RID_SHIFT;
+		tmp_name = "MSG_BLOCK";
+		img->dst = entry;
+		break;
+	case SCFW:
+		img->hab_flags |= scfw_flags & 0xFFFF0000;
+		img->hab_flags |= IMG_TYPE_EXEC;
+		img->hab_flags |= CORE_SC << BOOT_IMG_FLAGS_CORE_SHIFT;
+		tmp_name = "SCFW";
+		img->dst = 0x1FFE0000;
+		img->entry = 0x1FFE0000;
+
+		/* Lets add the DCD now */
+		if (!dcd_skip) {
+			container->num_images++;
+			img = &container->img[container->num_images];
+			img->hab_flags |= IMG_TYPE_DCD_DDR;
+			img->hab_flags |= CORE_SC << BOOT_IMG_FLAGS_CORE_SHIFT;
+			set_image_hash(img, "/dev/null",
+				       IMAGE_HASH_ALGO_DEFAULT);
+			img->offset = offset + img->size;
+			img->entry = read_dcd_offset(tmp_filename);
+			img->dst = img->entry - 1;
+		}
+		break;
+	default:
+		fprintf(stderr, "unrecognized image type (%d)\n", type);
+		exit(EXIT_FAILURE);
+	}
+
+	fprintf(stdout, "%s file_offset = 0x%x size = 0x%x\n", tmp_name, offset, size);
+
+	container->num_images++;
+}
+
+void set_container(flash_header_v3_t *container,  uint16_t sw_version,
+		   uint32_t alignment, uint32_t flags, uint16_t fuse_version)
+{
+	container->sig_blk_hdr.tag = 0x90;
+	container->sig_blk_hdr.length = sizeof(sig_blk_hdr_t);
+	container->sw_version = sw_version;
+	container->padding = alignment;
+	container->fuse_version = fuse_version;
+	container->flags = flags;
+	fprintf(stdout, "container flags: 0x%x\n", container->flags);
+}
+
+static int get_container_image_start_pos(image_t *image_stack, uint32_t align)
+{
+	image_t *img_sp = image_stack;
+	/* 8K total container header */
+	int file_off = CONTAINER_IMAGE_ARRAY_START_OFFSET;
+	FILE *fd = NULL;
+	flash_header_v3_t header;
+	int ret;
+
+	while (img_sp->option != NO_IMG) {
+		if (img_sp->option == APPEND) {
+			fd = fopen(img_sp->filename, "r");
+			if (!fd) {
+				fprintf(stderr, "Failed to open first container file %s\n", img_sp->filename);
+				exit(EXIT_FAILURE);
+			}
+
+			ret = fread(&header, sizeof(header), 1, fd);
+			if (ret != 1) {
+				printf("Failed to read header (%d)\n", ret);
+				exit(EXIT_FAILURE);
+			}
+
+			fclose(fd);
+
+			if (header.tag != IVT_HEADER_TAG_B0) {
+				fprintf(stderr, "header tag mismatched\n");
+				exit(EXIT_FAILURE);
+			} else {
+				file_off +=
+					header.img[header.num_images - 1].size;
+				file_off = ALIGN(file_off, align);
+			}
+		}
+
+		img_sp++;
+	}
+
+	return file_off;
+}
+
+static void set_imx_hdr_v3(imx_header_v3_t *imxhdr, uint32_t cont_id)
+{
+	flash_header_v3_t *fhdr_v3 = &imxhdr->fhdr[cont_id];
+
+	/* Set magic number, Only >= B0 supported */
+	fhdr_v3->tag = IVT_HEADER_TAG_B0;
+	fhdr_v3->version = IVT_VERSION_B0;
+}
+
+static uint8_t *flatten_container_header(imx_header_v3_t *imx_header,
+					 uint8_t containers_count,
+					 uint32_t *size_out,
+					 uint32_t file_offset)
+{
+	uint8_t *flat = NULL;
+	uint8_t *ptr = NULL;
+	uint16_t size = 0;
+	int i, j;
+
+	/* Compute size of all container headers */
+	for (i = 0; i < containers_count; i++) {
+		flash_header_v3_t *container = &imx_header->fhdr[i];
+
+		container->sig_blk_offset = HEADER_IMG_ARRAY_OFFSET +
+			container->num_images * IMG_ARRAY_ENTRY_SIZE;
+
+		container->length = HEADER_IMG_ARRAY_OFFSET +
+			(IMG_ARRAY_ENTRY_SIZE * container->num_images) +
+			sizeof(sig_blk_hdr_t);
+
+		/* Print info needed by CST to sign the container header */
+		fprintf(stdout, "CST: CONTAINER %d offset: 0x%x\n",
+			i, file_offset + size);
+		fprintf(stdout, "CST: CONTAINER %d: Signature Block: offset is at 0x%x\n", i,
+			file_offset + size + container->length -
+			SIGNATURE_BLOCK_HEADER_LENGTH);
+
+		size += ALIGN(container->length, container->padding);
+	}
+
+	flat = calloc(size, sizeof(uint8_t));
+	if (!flat) {
+		fprintf(stderr, "Failed to allocate memory (%d)\n", size);
+		exit(EXIT_FAILURE);
+	}
+
+	ptr = flat;
+	*size_out = size;
+
+	for (i = 0; i < containers_count; i++) {
+		flash_header_v3_t *container = &imx_header->fhdr[i];
+		uint32_t container_start_offset = ptr - flat;
+
+		/* Append container header */
+		append(ptr, container, HEADER_IMG_ARRAY_OFFSET);
+
+		/* Adjust image offsets to be relative to the start of the container headers */
+		for (j = 0; j < container->num_images; j++) {
+			container->img[j].offset -=
+				container_start_offset + file_offset;
+		}
+		/* Append each image array entry */
+		for (j = 0; j < container->num_images; j++)
+			append(ptr, &container->img[j], sizeof(boot_img_t));
+
+		append(ptr, &container->sig_blk_hdr, sizeof(sig_blk_hdr_t));
+
+		/* Padding for container (if necessary) */
+		ptr += ALIGN(container->length, container->padding) -
+			container->length;
+	}
+
+	return flat;
+}
+
+static int build_container(soc_type_t soc, uint32_t sector_size,
+			   bool emmc_fastboot, image_t *image_stack,
+			   bool dcd_skip, uint8_t fuse_version,
+			   uint16_t sw_version, int ofd)
+{
+	static imx_header_v3_t imx_header;
+	image_t *img_sp = image_stack;
+	int file_off;
+	uint8_t *tmp;
+	struct stat sbuf;
+	char *tmp_filename = NULL;
+	uint32_t size = 0;
+	uint32_t file_padding = 0;
+	int ret;
+
+	int container = -1;
+	int cont_img_count = 0; /* indexes to arrange the container */
+
+	memset((char *)&imx_header, 0, sizeof(imx_header_v3_t));
+
+	if (!image_stack) {
+		fprintf(stderr, "Empty image stack ");
+		exit(EXIT_FAILURE);
+	}
+
+	if (soc == QX)
+		fprintf(stdout, "Platform:\ti.MX8QXP B0\n");
+	else if (soc == QM)
+		fprintf(stdout, "Platform:\ti.MX8QM B0\n");
+
+	set_imx_hdr_v3(&imx_header, 0);
+	set_imx_hdr_v3(&imx_header, 1);
+
+	file_off = get_container_image_start_pos(image_stack, sector_size);
+	fprintf(stdout, "container image offset (aligned):%x\n", file_off);
+
+	/* step through image stack and generate the header */
+	img_sp = image_stack;
+
+	/* stop once we reach null terminator */
+	while (img_sp->option != NO_IMG) {
+		switch (img_sp->option) {
+		case AP:
+		case M40:
+		case M41:
+		case SCFW:
+		case DATA:
+		case MSG_BLOCK:
+			if (container < 0) {
+				fprintf(stderr, "No container found\n");
+				exit(EXIT_FAILURE);
+			}
+			check_file(&sbuf, img_sp->filename);
+			tmp_filename = img_sp->filename;
+			set_image_array_entry(&imx_header.fhdr[container],
+					      soc, img_sp, file_off,
+					      ALIGN(sbuf.st_size, sector_size),
+					      tmp_filename, dcd_skip);
+			img_sp->src = file_off;
+
+			file_off += ALIGN(sbuf.st_size, sector_size);
+			cont_img_count++;
+			break;
+
+		case SECO:
+			if (container < 0) {
+				fprintf(stderr, "No container found\n");
+				exit(EXIT_FAILURE);
+			}
+			check_file(&sbuf, img_sp->filename);
+			tmp_filename = img_sp->filename;
+			set_image_array_entry(&imx_header.fhdr[container],
+					      soc,
+					      img_sp,
+					      file_off,
+					      sbuf.st_size,
+					      tmp_filename, dcd_skip);
+			img_sp->src = file_off;
+
+			file_off += sbuf.st_size;
+			cont_img_count++;
+			break;
+
+		case NEW_CONTAINER:
+			container++;
+			set_container(&imx_header.fhdr[container], sw_version,
+				      CONTAINER_ALIGNMENT,
+				      CONTAINER_FLAGS_DEFAULT,
+				      fuse_version);
+			/* reset img count when moving to new container */
+			cont_img_count = 0;
+			scfw_flags = 0;
+			break;
+
+		case APPEND:
+			/*
+			 * nothing to do here, the container is appended
+			 * in the output
+			 */
+			break;
+		case FLAG:
+			/*
+			 * override the flags for scfw in current container
+			 * mask off bottom 16 bits.
+			 */
+			scfw_flags = img_sp->entry & 0xFFFF0000;
+			break;
+		case FILEOFF:
+			if (file_off > img_sp->dst) {
+				fprintf(stderr, "FILEOFF address less than current file offset!!!\n");
+				exit(EXIT_FAILURE);
+			}
+			if (img_sp->dst != ALIGN(img_sp->dst, sector_size)) {
+				fprintf(stderr, "FILEOFF address is not aligned to sector size!!!\n");
+				exit(EXIT_FAILURE);
+			}
+			file_off = img_sp->dst;
+			break;
+		case PARTITION:
+			/*
+			 * keep custom partition until next executable image
+			 * use a global var for default behaviour
+			 */
+			custom_partition = img_sp->entry;
+			break;
+		default:
+			fprintf(stderr, "unrecognized option in input stack (%d)\n", img_sp->option);
+			exit(EXIT_FAILURE);
+		}
+		img_sp++; /* advance index */
+	}
+
+	/* Append container (if specified) */
+	img_sp = image_stack;
+	do {
+		if (img_sp->option == APPEND) {
+			copy_file(ofd, img_sp->filename, 0, 0);
+			file_padding += FIRST_CONTAINER_HEADER_LENGTH;
+		}
+		img_sp++;
+	} while (img_sp->option != NO_IMG);
+
+	/* Add padding or skip appended container */
+	ret = lseek(ofd, file_padding, SEEK_SET);
+	if (ret < 0) {
+		fprintf(stderr, "%s: lseek error %s\n",
+			__func__, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (container >= 0) {
+		/* Note: image offsets are not contained in the image */
+		tmp = flatten_container_header(&imx_header, container + 1,
+					       &size, file_padding);
+		/* Write image header */
+		if (write(ofd, tmp, size) != size) {
+			fprintf(stderr, "error writing image hdr\n");
+			exit(EXIT_FAILURE);
+		}
+
+		/* Clean-up memory used by the headers */
+		free(tmp);
+	}
+
+	/*
+	 * step through the image stack again this time copying
+	 * images to final bin, stop once we reach null terminator.
+	 */
+	img_sp = image_stack;
+	while (img_sp->option != NO_IMG) {
+		if (img_sp->option == M40 || img_sp->option == M41 ||
+		    img_sp->option == AP || img_sp->option == DATA ||
+		    img_sp->option == SCD || img_sp->option == SCFW ||
+		    img_sp->option == SECO || img_sp->option == MSG_BLOCK) {
+			copy_file_aligned(ofd, img_sp->filename, img_sp->src,
+					  sector_size);
+		}
+		img_sp++;
+	}
+
+	return 0;
+}
+
+int imx8image_copy_image(int outfd, struct image_tool_params *mparams)
+{
+	image_t *img_sp = param_stack;
+
+	/*
+	 * The SECO FW is itself a container image; parse the cfg file to
+	 * calculate the 2nd container offset.
+	 */
+	fprintf(stdout, "parsing %s\n", mparams->imagename);
+	parse_cfg_file(img_sp, mparams->imagename);
+
+	if (sector_size == 0) {
+		fprintf(stderr, "Wrong sector size\n");
+		exit(EXIT_FAILURE);
+	}
+
+	fprintf(stdout, "CONTAINER Sector size:\t%08x\n", sector_size);
+	fprintf(stdout, "CONTAINER FUSE VERSION:\t0x%02x\n", fuse_version);
+	fprintf(stdout, "CONTAINER SW VERSION:\t0x%04x\n", sw_version);
+
+	build_container(soc, sector_size, emmc_fastboot,
+			img_sp, true, fuse_version, sw_version, outfd);
+
+	return 0;
+}
+
+/*
+ * imx8image parameters
+ */
+U_BOOT_IMAGE_TYPE(
+	imx8image,
+	"NXP i.MX8 Boot Image support",
+	0,
+	NULL,
+	imx8image_check_params,
+	NULL,
+	imx8image_print_header,
+	imx8image_set_header,
+	NULL,
+	imx8image_check_image_types,
+	NULL,
+	NULL
+);
diff --git a/tools/u-boot-tools/imx8image.o b/tools/u-boot-tools/imx8image.o
new file mode 100644
index 0000000000000000000000000000000000000000..ebfb3c7ead367849a386f19f3d624fc06f0c8659
Binary files /dev/null and b/tools/u-boot-tools/imx8image.o differ
diff --git a/tools/u-boot-tools/imx8m_image.sh b/tools/u-boot-tools/imx8m_image.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6346fb64d8c56f12c80a7428f28c4d9591d1fdca
--- /dev/null
+++ b/tools/u-boot-tools/imx8m_image.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# script to check whether the files referenced in imximage.cfg for i.MX8M exist
+#
+# usage: $0 <imximage.cfg> <post_process>
+#
+
+file=$1
+
+post_process=$2
+
+blobs=`awk '/^SIGNED_HDMI/ {print $2} /^LOADER/ {print $2} /^SECOND_LOADER/ {print $2} /^DDR_FW/ {print $2}' $file`
+for f in $blobs; do
+	tmp=$srctree/$f
+
+	if [ "$f" = "spl/u-boot-spl-ddr.bin" ] || [ "$f" = "u-boot.itb" ]; then
+		continue
+	fi
+
+	if [ -f $f ]; then
+		continue
+	fi
+
+	if [ ! -f $tmp ]; then
+		echo "WARNING '$tmp' not found, resulting binary is not-functional" >&2
+		exit 1
+	fi
+
+	sed -in "s;$f;$tmp;" $file
+done
+
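+# Post-processing pads the LPDDR4 training firmware pieces to fixed sizes
+# (0x8000 for each IMEM, 0x4000 for the 1D DMEM), concatenates IMEM+DMEM per
+# training stage, and appends both blobs after spl/u-boot-spl.bin, presumably
+# so SPL can locate each piece at a fixed offset.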
+if [ "$post_process" = 1 ]; then
+	if [ -f $srctree/lpddr4_pmu_train_1d_imem.bin ]; then
+		objcopy -I binary -O binary --pad-to 0x8000 --gap-fill=0x0 $srctree/lpddr4_pmu_train_1d_imem.bin lpddr4_pmu_train_1d_imem_pad.bin
+		objcopy -I binary -O binary --pad-to 0x4000 --gap-fill=0x0 $srctree/lpddr4_pmu_train_1d_dmem.bin lpddr4_pmu_train_1d_dmem_pad.bin
+		objcopy -I binary -O binary --pad-to 0x8000 --gap-fill=0x0 $srctree/lpddr4_pmu_train_2d_imem.bin lpddr4_pmu_train_2d_imem_pad.bin
+		cat lpddr4_pmu_train_1d_imem_pad.bin lpddr4_pmu_train_1d_dmem_pad.bin > lpddr4_pmu_train_1d_fw.bin
+		cat lpddr4_pmu_train_2d_imem_pad.bin $srctree/lpddr4_pmu_train_2d_dmem.bin > lpddr4_pmu_train_2d_fw.bin
+		cat spl/u-boot-spl.bin lpddr4_pmu_train_1d_fw.bin lpddr4_pmu_train_2d_fw.bin > spl/u-boot-spl-ddr.bin
+		rm -f lpddr4_pmu_train_1d_fw.bin lpddr4_pmu_train_2d_fw.bin lpddr4_pmu_train_1d_imem_pad.bin lpddr4_pmu_train_1d_dmem_pad.bin lpddr4_pmu_train_2d_imem_pad.bin
+	fi
+fi
+
+exit 0
diff --git a/tools/u-boot-tools/imx8mimage.c b/tools/u-boot-tools/imx8mimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..50a256cbac56dc984c0e2458f0d2e2ac323aae5c
--- /dev/null
+++ b/tools/u-boot-tools/imx8mimage.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ */
+
+#include "imagetool.h"
+#include <image.h>
+#include "imximage.h"
+#include "compiler.h"
+
+static uint32_t ap_start_addr, sld_start_addr, sld_src_off;
+static char *ap_img, *sld_img, *signed_hdmi;
+static imx_header_v3_t imx_header[2]; /* At most two IVT headers: HDMI FW and loader image */
+static uint32_t rom_image_offset;
+static uint32_t sector_size = 0x200;
+static uint32_t image_off;
+static uint32_t sld_header_off;
+static uint32_t ivt_offset;
+static uint32_t using_fit;
+
+#define CSF_SIZE 0x2000
+#define HDMI_IVT_ID 0
+#define IMAGE_IVT_ID 1
+
+#define HDMI_FW_SIZE		0x17000 /* Use Last 0x1000 for IVT and CSF */
+#define ALIGN_SIZE		0x1000
+#define ALIGN(x,a)	__ALIGN_MASK((x), (__typeof__(x))(a) - 1, a)
+#define __ALIGN_MASK(x,mask,mask2) (((x) + (mask)) / (mask2) * (mask2))
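+/* e.g. ALIGN(0x17001, 0x1000) == 0x18000: round x up to the next multiple of a */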
+
+static uint32_t get_cfg_value(char *token, char *name,  int linenr)
+{
+	char *endptr;
+	uint32_t value;
+
+	errno = 0;
+	value = strtoul(token, &endptr, 16);
+	if (errno || token == endptr) {
+		fprintf(stderr, "Error: %s[%d] - Invalid hex data(%s)\n",
+			name,  linenr, token);
+		exit(EXIT_FAILURE);
+	}
+	return value;
+}
+
+int imx8mimage_check_params(struct image_tool_params *params)
+{
+	return 0;
+}
+
+static void imx8mimage_set_header(void *ptr, struct stat *sbuf, int ifd,
+				  struct image_tool_params *params)
+{
+}
+
+static void imx8mimage_print_header(const void *ptr)
+{
+}
+
+static int imx8mimage_check_image_types(uint8_t type)
+{
+	return (type == IH_TYPE_IMX8MIMAGE) ? EXIT_SUCCESS : EXIT_FAILURE;
+}
+
+static table_entry_t imx8mimage_cmds[] = {
+	{CMD_BOOT_FROM,         "BOOT_FROM",            "boot command",	      },
+	{CMD_FIT,               "FIT",                  "fit image",	      },
+	{CMD_SIGNED_HDMI,       "SIGNED_HDMI",          "signed hdmi image",  },
+	{CMD_LOADER,            "LOADER",               "loader image",       },
+	{CMD_SECOND_LOADER,     "SECOND_LOADER",        "2nd loader image",   },
+	{CMD_DDR_FW,            "DDR_FW",               "ddr firmware",       },
+	{-1,                    "",                     "",	              },
+};
+
+static table_entry_t imx8mimage_ivt_offset[] = {
+	{0x400,		"sd",			"sd/emmc",},
+	{0x400,		"emmc_fastboot",	"emmc fastboot",},
+	{0x1000,	"fspi",			"flexspi",	},
+	{-1,		"",			"Invalid",	},
+};
+
+static void parse_cfg_cmd(int32_t cmd, char *token, char *name, int lineno)
+{
+	switch (cmd) {
+	case CMD_BOOT_FROM:
+		ivt_offset = get_table_entry_id(imx8mimage_ivt_offset,
+						"imx8mimage ivt offset",
+						token);
+		if (!strncmp(token, "sd", 2))
+			rom_image_offset = 0x8000;
+		break;
+	case CMD_LOADER:
+		ap_img = token;
+		break;
+	case CMD_SECOND_LOADER:
+		sld_img = token;
+		break;
+	case CMD_SIGNED_HDMI:
+		signed_hdmi = token;
+		break;
+	case CMD_FIT:
+		using_fit = 1;
+		break;
+	case CMD_DDR_FW:
+		/* Do nothing */
+		break;
+	}
+}
+
+static void parse_cfg_fld(int32_t *cmd, char *token,
+			  char *name, int lineno, int fld)
+{
+	switch (fld) {
+	case CFG_COMMAND:
+		*cmd = get_table_entry_id(imx8mimage_cmds,
+					  "imx8mimage commands", token);
+		if (*cmd < 0) {
+			fprintf(stderr, "Error: %s[%d] - Invalid command" "(%s)\n",
+				name, lineno, token);
+			exit(EXIT_FAILURE);
+		}
+		break;
+	case CFG_REG_SIZE:
+		parse_cfg_cmd(*cmd, token, name, lineno);
+		break;
+	case CFG_REG_ADDRESS:
+		switch (*cmd) {
+		case CMD_LOADER:
+			ap_start_addr = get_cfg_value(token, name, lineno);
+			break;
+		case CMD_SECOND_LOADER:
+			sld_start_addr = get_cfg_value(token, name, lineno);
+			break;
+		}
+		break;
+	case CFG_REG_VALUE:
+		switch (*cmd) {
+		case CMD_SECOND_LOADER:
+			sld_src_off = get_cfg_value(token, name, lineno);
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static uint32_t parse_cfg_file(char *name)
+{
+	FILE *fd = NULL;
+	char *line = NULL;
+	char *token, *saveptr1, *saveptr2;
+	int lineno = 0;
+	int fld;
+	size_t len = 0;
+	int32_t cmd;
+
+	fd = fopen(name, "r");
+	if (fd == 0) {
+		fprintf(stderr, "Error: %s - Can't open cfg file\n", name);
+		exit(EXIT_FAILURE);
+	}
+
+	/*
+	 * Very simple parsing, line starting with # are comments
+	 * and are dropped
+	 */
+	while ((getline(&line, &len, fd)) > 0) {
+		lineno++;
+
+		token = strtok_r(line, "\r\n", &saveptr1);
+		if (!token)
+			continue;
+
+		/* Check inside the single line */
+		for (fld = CFG_COMMAND, cmd = CFG_INVALID,
+		     line = token; ; line = NULL, fld++) {
+			token = strtok_r(line, " \t", &saveptr2);
+			if (!token)
+				break;
+
+			/* Drop all text starting with '#' as comments */
+			if (token[0] == '#')
+				break;
+
+			parse_cfg_fld(&cmd, token, name, lineno, fld);
+		}
+	}
+
+	return 0;
+}
+
+static void fill_zero(int ifd, int size, int offset)
+{
+	int fill_size;
+	uint8_t zeros[4096];
+	int ret;
+
+	memset(zeros, 0, sizeof(zeros));
+
+	ret = lseek(ifd, offset, SEEK_SET);
+	if (ret < 0) {
+		fprintf(stderr, "%s seek: %s\n", __func__, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	while (size) {
+		if (size > 4096)
+			fill_size = 4096;
+		else
+			fill_size = size;
+
+		if (write(ifd, (char *)&zeros, fill_size) != fill_size) {
+			fprintf(stderr, "Write error: %s\n",
+				strerror(errno));
+			exit(EXIT_FAILURE);
+		}
+
+		size -= fill_size;
+	}
+}
+
+static void copy_file(int ifd, const char *datafile, int pad, int offset,
+		      int datafile_offset)
+{
+	int dfd;
+	struct stat sbuf;
+	unsigned char *ptr;
+	int tail;
+	int zero = 0;
+	uint8_t zeros[4096];
+	int size, ret;
+
+	memset(zeros, 0, sizeof(zeros));
+
+	dfd = open(datafile, O_RDONLY | O_BINARY);
+	if (dfd < 0) {
+		fprintf(stderr, "Can't open %s: %s\n",
+			datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (fstat(dfd, &sbuf) < 0) {
+		fprintf(stderr, "Can't stat %s: %s\n",
+			datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	ptr = mmap(0, sbuf.st_size, PROT_READ, MAP_SHARED, dfd, 0);
+	if (ptr == MAP_FAILED) {
+		fprintf(stderr, "Can't read %s: %s\n",
+			datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	size = sbuf.st_size - datafile_offset;
+	ret = lseek(ifd, offset, SEEK_SET);
+	if (ret < 0) {
+		fprintf(stderr, "lseek ifd fail\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (write(ifd, ptr + datafile_offset, size) != size) {
+		fprintf(stderr, "Write error %s\n",
+			strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	tail = size % 4;
+	pad = pad - size;
+	if (pad == 1 && tail != 0) {
+		if (write(ifd, (char *)&zero, 4 - tail) != 4 - tail) {
+			fprintf(stderr, "Write error on %s\n",
+				strerror(errno));
+			exit(EXIT_FAILURE);
+		}
+	} else if (pad > 1) {
+		while (pad > 0) {
+			int todo = sizeof(zeros);
+
+			if (todo > pad)
+				todo = pad;
+			if (write(ifd, (char *)&zeros, todo) != todo) {
+				fprintf(stderr, "Write error: %s\n",
+					strerror(errno));
+				exit(EXIT_FAILURE);
+			}
+			pad -= todo;
+		}
+	}
+
+	munmap((void *)ptr, sbuf.st_size);
+	close(dfd);
+}
+
+/* Return this IVT offset in the final output file */
+static int generate_ivt_for_fit(int fd, int fit_offset, uint32_t ep,
+				uint32_t *fit_load_addr)
+{
+	image_header_t image_header;
+	int ret;
+
+	uint32_t fit_size, load_addr;
+	int align_len = 64 - 1; /* 64 is cacheline size */
+
+	ret = lseek(fd, fit_offset, SEEK_SET);
+	if (ret < 0) {
+		fprintf(stderr, "lseek fd fail for fit\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (read(fd, (char *)&image_header, sizeof(image_header_t)) !=
+	    sizeof(image_header_t)) {
+		fprintf(stderr, "generate_ivt_for_fit read failed: %s\n",
+			strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (be32_to_cpu(image_header.ih_magic) != FDT_MAGIC) {
+		fprintf(stderr, "%s error: not a FIT file\n", __func__);
+		exit(EXIT_FAILURE);
+	}
+
+	fit_size = fdt_totalsize(&image_header);
+	fit_size = (fit_size + 3) & ~3;
+
+	fit_size = ALIGN(fit_size, ALIGN_SIZE);
+
+	ret = lseek(fd, fit_offset + fit_size, SEEK_SET);
+	if (ret < 0) {
+		fprintf(stderr, "lseek fd fail for fit\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/*
+	 * ep is the U-Boot entry point. SPL loads the FIT below the U-Boot
+	 * address, leaving room for the CSF (CSF_SIZE = 0x2000) and a
+	 * 512-byte gap, aligned down to a cacheline
+	 */
+	load_addr = (ep - (fit_size + CSF_SIZE) - 512 - align_len) &
+		~align_len;
+
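+	/*
+	 * Positional IVT initializer, fields in the order printed by
+	 * dump_header_v2(): header {tag, length, version}, entry, reserved1,
+	 * dcd_ptr, boot_data_ptr, self, csf, reserved2.
+	 */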
+	flash_header_v2_t ivt_header = { { 0xd1, 0x2000, 0x40 },
+		load_addr, 0, 0, 0,
+		(load_addr + fit_size),
+		(load_addr + fit_size + 0x20),
+		0 };
+
+	if (write(fd, &ivt_header, sizeof(flash_header_v2_t)) !=
+	    sizeof(flash_header_v2_t)) {
+		fprintf(stderr, "IVT writing error on fit image\n");
+		exit(EXIT_FAILURE);
+	}
+
+	*fit_load_addr = load_addr;
+
+	return fit_offset + fit_size;
+}
+
+static void dump_header_v2(imx_header_v3_t *imx_header, int index)
+{
+	const char *ivt_name[2] = {"HDMI FW", "LOADER IMAGE"};
+
+	fprintf(stdout, "========= IVT HEADER [%s] =========\n",
+		ivt_name[index]);
+	fprintf(stdout, "header.tag: \t\t0x%x\n",
+		imx_header[index].fhdr.header.tag);
+	fprintf(stdout, "header.length: \t\t0x%x\n",
+		imx_header[index].fhdr.header.length);
+	fprintf(stdout, "header.version: \t0x%x\n",
+		imx_header[index].fhdr.header.version);
+	fprintf(stdout, "entry: \t\t\t0x%x\n",
+		imx_header[index].fhdr.entry);
+	fprintf(stdout, "reserved1: \t\t0x%x\n",
+		imx_header[index].fhdr.reserved1);
+	fprintf(stdout, "dcd_ptr: \t\t0x%x\n",
+		imx_header[index].fhdr.dcd_ptr);
+	fprintf(stdout, "boot_data_ptr: \t\t0x%x\n",
+		imx_header[index].fhdr.boot_data_ptr);
+	fprintf(stdout, "self: \t\t\t0x%x\n",
+		imx_header[index].fhdr.self);
+	fprintf(stdout, "csf: \t\t\t0x%x\n",
+		imx_header[index].fhdr.csf);
+	fprintf(stdout, "reserved2: \t\t0x%x\n",
+		imx_header[index].fhdr.reserved2);
+
+	fprintf(stdout, "boot_data.start: \t0x%x\n",
+		imx_header[index].boot_data.start);
+	fprintf(stdout, "boot_data.size: \t0x%x\n",
+		imx_header[index].boot_data.size);
+	fprintf(stdout, "boot_data.plugin: \t0x%x\n",
+		imx_header[index].boot_data.plugin);
+}
+
+void build_image(int ofd)
+{
+	int file_off, header_hdmi_off = 0, header_image_off;
+	int hdmi_fd, ap_fd, sld_fd;
+	uint32_t sld_load_addr = 0;
+	uint32_t csf_off, sld_csf_off = 0;
+	int ret;
+	struct stat sbuf;
+
+	if (!ap_img) {
+		fprintf(stderr, "No LOADER image specificed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	file_off = 0;
+
+	if (signed_hdmi) {
+		header_hdmi_off = file_off + ivt_offset;
+
+		hdmi_fd = open(signed_hdmi, O_RDONLY | O_BINARY);
+		if (hdmi_fd < 0) {
+			fprintf(stderr, "%s: Can't open: %s\n",
+				signed_hdmi, strerror(errno));
+			exit(EXIT_FAILURE);
+		}
+
+		if (fstat(hdmi_fd, &sbuf) < 0) {
+			fprintf(stderr, "%s: Can't stat: %s\n",
+				signed_hdmi, strerror(errno));
+			exit(EXIT_FAILURE);
+		}
+		close(hdmi_fd);
+
+		/*
+		 * Aligned to 104KB = 92KB FW image + 0x2000
+		 * (IVT and alignment) + 0x1000 (second IVT + CSF),
+		 * matching the ALIGN() arguments below
+		 */
+		file_off += ALIGN(sbuf.st_size,
+				  HDMI_FW_SIZE + 0x2000 + 0x1000);
+	}
+
+	header_image_off = file_off + ivt_offset;
+
+	ap_fd = open(ap_img, O_RDONLY | O_BINARY);
+	if (ap_fd < 0) {
+		fprintf(stderr, "%s: Can't open: %s\n",
+			ap_img, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	if (fstat(ap_fd, &sbuf) < 0) {
+		fprintf(stderr, "%s: Can't stat: %s\n",
+			ap_img, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	close(ap_fd);
+
+	imx_header[IMAGE_IVT_ID].fhdr.header.tag = IVT_HEADER_TAG; /* 0xD1 */
+	imx_header[IMAGE_IVT_ID].fhdr.header.length =
+		cpu_to_be16(sizeof(flash_header_v2_t));
+	imx_header[IMAGE_IVT_ID].fhdr.header.version = IVT_VERSION_V3; /* 0x41 */
+	imx_header[IMAGE_IVT_ID].fhdr.entry = ap_start_addr;
+	imx_header[IMAGE_IVT_ID].fhdr.self = ap_start_addr -
+		sizeof(imx_header_v3_t);
+	imx_header[IMAGE_IVT_ID].fhdr.dcd_ptr = 0;
+	imx_header[IMAGE_IVT_ID].fhdr.boot_data_ptr =
+		imx_header[IMAGE_IVT_ID].fhdr.self +
+		offsetof(imx_header_v3_t, boot_data);
+	imx_header[IMAGE_IVT_ID].boot_data.start =
+		imx_header[IMAGE_IVT_ID].fhdr.self - ivt_offset;
+	imx_header[IMAGE_IVT_ID].boot_data.size =
+		ALIGN(sbuf.st_size + sizeof(imx_header_v3_t) + ivt_offset,
+		      sector_size);
+
+	image_off = header_image_off + sizeof(imx_header_v3_t);
+	file_off +=  imx_header[IMAGE_IVT_ID].boot_data.size;
+
+	imx_header[IMAGE_IVT_ID].boot_data.plugin = 0;
+	imx_header[IMAGE_IVT_ID].fhdr.csf =
+		imx_header[IMAGE_IVT_ID].boot_data.start +
+		imx_header[IMAGE_IVT_ID].boot_data.size;
+
+	imx_header[IMAGE_IVT_ID].boot_data.size += CSF_SIZE; /* 8K region dummy CSF */
+
+	csf_off = file_off;
+	file_off += CSF_SIZE;
+
+	/* Second boot loader image */
+	if (sld_img) {
+		if (!using_fit) {
+			fprintf(stderr, "Not support no fit\n");
+			exit(EXIT_FAILURE);
+		} else {
+			sld_header_off = sld_src_off - rom_image_offset;
+			/*
+			 * Record the second bootloader relative offset in
+			 * image's IVT reserved1
+			 */
+			imx_header[IMAGE_IVT_ID].fhdr.reserved1 =
+				sld_header_off - header_image_off;
+			sld_fd = open(sld_img, O_RDONLY | O_BINARY);
+			if (sld_fd < 0) {
+				fprintf(stderr, "%s: Can't open: %s\n",
+					sld_img, strerror(errno));
+				exit(EXIT_FAILURE);
+			}
+
+			if (fstat(sld_fd, &sbuf) < 0) {
+				fprintf(stderr, "%s: Can't stat: %s\n",
+					sld_img, strerror(errno));
+				exit(EXIT_FAILURE);
+			}
+
+			close(sld_fd);
+
+			file_off = sld_header_off;
+			file_off += sbuf.st_size + sizeof(image_header_t);
+		}
+	}
+
+	if (signed_hdmi) {
+		header_hdmi_off -= ivt_offset;
+		ret = lseek(ofd, header_hdmi_off, SEEK_SET);
+		if (ret < 0) {
+			fprintf(stderr, "lseek ofd fail for hdmi\n");
+			exit(EXIT_FAILURE);
+		}
+
+		/* The signed HDMI FW has a 0x400 IVT offset; strip it when copying */
+		copy_file(ofd, signed_hdmi, 0, header_hdmi_off, 0x400);
+	}
+
+	/* Main Image */
+	header_image_off -= ivt_offset;
+	image_off -= ivt_offset;
+	ret = lseek(ofd, header_image_off, SEEK_SET);
+	if (ret < 0) {
+		fprintf(stderr, "lseek ofd fail\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* Write image header */
+	if (write(ofd, &imx_header[IMAGE_IVT_ID], sizeof(imx_header_v3_t)) !=
+	    sizeof(imx_header_v3_t)) {
+		fprintf(stderr, "error writing image hdr\n");
+		exit(1);
+	}
+
+	copy_file(ofd, ap_img, 0, image_off, 0);
+
+	csf_off -= ivt_offset;
+	fill_zero(ofd, CSF_SIZE, csf_off);
+
+	if (sld_img) {
+		sld_header_off -= ivt_offset;
+		ret = lseek(ofd, sld_header_off, SEEK_SET);
+		if (ret < 0) {
+			fprintf(stderr, "lseek ofd fail for sld_img\n");
+			exit(EXIT_FAILURE);
+		}
+
+		/* Write image header */
+		if (!using_fit) {
+			/* TODO */
+		} else {
+			copy_file(ofd, sld_img, 0, sld_header_off, 0);
+			sld_csf_off =
+				generate_ivt_for_fit(ofd, sld_header_off,
+						     sld_start_addr,
+						     &sld_load_addr) + 0x20;
+		}
+	}
+
+	if (!signed_hdmi)
+		dump_header_v2(imx_header, 0);
+	dump_header_v2(imx_header, 1);
+
+	fprintf(stdout, "========= OFFSET dump =========");
+	if (signed_hdmi) {
+		fprintf(stdout, "\nSIGNED HDMI FW:\n");
+		fprintf(stdout, " header_hdmi_off \t0x%x\n",
+			header_hdmi_off);
+	}
+
+	fprintf(stdout, "\nLoader IMAGE:\n");
+	fprintf(stdout, " header_image_off \t0x%x\n image_off \t\t0x%x\n csf_off \t\t0x%x\n",
+		header_image_off, image_off, csf_off);
+	fprintf(stdout, " spl hab block: \t0x%x 0x%x 0x%x\n",
+		imx_header[IMAGE_IVT_ID].fhdr.self, header_image_off,
+		csf_off - header_image_off);
+
+	fprintf(stdout, "\nSecond Loader IMAGE:\n");
+	fprintf(stdout, " sld_header_off \t0x%x\n",
+		sld_header_off);
+	fprintf(stdout, " sld_csf_off \t\t0x%x\n",
+		sld_csf_off);
+	fprintf(stdout, " sld hab block: \t0x%x 0x%x 0x%x\n",
+		sld_load_addr, sld_header_off, sld_csf_off - sld_header_off);
+}
+
+int imx8mimage_copy_image(int outfd, struct image_tool_params *mparams)
+{
+	/* Parse the board cfg file, then assemble the i.MX8M boot image */
+	fprintf(stdout, "parsing %s\n", mparams->imagename);
+	parse_cfg_file(mparams->imagename);
+
+	build_image(outfd);
+
+	return 0;
+}
+
+/*
+ * imx8mimage parameters
+ */
+U_BOOT_IMAGE_TYPE(
+	imx8mimage,
+	"NXP i.MX8M Boot Image support",
+	0,
+	NULL,
+	imx8mimage_check_params,
+	NULL,
+	imx8mimage_print_header,
+	imx8mimage_set_header,
+	NULL,
+	imx8mimage_check_image_types,
+	NULL,
+	NULL
+);
diff --git a/tools/u-boot-tools/imx8mimage.o b/tools/u-boot-tools/imx8mimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..33f2bdc367edd696c1bed64c20303aaa52a383df
Binary files /dev/null and b/tools/u-boot-tools/imx8mimage.o differ
diff --git a/tools/u-boot-tools/imx_cntr_image.sh b/tools/u-boot-tools/imx_cntr_image.sh
new file mode 100755
index 0000000000000000000000000000000000000000..972b95ccbeef9133492a78e6d2771e8a21ebcfcb
--- /dev/null
+++ b/tools/u-boot-tools/imx_cntr_image.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# script to check whether the files referenced in imximage.cfg for i.MX8 exist
+#
+# usage: $0 <imximage.cfg>
+
+file=$1
+
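+# APPEND entries carry the blob path in field 2; IMAGE and DATA entries carry it in field 3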
+blobs=`awk '/^APPEND/ {print $2} /^IMAGE/ || /^DATA/ {print $3}' $file`
+for f in $blobs; do
+	tmp=$srctree/$f
+	if [ $f = "u-boot-dtb.bin" ]; then
+		continue
+	fi
+
+	if [ -f $f ]; then
+		continue
+	fi
+
+	if [ ! -f $tmp ]; then
+		echo "WARNING '$tmp' not found, resulting binary is not-functional" >&2
+		exit 1
+	fi
+
+	sed -in "s;$f;$tmp;" $file
+done
+
+exit 0
diff --git a/tools/u-boot-tools/imximage.c b/tools/u-boot-tools/imximage.c
new file mode 100644
index 0000000000000000000000000000000000000000..d7c0b6e883f7c8e3d5449227d77da5f0e325e035
--- /dev/null
+++ b/tools/u-boot-tools/imximage.c
@@ -0,0 +1,996 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2009
+ * Stefano Babic, DENX Software Engineering, sbabic@denx.de.
+ *
+ * (C) Copyright 2008
+ * Marvell Semiconductor <www.marvell.com>
+ * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
+ */
+
+#include "imagetool.h"
+#include <image.h>
+#include "imximage.h"
+
+#define UNDEFINED 0xFFFFFFFF
+
+/*
+ * Supported commands for configuration file
+ */
+static table_entry_t imximage_cmds[] = {
+	{CMD_BOOT_FROM,         "BOOT_FROM",            "boot command",	  },
+	{CMD_BOOT_OFFSET,       "BOOT_OFFSET",          "Boot offset",	  },
+	{CMD_WRITE_DATA,        "DATA",                 "Reg Write Data", },
+	{CMD_WRITE_CLR_BIT,     "CLR_BIT",              "Reg clear bit",  },
+	{CMD_WRITE_SET_BIT,     "SET_BIT",              "Reg set bit",  },
+	{CMD_CHECK_BITS_SET,    "CHECK_BITS_SET",   "Reg Check bits set", },
+	{CMD_CHECK_BITS_CLR,    "CHECK_BITS_CLR",   "Reg Check bits clr", },
+	{CMD_CSF,               "CSF",           "Command Sequence File", },
+	{CMD_IMAGE_VERSION,     "IMAGE_VERSION",        "image version",  },
+	{CMD_PLUGIN,            "PLUGIN",               "file plugin_addr",  },
+	{-1,                    "",                     "",	          },
+};
+
+/*
+ * Supported Boot options for configuration file
+ * this is needed to set the correct flash offset
+ */
+static table_entry_t imximage_boot_offset[] = {
+	{FLASH_OFFSET_ONENAND,	"onenand",	"OneNAND Flash",},
+	{FLASH_OFFSET_NAND,	"nand",		"NAND Flash",	},
+	{FLASH_OFFSET_NOR,	"nor",		"NOR Flash",	},
+	{FLASH_OFFSET_SATA,	"sata",		"SATA Disk",	},
+	{FLASH_OFFSET_SD,	"sd",		"SD Card",	},
+	{FLASH_OFFSET_SPI,	"spi",		"SPI Flash",	},
+	{FLASH_OFFSET_QSPI,	"qspi",		"QSPI NOR Flash",},
+	{-1,			"",		"Invalid",	},
+};
+
+/*
+ * Supported Boot options for configuration file
+ * this is needed to determine the initial load size
+ */
+static table_entry_t imximage_boot_loadsize[] = {
+	{FLASH_LOADSIZE_ONENAND,	"onenand",	"OneNAND Flash",},
+	{FLASH_LOADSIZE_NAND,		"nand",		"NAND Flash",	},
+	{FLASH_LOADSIZE_NOR,		"nor",		"NOR Flash",	},
+	{FLASH_LOADSIZE_SATA,		"sata",		"SATA Disk",	},
+	{FLASH_LOADSIZE_SD,		"sd",		"SD Card",	},
+	{FLASH_LOADSIZE_SPI,		"spi",		"SPI Flash",	},
+	{FLASH_LOADSIZE_QSPI,		"qspi",		"QSPI NOR Flash",},
+	{-1,				"",		"Invalid",	},
+};
+
+/*
+ * IMXIMAGE version definition for i.MX chips
+ */
+static table_entry_t imximage_versions[] = {
+	{IMXIMAGE_V1,	"",	" (i.MX25/35/51 compatible)", },
+	{IMXIMAGE_V2,	"",	" (i.MX53/6/7 compatible)",   },
+	{-1,            "",     " (Invalid)",                 },
+};
+
+static struct imx_header imximage_header;
+static uint32_t imximage_version;
+/*
+ * Image Vector Table Offset
+ * Initialized to an invalid, non-4-byte-aligned address so we can
+ * check whether it was set by the cfg file.
+ */
+static uint32_t imximage_ivt_offset = UNDEFINED;
+static uint32_t imximage_csf_size = UNDEFINED;
+/* Initial Load Region Size */
+static uint32_t imximage_init_loadsize;
+static uint32_t imximage_iram_free_start;
+static uint32_t imximage_plugin_size;
+static uint32_t plugin_image;
+
+static set_dcd_val_t set_dcd_val;
+static set_dcd_param_t set_dcd_param;
+static set_dcd_rst_t set_dcd_rst;
+static set_imx_hdr_t set_imx_hdr;
+static uint32_t max_dcd_entries;
+static uint32_t *header_size_ptr;
+static uint32_t *csf_ptr;
+
+static uint32_t get_cfg_value(char *token, char *name,  int linenr)
+{
+	char *endptr;
+	uint32_t value;
+
+	errno = 0;
+	value = strtoul(token, &endptr, 16);
+	if (errno || (token == endptr)) {
+		fprintf(stderr, "Error: %s[%d] - Invalid hex data(%s)\n",
+			name,  linenr, token);
+		exit(EXIT_FAILURE);
+	}
+	return value;
+}
+
+static uint32_t detect_imximage_version(struct imx_header *imx_hdr)
+{
+	imx_header_v1_t *hdr_v1 = &imx_hdr->header.hdr_v1;
+	imx_header_v2_t *hdr_v2 = &imx_hdr->header.hdr_v2;
+	flash_header_v1_t *fhdr_v1 = &hdr_v1->fhdr;
+	flash_header_v2_t *fhdr_v2 = &hdr_v2->fhdr;
+
+	/* Try to detect V1 */
+	if ((fhdr_v1->app_code_barker == APP_CODE_BARKER) &&
+		(hdr_v1->dcd_table.preamble.barker == DCD_BARKER))
+		return IMXIMAGE_V1;
+
+	/* Try to detect V2 */
+	if ((fhdr_v2->header.tag == IVT_HEADER_TAG) &&
+		(hdr_v2->data.dcd_table.header.tag == DCD_HEADER_TAG))
+		return IMXIMAGE_V2;
+
+	if ((fhdr_v2->header.tag == IVT_HEADER_TAG) &&
+	    hdr_v2->boot_data.plugin)
+		return IMXIMAGE_V2;
+
+	return IMXIMAGE_VER_INVALID;
+}
+
+static void err_imximage_version(int version)
+{
+	fprintf(stderr,
+		"Error: Unsupported imximage version:%d\n", version);
+
+	exit(EXIT_FAILURE);
+}
+
+static void set_dcd_val_v1(struct imx_header *imxhdr, char *name, int lineno,
+					int fld, uint32_t value, uint32_t off)
+{
+	dcd_v1_t *dcd_v1 = &imxhdr->header.hdr_v1.dcd_table;
+
+	switch (fld) {
+	case CFG_REG_SIZE:
+		/* Byte, halfword, word */
+		if ((value != 1) && (value != 2) && (value != 4)) {
+			fprintf(stderr, "Error: %s[%d] - "
+				"Invalid register size " "(%d)\n",
+				name, lineno, value);
+			exit(EXIT_FAILURE);
+		}
+		dcd_v1->addr_data[off].type = value;
+		break;
+	case CFG_REG_ADDRESS:
+		dcd_v1->addr_data[off].addr = value;
+		break;
+	case CFG_REG_VALUE:
+		dcd_v1->addr_data[off].value = value;
+		break;
+	default:
+		break;
+
+	}
+}
+
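+/* Most recently emitted DCD command; set_dcd_param_v2() appends to it or starts a new one when the command type changes */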
+static struct dcd_v2_cmd *gd_last_cmd;
+
+static void set_dcd_param_v2(struct imx_header *imxhdr, uint32_t dcd_len,
+		int32_t cmd)
+{
+	dcd_v2_t *dcd_v2 = &imxhdr->header.hdr_v2.data.dcd_table;
+	struct dcd_v2_cmd *d = gd_last_cmd;
+	struct dcd_v2_cmd *d2;
+	int len;
+
+	if (!d)
+		d = &dcd_v2->dcd_cmd;
+	d2 = d;
+	len = be16_to_cpu(d->write_dcd_command.length);
+	if (len > 4)
+		d2 = (struct dcd_v2_cmd *)(((char *)d) + len);
+
+	switch (cmd) {
+	case CMD_WRITE_DATA:
+		if ((d->write_dcd_command.tag == DCD_WRITE_DATA_COMMAND_TAG) &&
+		    (d->write_dcd_command.param == DCD_WRITE_DATA_PARAM))
+			break;
+		d = d2;
+		d->write_dcd_command.tag = DCD_WRITE_DATA_COMMAND_TAG;
+		d->write_dcd_command.length = cpu_to_be16(4);
+		d->write_dcd_command.param = DCD_WRITE_DATA_PARAM;
+		break;
+	case CMD_WRITE_CLR_BIT:
+		if ((d->write_dcd_command.tag == DCD_WRITE_DATA_COMMAND_TAG) &&
+		    (d->write_dcd_command.param == DCD_WRITE_CLR_BIT_PARAM))
+			break;
+		d = d2;
+		d->write_dcd_command.tag = DCD_WRITE_DATA_COMMAND_TAG;
+		d->write_dcd_command.length = cpu_to_be16(4);
+		d->write_dcd_command.param = DCD_WRITE_CLR_BIT_PARAM;
+		break;
+	case CMD_WRITE_SET_BIT:
+		if ((d->write_dcd_command.tag == DCD_WRITE_DATA_COMMAND_TAG) &&
+		    (d->write_dcd_command.param == DCD_WRITE_SET_BIT_PARAM))
+			break;
+		d = d2;
+		d->write_dcd_command.tag = DCD_WRITE_DATA_COMMAND_TAG;
+		d->write_dcd_command.length = cpu_to_be16(4);
+		d->write_dcd_command.param = DCD_WRITE_SET_BIT_PARAM;
+		break;
+	/*
+	 * Check data commands only support a single entry
+	 */
+	case CMD_CHECK_BITS_SET:
+		d = d2;
+		d->write_dcd_command.tag = DCD_CHECK_DATA_COMMAND_TAG;
+		d->write_dcd_command.length = cpu_to_be16(4);
+		d->write_dcd_command.param = DCD_CHECK_BITS_SET_PARAM;
+		break;
+	case CMD_CHECK_BITS_CLR:
+		d = d2;
+		d->write_dcd_command.tag = DCD_CHECK_DATA_COMMAND_TAG;
+		d->write_dcd_command.length = cpu_to_be16(4);
+		d->write_dcd_command.param = DCD_CHECK_BITS_CLR_PARAM;
+		break;
+	default:
+		break;
+	}
+	gd_last_cmd = d;
+}
+
+static void set_dcd_val_v2(struct imx_header *imxhdr, char *name, int lineno,
+					int fld, uint32_t value, uint32_t off)
+{
+	struct dcd_v2_cmd *d = gd_last_cmd;
+	int len;
+
+	len = be16_to_cpu(d->write_dcd_command.length);
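+	/* Each DCD addr/value pair is 8 bytes; subtract the 4-byte command header to get the entry index */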
+	off = (len - 4) >> 3;
+
+	switch (fld) {
+	case CFG_REG_ADDRESS:
+		d->addr_data[off].addr = cpu_to_be32(value);
+		break;
+	case CFG_REG_VALUE:
+		d->addr_data[off].value = cpu_to_be32(value);
+		off++;
+		d->write_dcd_command.length = cpu_to_be16((off << 3) + 4);
+		break;
+	default:
+		break;
+
+	}
+}
+
+/*
+ * Complete setting up the remaining fields of the V1 DCD,
+ * such as the barker code and DCD data length.
+ */
+static void set_dcd_rst_v1(struct imx_header *imxhdr, uint32_t dcd_len,
+						char *name, int lineno)
+{
+	dcd_v1_t *dcd_v1 = &imxhdr->header.hdr_v1.dcd_table;
+
+	dcd_v1->preamble.barker = DCD_BARKER;
+	dcd_v1->preamble.length = dcd_len * sizeof(dcd_type_addr_data_t);
+}
+
+/*
+ * Complete setting up the remaining fields of the V2 DCD,
+ * such as the DCD tag, version, length, etc.
+ */
+static void set_dcd_rst_v2(struct imx_header *imxhdr, uint32_t dcd_len,
+						char *name, int lineno)
+{
+	if (!imxhdr->header.hdr_v2.boot_data.plugin) {
+		dcd_v2_t *dcd_v2 = &imxhdr->header.hdr_v2.data.dcd_table;
+		struct dcd_v2_cmd *d = gd_last_cmd;
+		int len;
+
+		if (!d)
+			d = &dcd_v2->dcd_cmd;
+		len = be16_to_cpu(d->write_dcd_command.length);
+		if (len > 4)
+			d = (struct dcd_v2_cmd *)(((char *)d) + len);
+
+		len = (char *)d - (char *)&dcd_v2->header;
+		dcd_v2->header.tag = DCD_HEADER_TAG;
+		dcd_v2->header.length = cpu_to_be16(len);
+		dcd_v2->header.version = DCD_VERSION;
+	}
+}
+
+static void set_imx_hdr_v1(struct imx_header *imxhdr, uint32_t dcd_len,
+		uint32_t entry_point, uint32_t flash_offset)
+{
+	imx_header_v1_t *hdr_v1 = &imxhdr->header.hdr_v1;
+	flash_header_v1_t *fhdr_v1 = &hdr_v1->fhdr;
+	dcd_v1_t *dcd_v1 = &hdr_v1->dcd_table;
+	uint32_t hdr_base;
+	uint32_t header_length = (((char *)&dcd_v1->addr_data[dcd_len].addr)
+			- ((char *)imxhdr));
+
+	/* Set magic number */
+	fhdr_v1->app_code_barker = APP_CODE_BARKER;
+
+	hdr_base = entry_point - imximage_init_loadsize + flash_offset;
+	fhdr_v1->app_dest_ptr = hdr_base - flash_offset;
+	fhdr_v1->app_code_jump_vector = entry_point;
+
+	fhdr_v1->dcd_ptr_ptr = hdr_base + offsetof(flash_header_v1_t, dcd_ptr);
+	fhdr_v1->dcd_ptr = hdr_base + offsetof(imx_header_v1_t, dcd_table);
+
+	/* Security features are not supported */
+	fhdr_v1->app_code_csf = 0;
+	fhdr_v1->super_root_key = 0;
+	header_size_ptr = (uint32_t *)(((char *)imxhdr) + header_length - 4);
+}
+
+static void set_imx_hdr_v2(struct imx_header *imxhdr, uint32_t dcd_len,
+		uint32_t entry_point, uint32_t flash_offset)
+{
+	imx_header_v2_t *hdr_v2 = &imxhdr->header.hdr_v2;
+	flash_header_v2_t *fhdr_v2 = &hdr_v2->fhdr;
+	uint32_t hdr_base;
+
+	/* Set magic number */
+	fhdr_v2->header.tag = IVT_HEADER_TAG; /* 0xD1 */
+	fhdr_v2->header.length = cpu_to_be16(sizeof(flash_header_v2_t));
+	fhdr_v2->header.version = IVT_VERSION; /* 0x40 */
+
+	if (!hdr_v2->boot_data.plugin) {
+		fhdr_v2->entry = entry_point;
+		fhdr_v2->reserved1 = 0;
+		fhdr_v2->reserved2 = 0;
+		hdr_base = entry_point - imximage_init_loadsize +
+			flash_offset;
+		fhdr_v2->self = hdr_base;
+		if (dcd_len > 0)
+			fhdr_v2->dcd_ptr = hdr_base +
+				offsetof(imx_header_v2_t, data);
+		else
+			fhdr_v2->dcd_ptr = 0;
+		fhdr_v2->boot_data_ptr = hdr_base
+				+ offsetof(imx_header_v2_t, boot_data);
+		hdr_v2->boot_data.start = entry_point - imximage_init_loadsize;
+
+		fhdr_v2->csf = 0;
+
+		header_size_ptr = &hdr_v2->boot_data.size;
+		csf_ptr = &fhdr_v2->csf;
+	} else {
+		imx_header_v2_t *next_hdr_v2;
+		flash_header_v2_t *next_fhdr_v2;
+
+		if (imximage_csf_size != 0) {
+			fprintf(stderr, "Error: Header v2: SECURE_BOOT is only supported in DCD mode!");
+			exit(EXIT_FAILURE);
+		}
+
+		fhdr_v2->entry = imximage_iram_free_start +
+			flash_offset + sizeof(flash_header_v2_t) +
+			sizeof(boot_data_t);
+
+		fhdr_v2->reserved1 = 0;
+		fhdr_v2->reserved2 = 0;
+		fhdr_v2->self = imximage_iram_free_start + flash_offset;
+
+		fhdr_v2->dcd_ptr = 0;
+
+		fhdr_v2->boot_data_ptr = fhdr_v2->self +
+				offsetof(imx_header_v2_t, boot_data);
+
+		hdr_v2->boot_data.start = imximage_iram_free_start;
+		/*
+		 * The actual size of the plugin image is "imximage_plugin_size +
+		 * sizeof(flash_header_v2_t) + sizeof(boot_data_t)", plus the
+		 * flash_offset space. The ROM code only needs to copy this much
+		 * to run the plugin code. However, later, when copying the whole
+		 * U-Boot image to DDR, the ROM code uses memcpy for the first
+		 * part of the image and the storage read function for the
+		 * remaining part. This requires the dividing point to be a
+		 * multiple of the storage sector size, so the first section is
+		 * set to MAX_PLUGIN_CODE_SIZE (64KB).
+		 */
+		hdr_v2->boot_data.size = MAX_PLUGIN_CODE_SIZE;
+
+		/* Security features are not supported */
+		fhdr_v2->csf = 0;
+
+		next_hdr_v2 = (imx_header_v2_t *)((char *)hdr_v2 +
+			       imximage_plugin_size);
+
+		next_fhdr_v2 = &next_hdr_v2->fhdr;
+
+		next_fhdr_v2->header.tag = IVT_HEADER_TAG; /* 0xD1 */
+		next_fhdr_v2->header.length =
+			cpu_to_be16(sizeof(flash_header_v2_t));
+		next_fhdr_v2->header.version = IVT_VERSION; /* 0x40 */
+
+		next_fhdr_v2->entry = entry_point;
+		hdr_base = entry_point - sizeof(struct imx_header);
+		next_fhdr_v2->reserved1 = 0;
+		next_fhdr_v2->reserved2 = 0;
+		next_fhdr_v2->self = hdr_base + imximage_plugin_size;
+
+		next_fhdr_v2->dcd_ptr = 0;
+		next_fhdr_v2->boot_data_ptr = next_fhdr_v2->self +
+				offsetof(imx_header_v2_t, boot_data);
+
+		next_hdr_v2->boot_data.start = hdr_base - flash_offset;
+
+		header_size_ptr = &next_hdr_v2->boot_data.size;
+
+		next_hdr_v2->boot_data.plugin = 0;
+
+		next_fhdr_v2->csf = 0;
+	}
+}
+
+static void set_hdr_func(void)
+{
+	switch (imximage_version) {
+	case IMXIMAGE_V1:
+		set_dcd_val = set_dcd_val_v1;
+		set_dcd_param = NULL;
+		set_dcd_rst = set_dcd_rst_v1;
+		set_imx_hdr = set_imx_hdr_v1;
+		max_dcd_entries = MAX_HW_CFG_SIZE_V1;
+		break;
+	case IMXIMAGE_V2:
+		gd_last_cmd = NULL;
+		set_dcd_val = set_dcd_val_v2;
+		set_dcd_param = set_dcd_param_v2;
+		set_dcd_rst = set_dcd_rst_v2;
+		set_imx_hdr = set_imx_hdr_v2;
+		max_dcd_entries = MAX_HW_CFG_SIZE_V2;
+		break;
+	default:
+		err_imximage_version(imximage_version);
+		break;
+	}
+}
+
+static void print_hdr_v1(struct imx_header *imx_hdr)
+{
+	imx_header_v1_t *hdr_v1 = &imx_hdr->header.hdr_v1;
+	flash_header_v1_t *fhdr_v1 = &hdr_v1->fhdr;
+	dcd_v1_t *dcd_v1 = &hdr_v1->dcd_table;
+	uint32_t size, length, ver;
+
+	size = dcd_v1->preamble.length;
+	if (size > (MAX_HW_CFG_SIZE_V1 * sizeof(dcd_type_addr_data_t))) {
+		fprintf(stderr,
+			"Error: Image corrupt DCD size %d exceed maximum %d\n",
+			(uint32_t)(size / sizeof(dcd_type_addr_data_t)),
+			MAX_HW_CFG_SIZE_V1);
+		exit(EXIT_FAILURE);
+	}
+
+	length = dcd_v1->preamble.length / sizeof(dcd_type_addr_data_t);
+	ver = detect_imximage_version(imx_hdr);
+
+	printf("Image Type:   Freescale IMX Boot Image\n");
+	printf("Image Ver:    %x", ver);
+	printf("%s\n", get_table_entry_name(imximage_versions, NULL, ver));
+	printf("Data Size:    ");
+	genimg_print_size(dcd_v1->addr_data[length].type);
+	printf("Load Address: %08x\n", (uint32_t)fhdr_v1->app_dest_ptr);
+	printf("Entry Point:  %08x\n", (uint32_t)fhdr_v1->app_code_jump_vector);
+}
+
+static void print_hdr_v2(struct imx_header *imx_hdr)
+{
+	imx_header_v2_t *hdr_v2 = &imx_hdr->header.hdr_v2;
+	flash_header_v2_t *fhdr_v2 = &hdr_v2->fhdr;
+	dcd_v2_t *dcd_v2 = &hdr_v2->data.dcd_table;
+	uint32_t size, version, plugin;
+
+	plugin = hdr_v2->boot_data.plugin;
+	if (!plugin) {
+		size = be16_to_cpu(dcd_v2->header.length);
+		if (size > (MAX_HW_CFG_SIZE_V2 * sizeof(dcd_addr_data_t))) {
+			fprintf(stderr,
+				"Error: Image corrupt DCD size %d exceed maximum %d\n",
+				(uint32_t)(size / sizeof(dcd_addr_data_t)),
+				MAX_HW_CFG_SIZE_V2);
+			exit(EXIT_FAILURE);
+		}
+	}
+
+	version = detect_imximage_version(imx_hdr);
+
+	printf("Image Type:   Freescale IMX Boot Image\n");
+	printf("Image Ver:    %x", version);
+	printf("%s\n", get_table_entry_name(imximage_versions, NULL, version));
+	printf("Mode:         %s\n", plugin ? "PLUGIN" : "DCD");
+	if (!plugin) {
+		printf("Data Size:    ");
+		genimg_print_size(hdr_v2->boot_data.size);
+		printf("Load Address: %08x\n", (uint32_t)fhdr_v2->boot_data_ptr);
+		printf("Entry Point:  %08x\n", (uint32_t)fhdr_v2->entry);
+		if (fhdr_v2->csf) {
+			uint16_t dcdlen;
+			int offs;
+
+			dcdlen = hdr_v2->data.dcd_table.header.length;
+			offs = (char *)&hdr_v2->data.dcd_table
+				- (char *)hdr_v2;
+
+			/*
+			 * The HAB block is the first part of the image, from
+			 * start of IVT header (fhdr_v2->self) to the start of
+			 * the CSF block (fhdr_v2->csf). So HAB size is
+			 * calculated as:
+			 * HAB_size = fhdr_v2->csf - fhdr_v2->self
+			 */
+			printf("HAB Blocks:   0x%08x 0x%08x 0x%08x\n",
+			       (uint32_t)fhdr_v2->self, 0,
+			       (uint32_t)(fhdr_v2->csf - fhdr_v2->self));
+			printf("DCD Blocks:   0x00910000 0x%08x 0x%08x\n",
+			       offs, be16_to_cpu(dcdlen));
+		}
+	} else {
+		imx_header_v2_t *next_hdr_v2;
+		flash_header_v2_t *next_fhdr_v2;
+
+		/*First Header*/
+		printf("Plugin Data Size:     ");
+		genimg_print_size(hdr_v2->boot_data.size);
+		printf("Plugin Code Size:     ");
+		genimg_print_size(imximage_plugin_size);
+		printf("Plugin Load Address:  %08x\n", hdr_v2->boot_data.start);
+		printf("Plugin Entry Point:   %08x\n", (uint32_t)fhdr_v2->entry);
+
+		/*Second Header*/
+		next_hdr_v2 = (imx_header_v2_t *)((char *)hdr_v2 +
+				imximage_plugin_size);
+		next_fhdr_v2 = &next_hdr_v2->fhdr;
+		printf("U-Boot Data Size:     ");
+		genimg_print_size(next_hdr_v2->boot_data.size);
+		printf("U-Boot Load Address:  %08x\n",
+		       next_hdr_v2->boot_data.start);
+		printf("U-Boot Entry Point:   %08x\n",
+		       (uint32_t)next_fhdr_v2->entry);
+	}
+}
+
+static void copy_plugin_code(struct imx_header *imxhdr, char *plugin_file)
+{
+	int ifd;
+	struct stat sbuf;
+	char *plugin_buf = imxhdr->header.hdr_v2.data.plugin_code;
+	char *ptr;
+
+	ifd = open(plugin_file, O_RDONLY|O_BINARY);
+	if (ifd < 0) {
+		fprintf(stderr, "Can't open %s: %s\n",
+			plugin_file,
+			strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (fstat(ifd, &sbuf) < 0) {
+		fprintf(stderr, "Can't stat %s: %s\n",
+			plugin_file,
+			strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	ptr = mmap(0, sbuf.st_size, PROT_READ, MAP_SHARED, ifd, 0);
+	if (ptr == MAP_FAILED) {
+		fprintf(stderr, "Can't read %s: %s\n",
+			plugin_file,
+			strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (sbuf.st_size > MAX_PLUGIN_CODE_SIZE) {
+		printf("plugin binary size too large\n");
+		exit(EXIT_FAILURE);
+	}
+
+	memcpy(plugin_buf, ptr, sbuf.st_size);
+	imximage_plugin_size = sbuf.st_size;
+
+	(void) munmap((void *)ptr, sbuf.st_size);
+	(void) close(ifd);
+
+	imxhdr->header.hdr_v2.boot_data.plugin = 1;
+}
+
+static void parse_cfg_cmd(struct imx_header *imxhdr, int32_t cmd, char *token,
+				char *name, int lineno, int fld, int dcd_len)
+{
+	int value;
+	static int cmd_ver_first = ~0;
+
+	switch (cmd) {
+	case CMD_IMAGE_VERSION:
+		imximage_version = get_cfg_value(token, name, lineno);
+		if (cmd_ver_first == 0) {
+			fprintf(stderr, "Error: %s[%d] - IMAGE_VERSION "
+				"command need be the first before other "
+				"valid command in the file\n", name, lineno);
+			exit(EXIT_FAILURE);
+		}
+		cmd_ver_first = 1;
+		set_hdr_func();
+		break;
+	case CMD_BOOT_FROM:
+		imximage_ivt_offset = get_table_entry_id(imximage_boot_offset,
+					"imximage boot option", token);
+		if (imximage_ivt_offset == -1) {
+			fprintf(stderr, "Error: %s[%d] -Invalid boot device"
+				"(%s)\n", name, lineno, token);
+			exit(EXIT_FAILURE);
+		}
+
+		imximage_init_loadsize =
+			get_table_entry_id(imximage_boot_loadsize,
+					   "imximage boot option", token);
+
+		if (imximage_init_loadsize == -1) {
+			fprintf(stderr,
+				"Error: %s[%d] -Invalid boot device(%s)\n",
+				name, lineno, token);
+			exit(EXIT_FAILURE);
+		}
+
+		/*
+		 * The SoC loads from storage starting at address 0,
+		 * so make sure the load size covers the IVT offset
+		 */
+		if (imximage_init_loadsize < imximage_ivt_offset)
+			imximage_init_loadsize = imximage_ivt_offset;
+		if (unlikely(cmd_ver_first != 1))
+			cmd_ver_first = 0;
+		break;
+	case CMD_BOOT_OFFSET:
+		imximage_ivt_offset = get_cfg_value(token, name, lineno);
+		if (unlikely(cmd_ver_first != 1))
+			cmd_ver_first = 0;
+		break;
+	case CMD_WRITE_DATA:
+	case CMD_WRITE_CLR_BIT:
+	case CMD_WRITE_SET_BIT:
+	case CMD_CHECK_BITS_SET:
+	case CMD_CHECK_BITS_CLR:
+		value = get_cfg_value(token, name, lineno);
+		if (set_dcd_param)
+			(*set_dcd_param)(imxhdr, dcd_len, cmd);
+		(*set_dcd_val)(imxhdr, name, lineno, fld, value, dcd_len);
+		if (unlikely(cmd_ver_first != 1))
+			cmd_ver_first = 0;
+		break;
+	case CMD_CSF:
+		if (imximage_version != 2) {
+			fprintf(stderr,
+				"Error: %s[%d] - CSF only supported for VERSION 2(%s)\n",
+				name, lineno, token);
+			exit(EXIT_FAILURE);
+		}
+		imximage_csf_size = get_cfg_value(token, name, lineno);
+		if (unlikely(cmd_ver_first != 1))
+			cmd_ver_first = 0;
+		break;
+	case CMD_PLUGIN:
+		plugin_image = 1;
+		copy_plugin_code(imxhdr, token);
+		break;
+	}
+}
+
+static void parse_cfg_fld(struct imx_header *imxhdr, int32_t *cmd,
+		char *token, char *name, int lineno, int fld, int *dcd_len)
+{
+	int value;
+
+	switch (fld) {
+	case CFG_COMMAND:
+		*cmd = get_table_entry_id(imximage_cmds,
+			"imximage commands", token);
+		if (*cmd < 0) {
+			fprintf(stderr, "Error: %s[%d] - Invalid command"
+			"(%s)\n", name, lineno, token);
+			exit(EXIT_FAILURE);
+		}
+		break;
+	case CFG_REG_SIZE:
+		parse_cfg_cmd(imxhdr, *cmd, token, name, lineno, fld, *dcd_len);
+		break;
+	case CFG_REG_ADDRESS:
+	case CFG_REG_VALUE:
+		switch(*cmd) {
+		case CMD_WRITE_DATA:
+		case CMD_WRITE_CLR_BIT:
+		case CMD_WRITE_SET_BIT:
+		case CMD_CHECK_BITS_SET:
+		case CMD_CHECK_BITS_CLR:
+
+			value = get_cfg_value(token, name, lineno);
+			if (set_dcd_param)
+				(*set_dcd_param)(imxhdr, *dcd_len, *cmd);
+			(*set_dcd_val)(imxhdr, name, lineno, fld, value,
+					*dcd_len);
+
+			if (fld == CFG_REG_VALUE) {
+				(*dcd_len)++;
+				if (*dcd_len > max_dcd_entries) {
+					fprintf(stderr, "Error: %s[%d] -"
+						"DCD table exceeds maximum size(%d)\n",
+						name, lineno, max_dcd_entries);
+					exit(EXIT_FAILURE);
+				}
+			}
+			break;
+		case CMD_PLUGIN:
+			value = get_cfg_value(token, name, lineno);
+			imximage_iram_free_start = value;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static uint32_t parse_cfg_file(struct imx_header *imxhdr, char *name)
+{
+	FILE *fd = NULL;
+	char *line = NULL;
+	char *token, *saveptr1, *saveptr2;
+	int lineno = 0;
+	int fld;
+	size_t len = 0;
+	int dcd_len = 0;
+	int32_t cmd;
+
+	fd = fopen(name, "r");
+	if (fd == 0) {
+		fprintf(stderr, "Error: %s - Can't open DCD file\n", name);
+		exit(EXIT_FAILURE);
+	}
+
+	/*
+	 * Very simple parsing, line starting with # are comments
+	 * and are dropped
+	 */
+	while ((getline(&line, &len, fd)) > 0) {
+		lineno++;
+
+		token = strtok_r(line, "\r\n", &saveptr1);
+		if (token == NULL)
+			continue;
+
+		/* Check inside the single line */
+		for (fld = CFG_COMMAND, cmd = CMD_INVALID,
+				line = token; ; line = NULL, fld++) {
+			token = strtok_r(line, " \t", &saveptr2);
+			if (token == NULL)
+				break;
+
+			/* Drop all text starting with '#' as comments */
+			if (token[0] == '#')
+				break;
+
+			parse_cfg_fld(imxhdr, &cmd, token, name,
+					lineno, fld, &dcd_len);
+		}
+
+	}
+
+	(*set_dcd_rst)(imxhdr, dcd_len, name, lineno);
+	fclose(fd);
+
+	/* Exit if there is no BOOT_FROM field specifying the flash_offset */
+	if (imximage_ivt_offset == FLASH_OFFSET_UNDEFINED) {
+		fprintf(stderr, "Error: No BOOT_FROM tag in %s\n", name);
+		exit(EXIT_FAILURE);
+	}
+	return dcd_len;
+}
+
+static int imximage_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_IMXIMAGE)
+		return EXIT_SUCCESS;
+	else
+		return EXIT_FAILURE;
+}
+
+static int imximage_verify_header(unsigned char *ptr, int image_size,
+			struct image_tool_params *params)
+{
+	struct imx_header *imx_hdr = (struct imx_header *) ptr;
+
+	if (detect_imximage_version(imx_hdr) == IMXIMAGE_VER_INVALID)
+		return -FDT_ERR_BADSTRUCTURE;
+
+	return 0;
+}
+
+static void imximage_print_header(const void *ptr)
+{
+	struct imx_header *imx_hdr = (struct imx_header *) ptr;
+	uint32_t version = detect_imximage_version(imx_hdr);
+
+	switch (version) {
+	case IMXIMAGE_V1:
+		print_hdr_v1(imx_hdr);
+		break;
+	case IMXIMAGE_V2:
+		print_hdr_v2(imx_hdr);
+		break;
+	default:
+		err_imximage_version(version);
+		break;
+	}
+}
+
+static void imximage_set_header(void *ptr, struct stat *sbuf, int ifd,
+				struct image_tool_params *params)
+{
+	struct imx_header *imxhdr = (struct imx_header *)ptr;
+	uint32_t dcd_len;
+	uint32_t header_size;
+
+	/*
+	 * In order to not change the old imx cfg file
+	 * by adding VERSION command into it, here need
+	 * set up function ptr group to V1 by default.
+	 */
+	imximage_version = IMXIMAGE_V1;
+	/* Be able to detect if the cfg file has no BOOT_FROM tag */
+	imximage_ivt_offset = FLASH_OFFSET_UNDEFINED;
+	imximage_csf_size = 0;
+	set_hdr_func();
+
+	/* Parse dcd configuration file */
+	dcd_len = parse_cfg_file(imxhdr, params->imagename);
+
+	if (imximage_version == IMXIMAGE_V1)
+		header_size = sizeof(flash_header_v1_t);
+	else {
+		header_size = sizeof(flash_header_v2_t) + sizeof(boot_data_t);
+		if (!plugin_image)
+			header_size += sizeof(dcd_v2_t);
+		else
+			header_size += MAX_PLUGIN_CODE_SIZE;
+	}
+
+	if (imximage_init_loadsize < imximage_ivt_offset + header_size)
+		imximage_init_loadsize = imximage_ivt_offset + header_size;
+
+	/* Set the imx header */
+	(*set_imx_hdr)(imxhdr, dcd_len, params->ep, imximage_ivt_offset);
+
+	/*
+	 * ROM bug alert
+	 *
+	 * MX53 only loads 512 byte multiples in case of SD boot.
+	 * MX53 only loads NAND page multiples in case of NAND boot and
+	 * supports up to 4096 byte large pages, thus align to 4096.
+	 *
+	 * Any remaining fraction of a block would not be loaded!
+	 */
+	*header_size_ptr = ROUND((sbuf->st_size + imximage_ivt_offset), 4096);
+
+	if (csf_ptr && imximage_csf_size) {
+		*csf_ptr = params->ep - imximage_init_loadsize +
+			*header_size_ptr;
+		*header_size_ptr += imximage_csf_size;
+	}
+}
+
+int imximage_check_params(struct image_tool_params *params)
+{
+	if (!params)
+		return CFG_INVALID;
+	if (!strlen(params->imagename)) {
+		fprintf(stderr, "Error: %s - Configuration file not specified, "
+			"it is needed for imximage generation\n",
+			params->cmdname);
+		return CFG_INVALID;
+	}
+	/*
+	 * Check parameters:
+	 * XIP is not allowed, and incompatible parameters must not be
+	 * passed at the same time.
+	 * For example, if listing is requested, a data image must not be provided.
+	 */
+	return	(params->dflag && (params->fflag || params->lflag)) ||
+		(params->fflag && (params->dflag || params->lflag)) ||
+		(params->lflag && (params->dflag || params->fflag)) ||
+		(params->xflag) || !(strlen(params->imagename));
+}
+
+static int imximage_generate(struct image_tool_params *params,
+	struct image_type_params *tparams)
+{
+	struct imx_header *imxhdr;
+	size_t alloc_len;
+	struct stat sbuf;
+	char *datafile = params->datafile;
+	uint32_t pad_len, header_size;
+
+	memset(&imximage_header, 0, sizeof(imximage_header));
+
+	/*
+	 * In order to not change the old imx cfg file
+	 * by adding VERSION command into it, here need
+	 * set up function ptr group to V1 by default.
+	 */
+	imximage_version = IMXIMAGE_V1;
+	/* Be able to detect if the cfg file has no BOOT_FROM tag */
+	imximage_ivt_offset = FLASH_OFFSET_UNDEFINED;
+	imximage_csf_size = 0;
+	set_hdr_func();
+
+	/* Parse dcd configuration file */
+	parse_cfg_file(&imximage_header, params->imagename);
+
+	if (imximage_version == IMXIMAGE_V1)
+		header_size = sizeof(imx_header_v1_t);
+	else {
+		header_size = sizeof(flash_header_v2_t) + sizeof(boot_data_t);
+		if (!plugin_image)
+			header_size += sizeof(dcd_v2_t);
+		else
+			header_size += MAX_PLUGIN_CODE_SIZE;
+	}
+
+	if (imximage_init_loadsize < imximage_ivt_offset + header_size)
+		imximage_init_loadsize = imximage_ivt_offset + header_size;
+
+	alloc_len = imximage_init_loadsize - imximage_ivt_offset;
+
+	if (alloc_len < header_size) {
+		fprintf(stderr, "%s: header error\n",
+			params->cmdname);
+		exit(EXIT_FAILURE);
+	}
+
+	imxhdr = malloc(alloc_len);
+
+	if (!imxhdr) {
+		fprintf(stderr, "%s: malloc return failure: %s\n",
+			params->cmdname, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	memset(imxhdr, 0, alloc_len);
+
+	tparams->header_size = alloc_len;
+	tparams->hdr         = imxhdr;
+
+	/* determine data image file length */
+
+	if (stat(datafile, &sbuf) < 0) {
+		fprintf(stderr, "%s: Can't stat %s: %s\n",
+			params->cmdname, datafile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	pad_len = ROUND(sbuf.st_size, 4096) - sbuf.st_size;
+
+	return pad_len;
+}
+
+/*
+ * imximage parameters
+ */
+U_BOOT_IMAGE_TYPE(
+	imximage,
+	"Freescale i.MX Boot Image support",
+	0,
+	NULL,
+	imximage_check_params,
+	imximage_verify_header,
+	imximage_print_header,
+	imximage_set_header,
+	NULL,
+	imximage_check_image_types,
+	NULL,
+	imximage_generate
+);
diff --git a/tools/u-boot-tools/imximage.o b/tools/u-boot-tools/imximage.o
new file mode 100644
index 0000000000000000000000000000000000000000..8d06d2b4bec88ad1c67cae73893f1b377736a3ba
Binary files /dev/null and b/tools/u-boot-tools/imximage.o differ
diff --git a/tools/u-boot-tools/jtagconsole b/tools/u-boot-tools/jtagconsole
new file mode 100755
index 0000000000000000000000000000000000000000..d404fac5e802a2182974ff354f2dd3846797f743
--- /dev/null
+++ b/tools/u-boot-tools/jtagconsole
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+usage() {
+	(
+	echo "Usage: $0 [board IP] [board port]"
+	echo ""
+	echo "If IP is not specified, 'localhost' will be used"
+	echo "If port is not specified, '2001' will be used"
+	[ -z "$*" ] && exit 0
+	echo ""
+	echo "ERROR: $*"
+	exit 1
+	) 1>&2
+	exit $?
+}
+
+while [ -n "$1" ] ; do
+	case $1 in
+		-h|--help) usage;;
+		--)        break;;
+		-*)        usage "Invalid option $1";;
+		*)         break;;
+	esac
+	shift
+done
+
+ip=${1:-localhost}
+port=${2:-2001}
+
+if [ -z "${ip}" ] || [ -n "$3" ] ; then
+	usage "Invalid number of arguments"
+fi
+
+trap "stty icanon echo opost intr ^C" 0 2 3 5 10 13 15
+echo "NOTE: the interrupt signal (normally ^C) has been remapped to ^T"
+
+stty -icanon -echo -opost intr ^T
+nc ${ip} ${port}
+exit 0
diff --git a/tools/u-boot-tools/k3_fit_atf.sh b/tools/u-boot-tools/k3_fit_atf.sh
new file mode 100755
index 0000000000000000000000000000000000000000..430b5ca616ae069200b5df2e4846ba55795c278d
--- /dev/null
+++ b/tools/u-boot-tools/k3_fit_atf.sh
@@ -0,0 +1,99 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# script to generate FIT image source for K3 Family boards with
+# ATF, OPTEE, SPL and multiple device trees (given on the command line).
+# Inspired from board/sunxi/mksunxi_fit_atf.sh
+#
+# usage: $0 <dt_name> [<dt_name> ...]
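+#
+# example (file names are illustrative):
+#   ATF=bl31.bin TEE=bl32.bin ./k3_fit_atf.sh k3-am654-base-board.dtb > fitimage.its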
+
+[ -z "$ATF" ] && ATF="bl31.bin"
+
+if [ ! -f $ATF ]; then
+	echo "WARNING ATF file $ATF NOT found, resulting binary is non-functional" >&2
+	ATF=/dev/null
+fi
+
+[ -z "$TEE" ] && TEE="bl32.bin"
+
+if [ ! -f $TEE ]; then
+	echo "WARNING OPTEE file $TEE NOT found, resulting binary might be non-functional" >&2
+	TEE=/dev/null
+fi
+
+cat << __HEADER_EOF
+/dts-v1/;
+
+/ {
+	description = "Configuration to load ATF and SPL";
+	#address-cells = <1>;
+
+	images {
+		atf {
+			description = "ARM Trusted Firmware";
+			data = /incbin/("$ATF");
+			type = "firmware";
+			arch = "arm64";
+			compression = "none";
+			os = "arm-trusted-firmware";
+			load = <0x70000000>;
+			entry = <0x70000000>;
+		};
+		tee {
+			description = "OPTEE";
+			data = /incbin/("$TEE");
+			type = "tee";
+			arch = "arm64";
+			compression = "none";
+			os = "tee";
+			load = <0x9e800000>;
+			entry = <0x9e800000>;
+		};
+		spl {
+			description = "SPL (64-bit)";
+			data = /incbin/("spl/u-boot-spl-nodtb.bin");
+			type = "standalone";
+			os = "U-Boot";
+			arch = "arm64";
+			compression = "none";
+			load = <0x80080000>;
+			entry = <0x80080000>;
+		};
+__HEADER_EOF
+
+for dtname in $*
+do
+	cat << __FDT_IMAGE_EOF
+		$(basename $dtname) {
+			description = "$(basename $dtname .dtb)";
+			data = /incbin/("$dtname");
+			type = "flat_dt";
+			arch = "arm";
+			compression = "none";
+		};
+__FDT_IMAGE_EOF
+done
+
+cat << __CONF_HEADER_EOF
+	};
+	configurations {
+		default = "$(basename $1)";
+
+__CONF_HEADER_EOF
+
+for dtname in $*
+do
+	cat << __CONF_SECTION_EOF
+		$(basename $dtname) {
+			description = "$(basename $dtname .dtb)";
+			firmware = "atf";
+			loadables = "tee", "spl";
+			fdt = "$(basename $dtname)";
+		};
+__CONF_SECTION_EOF
+done
+
+cat << __ITS_EOF
+	};
+};
+__ITS_EOF
diff --git a/tools/u-boot-tools/k3_x509template.txt b/tools/u-boot-tools/k3_x509template.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bd3a9ab05608ee397f994dde3eff751156785fbc
--- /dev/null
+++ b/tools/u-boot-tools/k3_x509template.txt
@@ -0,0 +1,48 @@
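+# Note: the TEST_* tokens below are placeholders; they are expected to be
+# substituted with real values (e.g. by the K3 certificate generation
+# script) before this config is passed to openssl.
+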
+ [ req ]
+ distinguished_name     = req_distinguished_name
+ x509_extensions        = v3_ca
+ prompt                 = no
+ dirstring_type         = nobmp
+
+ [ req_distinguished_name ]
+ C                      = US
+ ST                     = TX
+ L                      = Dallas
+ O                      = Texas Instruments Incorporated
+ OU                     = Processors
+ CN                     = TI Support
+ emailAddress           = support@ti.com
+
+ [ v3_ca ]
+ basicConstraints = CA:true
+ 1.3.6.1.4.1.294.1.1 = ASN1:SEQUENCE:boot_seq
+ 1.3.6.1.4.1.294.1.2 = ASN1:SEQUENCE:image_integrity
+ 1.3.6.1.4.1.294.1.3 = ASN1:SEQUENCE:swrv
+# 1.3.6.1.4.1.294.1.4 = ASN1:SEQUENCE:encryption
+ 1.3.6.1.4.1.294.1.8 = ASN1:SEQUENCE:debug
+
+ [ boot_seq ]
+ certType = INTEGER:TEST_CERT_TYPE
+ bootCore = INTEGER:TEST_BOOT_CORE
+ bootCoreOpts = INTEGER:TEST_BOOT_ARCH_WIDTH
+ destAddr = FORMAT:HEX,OCT:TEST_BOOT_ADDR
+ imageSize = INTEGER:TEST_IMAGE_LENGTH
+
+ [ image_integrity ]
+ shaType = OID:2.16.840.1.101.3.4.2.3
+ shaValue = FORMAT:HEX,OCT:TEST_IMAGE_SHA_VAL
+
+ [ swrv ]
+ swrv = INTEGER:0
+
+# [ encryption ]
+# initalVector = FORMAT:HEX,OCT:TEST_IMAGE_ENC_IV
+# randomString = FORMAT:HEX,OCT:TEST_IMAGE_ENC_RS
+# iterationCnt = INTEGER:TEST_IMAGE_KEY_DERIVE_INDEX
+# salt = FORMAT:HEX,OCT:TEST_IMAGE_KEY_DERIVE_SALT
+
+ [ debug ]
+ debugType = INTEGER:4
+ coreDbgEn = INTEGER:0
+ coreDbgSecEn = INTEGER:0
+ debugUID = FORMAT:HEX,OCT:0000000000000000000000000000000000000000000000000000000000000000
diff --git a/tools/u-boot-tools/kermit/README b/tools/u-boot-tools/kermit/README
new file mode 100644
index 0000000000000000000000000000000000000000..2b3f0b5cd48fe46e1cbcb13ead8a3258e991669d
--- /dev/null
+++ b/tools/u-boot-tools/kermit/README
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# (C) Copyright 2001
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+
+This directory contains scripts that help to perform certain actions
+that need to be done frequently when working with U-Boot.
+
+They are meant as EXAMPLE code, so it is very likely that you will
+have to modify them before use.
+
+
+Short description:
+==================
+
+dot.kermrc:
+
+	Example for "~/.kermrc" Kermit init file for use with U-Boot
+
+	by Wolfgang Denk, 24 Jun 2001
+
+flash_param:
+
+	"kermit" script to automatically initialize the environment
+	variables on your target. This is most useful during
+	development when your environment variables are stored in an
+	embedded flash sector which is erased whenever you install a
+	new U-Boot image.
+
+	by Swen Anderson, 10 May 2001
+
+send_cmd:
+
+	send_cmd U_BOOT_COMMAND
+
+	"kermit" script to send a U-Boot command and print the
+	results. When used from a shell with history (like bash), this
+	indirectly adds a kind of history to U-Boot ;-)
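+
+	Example: "send_cmd printenv" prints the target environment
+	(any other U-Boot command can be passed the same way).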
+
+	by Swen Anderson, 10 May 2001
+
+send_image:
+
+	send_image FILE_NAME OFFSET
+
+	"kermit" script to automatically download a file to the
+	target using the "loadb" command (kermit binary protocol)
+
+	by Swen Anderson, 10 May 2001
diff --git a/tools/u-boot-tools/kermit/dot.kermrc b/tools/u-boot-tools/kermit/dot.kermrc
new file mode 100644
index 0000000000000000000000000000000000000000..0fc6c15d35fa88ceabd04c447b8cba330fdb3d3f
--- /dev/null
+++ b/tools/u-boot-tools/kermit/dot.kermrc
@@ -0,0 +1,16 @@
+set line /dev/ttyS0
+set speed 115200
+set carrier-watch off
+set handshake none
+set flow-control none
+robust
+set file type bin
+set file name lit
+set rec pack 1000
+set send pack 1000
+set window 5
+set prompt Kermit>
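+# convenience wrappers: run the external sz/rz/sx/rx transfer programs
+# over the same serial line (\v(line))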
+define sz !sz \%1 \%2 \%3 \%4 \%5 \%6 \%7 \%8 \%9 < \v(line) > \v(line)
+define rz !rz \%1 \%2 \%3 \%4 \%5 \%6 \%7 \%8 \%9 < \v(line) > \v(line)
+define sx !sx \%1 \%2 \%3 \%4 \%5 \%6 \%7 \%8 \%9 < \v(line) > \v(line)
+define rx !rx \%1 \%2 \%3 \%4 \%5 \%6 \%7 \%8 \%9 < \v(line) > \v(line)
diff --git a/tools/u-boot-tools/kermit/flash_param b/tools/u-boot-tools/kermit/flash_param
new file mode 100644
index 0000000000000000000000000000000000000000..847f99e1ebd0364eb9bc58ebeb5af61128c772be
--- /dev/null
+++ b/tools/u-boot-tools/kermit/flash_param
@@ -0,0 +1,60 @@
+#!/usr/bin/kermit +
+# usage: ./flash_param parameters
+# Parameters: IP Address       ETH Address        ERIC Number
+# Format:     xxx.xxx.xxx.xxx  xx:xx:xx:xx:xx:xx  xxxx
+
+set line /dev/ttyS0
+set speed 115200
+set serial 8N1
+set carrier-watch off
+set handshake none
+#set flow-control none
+set flow-control xon/xoff
+#robust
+set file type bin
+set file name lit
+set rec pack 1000
+set send pack 1000
+set window 5
+set prompt Kermit>
+#robust
+# Milliseconds to pause between each OUTPUT character
+set output pacing 1
+
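+# "out" sends a string to the target; "in <secs> <text>" waits up to
+# <secs> seconds for <text> (here the U-Boot prompt "=>") to appear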
+out \13
+in 10 =>
+#first erase the environment memory within NVRAM
+out mw f0000000 0 200\13
+in 10 =>
+out reset\13
+in 5 autoboot
+out \13\13
+in 10 =>
+#set additional env parameter
+out setenv ethaddr \%2\13
+in 10 =>
+out setenv serial# ERIC 1.0 \%3\13
+in 10 =>
+out setenv eric_id \%3\13
+in 10 =>
+#out setenv prec_videocard_bus unknown\13
+#in 10 =>
+#out setenv prec_bios_type unknown\13
+#in 10 =>
+out setenv eric_passwd .eRIC.\13
+in 10 =>
+#out setenv bootargs root=/dev/ram ramdisk_size=8192 init=/sbin/init ip=\%1:192.168.1.100:192.168.1.254:255.255.255.0\13
+#out setenv bootargs root=/dev/ram ramdisk_size=8192 init=/sbin/init ip=\%1:192.168.0.1\13
+#out setenv bootargs root=/dev/ram ramdisk_size=8192 init=/sbin/init ip=\%1\13
+out setenv bootargs console=/dev/ttyS0,115200 root=/dev/nfs nfsroot=192.168.1.26:/eric_root_devel ip=\%1:192.168.1.26\13
+in 10 =>
+out setenv bootcmd bootm FFC00000\13
+in 10 =>
+out saveenv\13
+in 10 =>
+out reset\13
+in 5 autoboot
+out \13\13
+in 10 =>
+quit
+exit 0
diff --git a/tools/u-boot-tools/kermit/send_cmd b/tools/u-boot-tools/kermit/send_cmd
new file mode 100644
index 0000000000000000000000000000000000000000..4131331f0388430302c400d8b063ebdf360a1af0
--- /dev/null
+++ b/tools/u-boot-tools/kermit/send_cmd
@@ -0,0 +1,21 @@
+#!/usr/bin/kermit +
+set line /dev/ttyS0
+set speed 115200
+set serial 8N1
+set carrier-watch off
+set handshake none
+set flow-control none
+robust
+set file type bin
+set file name lit
+set rec pack 1000
+set send pack 1000
+set window 5
+set prompt Kermit>
+
+#out \13
+#in 10 =>
+out \%1 \%2 \%3 \%4 \%5 \%6 \%7\13
+in 10 =>
+quit
+exit 0
diff --git a/tools/u-boot-tools/kermit/send_image b/tools/u-boot-tools/kermit/send_image
new file mode 100644
index 0000000000000000000000000000000000000000..9b89d6b059845684615c9eb9689ba5abdc41df48
--- /dev/null
+++ b/tools/u-boot-tools/kermit/send_image
@@ -0,0 +1,26 @@
+#!/usr/bin/kermit +
+# usage: send_image FILE_NAME OFFSET
+# e.g.   send_image output.bin 1F00000
+set line /dev/ttyS0
+set speed 115200
+set serial 8N1
+set carrier-watch off
+set handshake none
+set flow-control none
+robust
+set file type bin
+set file name lit
+set rec pack 1000
+set send pack 1000
+set window 5
+set prompt Kermit>
+
+out \13
+in 10 =>
+out loadb \%2 \13
+in 10 download ...
+send \%1
+out \13
+in 10 ## Start Addr
+quit
+exit 0
diff --git a/tools/u-boot-tools/kwbimage.c b/tools/u-boot-tools/kwbimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..a88a3830c0c8d41c93765474622205f8b43c22d8
--- /dev/null
+++ b/tools/u-boot-tools/kwbimage.c
@@ -0,0 +1,1757 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Image manipulator for Marvell SoCs
+ *  supports Kirkwood, Dove, Armada 370, Armada XP, and Armada 38x
+ *
+ * (C) Copyright 2013 Thomas Petazzoni
+ * <thomas.petazzoni@free-electrons.com>
+ *
+ * Not implemented: support for the register headers in v1 images
+ */
+
+#include "imagetool.h"
+#include <limits.h>
+#include <image.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include "kwbimage.h"
+
+#ifdef CONFIG_KWB_SECURE
+#include <openssl/bn.h>
+#include <openssl/rsa.h>
+#include <openssl/pem.h>
+#include <openssl/err.h>
+#include <openssl/evp.h>
+
+#if OPENSSL_VERSION_NUMBER < 0x10100000L || \
+    (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x2070000fL)
+static void RSA_get0_key(const RSA *r,
+                 const BIGNUM **n, const BIGNUM **e, const BIGNUM **d)
+{
+   if (n != NULL)
+       *n = r->n;
+   if (e != NULL)
+       *e = r->e;
+   if (d != NULL)
+       *d = r->d;
+}
+
+#elif !defined(LIBRESSL_VERSION_NUMBER)
+void EVP_MD_CTX_cleanup(EVP_MD_CTX *ctx)
+{
+	EVP_MD_CTX_reset(ctx);
+}
+#endif
+#endif
+
+static struct image_cfg_element *image_cfg;
+static int cfgn;
+#ifdef CONFIG_KWB_SECURE
+static int verbose_mode;
+#endif
+
+struct boot_mode {
+	unsigned int id;
+	const char *name;
+};
+
+/*
+ * SHA2-256 hash
+ */
+struct hash_v1 {
+	uint8_t hash[32];
+};
+
+struct boot_mode boot_modes[] = {
+	{ 0x4D, "i2c"  },
+	{ 0x5A, "spi"  },
+	{ 0x8B, "nand" },
+	{ 0x78, "sata" },
+	{ 0x9C, "pex"  },
+	{ 0x69, "uart" },
+	{ 0xAE, "sdio" },
+	{},
+};
+
+struct nand_ecc_mode {
+	unsigned int id;
+	const char *name;
+};
+
+struct nand_ecc_mode nand_ecc_modes[] = {
+	{ 0x00, "default" },
+	{ 0x01, "hamming" },
+	{ 0x02, "rs" },
+	{ 0x03, "disabled" },
+	{},
+};
+
+/* Used to identify an undefined execution or destination address */
+#define ADDR_INVALID ((uint32_t)-1)
+
+#define BINARY_MAX_ARGS 8
+
+/* In-memory representation of a line of the configuration file */
+
+enum image_cfg_type {
+	IMAGE_CFG_VERSION = 0x1,
+	IMAGE_CFG_BOOT_FROM,
+	IMAGE_CFG_DEST_ADDR,
+	IMAGE_CFG_EXEC_ADDR,
+	IMAGE_CFG_NAND_BLKSZ,
+	IMAGE_CFG_NAND_BADBLK_LOCATION,
+	IMAGE_CFG_NAND_ECC_MODE,
+	IMAGE_CFG_NAND_PAGESZ,
+	IMAGE_CFG_BINARY,
+	IMAGE_CFG_PAYLOAD,
+	IMAGE_CFG_DATA,
+	IMAGE_CFG_BAUDRATE,
+	IMAGE_CFG_DEBUG,
+	IMAGE_CFG_KAK,
+	IMAGE_CFG_CSK,
+	IMAGE_CFG_CSK_INDEX,
+	IMAGE_CFG_JTAG_DELAY,
+	IMAGE_CFG_BOX_ID,
+	IMAGE_CFG_FLASH_ID,
+	IMAGE_CFG_SEC_COMMON_IMG,
+	IMAGE_CFG_SEC_SPECIALIZED_IMG,
+	IMAGE_CFG_SEC_BOOT_DEV,
+	IMAGE_CFG_SEC_FUSE_DUMP,
+
+	IMAGE_CFG_COUNT
+} type;
+
+static const char * const id_strs[] = {
+	[IMAGE_CFG_VERSION] = "VERSION",
+	[IMAGE_CFG_BOOT_FROM] = "BOOT_FROM",
+	[IMAGE_CFG_DEST_ADDR] = "DEST_ADDR",
+	[IMAGE_CFG_EXEC_ADDR] = "EXEC_ADDR",
+	[IMAGE_CFG_NAND_BLKSZ] = "NAND_BLKSZ",
+	[IMAGE_CFG_NAND_BADBLK_LOCATION] = "NAND_BADBLK_LOCATION",
+	[IMAGE_CFG_NAND_ECC_MODE] = "NAND_ECC_MODE",
+	[IMAGE_CFG_NAND_PAGESZ] = "NAND_PAGE_SIZE",
+	[IMAGE_CFG_BINARY] = "BINARY",
+	[IMAGE_CFG_PAYLOAD] = "PAYLOAD",
+	[IMAGE_CFG_DATA] = "DATA",
+	[IMAGE_CFG_BAUDRATE] = "BAUDRATE",
+	[IMAGE_CFG_DEBUG] = "DEBUG",
+	[IMAGE_CFG_KAK] = "KAK",
+	[IMAGE_CFG_CSK] = "CSK",
+	[IMAGE_CFG_CSK_INDEX] = "CSK_INDEX",
+	[IMAGE_CFG_JTAG_DELAY] = "JTAG_DELAY",
+	[IMAGE_CFG_BOX_ID] = "BOX_ID",
+	[IMAGE_CFG_FLASH_ID] = "FLASH_ID",
+	[IMAGE_CFG_SEC_COMMON_IMG] = "SEC_COMMON_IMG",
+	[IMAGE_CFG_SEC_SPECIALIZED_IMG] = "SEC_SPECIALIZED_IMG",
+	[IMAGE_CFG_SEC_BOOT_DEV] = "SEC_BOOT_DEV",
+	[IMAGE_CFG_SEC_FUSE_DUMP] = "SEC_FUSE_DUMP"
+};
+
+struct image_cfg_element {
+	enum image_cfg_type type;
+	union {
+		unsigned int version;
+		unsigned int bootfrom;
+		struct {
+			const char *file;
+			unsigned int args[BINARY_MAX_ARGS];
+			unsigned int nargs;
+		} binary;
+		const char *payload;
+		unsigned int dstaddr;
+		unsigned int execaddr;
+		unsigned int nandblksz;
+		unsigned int nandbadblklocation;
+		unsigned int nandeccmode;
+		unsigned int nandpagesz;
+		struct ext_hdr_v0_reg regdata;
+		unsigned int baudrate;
+		unsigned int debug;
+		const char *key_name;
+		int csk_idx;
+		uint8_t jtag_delay;
+		uint32_t boxid;
+		uint32_t flashid;
+		bool sec_specialized_img;
+		unsigned int sec_boot_dev;
+		const char *name;
+	};
+};
+
+#define IMAGE_CFG_ELEMENT_MAX 256
+
+/*
+ * Utility functions to manipulate boot mode and ecc modes (convert
+ * them back and forth between description strings and the
+ * corresponding numerical identifiers).
+ */
+
+static const char *image_boot_mode_name(unsigned int id)
+{
+	int i;
+
+	for (i = 0; boot_modes[i].name; i++)
+		if (boot_modes[i].id == id)
+			return boot_modes[i].name;
+	return NULL;
+}
+
+int image_boot_mode_id(const char *boot_mode_name)
+{
+	int i;
+
+	for (i = 0; boot_modes[i].name; i++)
+		if (!strcmp(boot_modes[i].name, boot_mode_name))
+			return boot_modes[i].id;
+
+	return -1;
+}
+
+int image_nand_ecc_mode_id(const char *nand_ecc_mode_name)
+{
+	int i;
+
+	for (i = 0; nand_ecc_modes[i].name; i++)
+		if (!strcmp(nand_ecc_modes[i].name, nand_ecc_mode_name))
+			return nand_ecc_modes[i].id;
+	return -1;
+}
+
+static struct image_cfg_element *
+image_find_option(unsigned int optiontype)
+{
+	int i;
+
+	for (i = 0; i < cfgn; i++) {
+		if (image_cfg[i].type == optiontype)
+			return &image_cfg[i];
+	}
+
+	return NULL;
+}
+
+static unsigned int
+image_count_options(unsigned int optiontype)
+{
+	int i;
+	unsigned int count = 0;
+
+	for (i = 0; i < cfgn; i++)
+		if (image_cfg[i].type == optiontype)
+			count++;
+
+	return count;
+}
+
+#if defined(CONFIG_KWB_SECURE)
+
+static int image_get_csk_index(void)
+{
+	struct image_cfg_element *e;
+
+	e = image_find_option(IMAGE_CFG_CSK_INDEX);
+	if (!e)
+		return -1;
+
+	return e->csk_idx;
+}
+
+static bool image_get_spezialized_img(void)
+{
+	struct image_cfg_element *e;
+
+	e = image_find_option(IMAGE_CFG_SEC_SPECIALIZED_IMG);
+	if (!e)
+		return false;
+
+	return e->sec_specialized_img;
+}
+
+#endif
+
+/*
+ * Compute an 8-bit checksum of a memory area. This algorithm follows
+ * the requirements of the Marvell SoC BootROM specifications.
+ */
+static uint8_t image_checksum8(void *start, uint32_t len)
+{
+	uint8_t csum = 0;
+	uint8_t *p = start;
+
+	/* check len and return zero checksum if invalid */
+	if (!len)
+		return 0;
+
+	do {
+		csum += *p;
+		p++;
+	} while (--len);
+
+	return csum;
+}
+
+size_t kwbimage_header_size(unsigned char *ptr)
+{
+	if (image_version((void *)ptr) == 0)
+		return sizeof(struct main_hdr_v0);
+	else
+		return KWBHEADER_V1_SIZE((struct main_hdr_v1 *)ptr);
+}
+
+/*
+ * Verify checksum over a complete header that includes the checksum field.
+ * Return 1 when OK, otherwise 0.
+ */
+static int main_hdr_checksum_ok(void *hdr)
+{
+	/* Offsets of checksum in v0 and v1 headers are the same */
+	struct main_hdr_v0 *main_hdr = (struct main_hdr_v0 *)hdr;
+	uint8_t checksum;
+
+	checksum = image_checksum8(hdr, kwbimage_header_size(hdr));
+	/* Calculated checksum includes the header checksum field. Compensate
+	 * for that.
+	 */
+	checksum -= main_hdr->checksum;
+
+	return checksum == main_hdr->checksum;
+}
+
+static uint32_t image_checksum32(void *start, uint32_t len)
+{
+	uint32_t csum = 0;
+	uint32_t *p = start;
+
+	/* check len and return zero checksum if invalid */
+	if (!len)
+		return 0;
+
+	if (len % sizeof(uint32_t)) {
+		fprintf(stderr, "Length %d is not a multiple of %zu\n",
+			len, sizeof(uint32_t));
+		return 0;
+	}
+
+	do {
+		csum += *p;
+		p++;
+		len -= sizeof(uint32_t);
+	} while (len > 0);
+
+	return csum;
+}
+
+static uint8_t baudrate_to_option(unsigned int baudrate)
+{
+	switch (baudrate) {
+	case 2400:
+		return MAIN_HDR_V1_OPT_BAUD_2400;
+	case 4800:
+		return MAIN_HDR_V1_OPT_BAUD_4800;
+	case 9600:
+		return MAIN_HDR_V1_OPT_BAUD_9600;
+	case 19200:
+		return MAIN_HDR_V1_OPT_BAUD_19200;
+	case 38400:
+		return MAIN_HDR_V1_OPT_BAUD_38400;
+	case 57600:
+		return MAIN_HDR_V1_OPT_BAUD_57600;
+	case 115200:
+		return MAIN_HDR_V1_OPT_BAUD_115200;
+	default:
+		return MAIN_HDR_V1_OPT_BAUD_DEFAULT;
+	}
+}
+
+#if defined(CONFIG_KWB_SECURE)
+static void kwb_msg(const char *fmt, ...)
+{
+	if (verbose_mode) {
+		va_list ap;
+
+		va_start(ap, fmt);
+		vfprintf(stdout, fmt, ap);
+		va_end(ap);
+	}
+}
+
+static int openssl_err(const char *msg)
+{
+	unsigned long ssl_err = ERR_get_error();
+
+	fprintf(stderr, "%s", msg);
+	fprintf(stderr, ": %s\n",
+		ERR_error_string(ssl_err, 0));
+
+	return -1;
+}
+
+static int kwb_load_rsa_key(const char *keydir, const char *name, RSA **p_rsa)
+{
+	char path[PATH_MAX];
+	RSA *rsa;
+	FILE *f;
+
+	if (!keydir)
+		keydir = ".";
+
+	snprintf(path, sizeof(path), "%s/%s.key", keydir, name);
+	f = fopen(path, "r");
+	if (!f) {
+		fprintf(stderr, "Couldn't open RSA private key: '%s': %s\n",
+			path, strerror(errno));
+		return -ENOENT;
+	}
+
+	rsa = PEM_read_RSAPrivateKey(f, 0, NULL, "");
+	if (!rsa) {
+		openssl_err("Failure reading private key");
+		fclose(f);
+		return -EPROTO;
+	}
+	fclose(f);
+	*p_rsa = rsa;
+
+	return 0;
+}
+
+static int kwb_load_cfg_key(struct image_tool_params *params,
+			    unsigned int cfg_option, const char *key_name,
+			    RSA **p_key)
+{
+	struct image_cfg_element *e_key;
+	RSA *key;
+	int res;
+
+	*p_key = NULL;
+
+	e_key = image_find_option(cfg_option);
+	if (!e_key) {
+		fprintf(stderr, "%s not configured\n", key_name);
+		return -ENOENT;
+	}
+
+	res = kwb_load_rsa_key(params->keydir, e_key->key_name, &key);
+	if (res < 0) {
+		fprintf(stderr, "Failed to load %s\n", key_name);
+		return -ENOENT;
+	}
+
+	*p_key = key;
+
+	return 0;
+}
+
+static int kwb_load_kak(struct image_tool_params *params, RSA **p_kak)
+{
+	return kwb_load_cfg_key(params, IMAGE_CFG_KAK, "KAK", p_kak);
+}
+
+static int kwb_load_csk(struct image_tool_params *params, RSA **p_csk)
+{
+	return kwb_load_cfg_key(params, IMAGE_CFG_CSK, "CSK", p_csk);
+}
+
+static int kwb_compute_pubkey_hash(struct pubkey_der_v1 *pk,
+				   struct hash_v1 *hash)
+{
+	EVP_MD_CTX *ctx;
+	unsigned int key_size;
+	unsigned int hash_size;
+	int ret = 0;
+
+	if (!pk || !hash || pk->key[0] != 0x30 || pk->key[1] != 0x82)
+		return -EINVAL;
+
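+	/* DER SEQUENCE with a 2-byte length: 4 header bytes + payload length */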
+	key_size = (pk->key[2] << 8) + pk->key[3] + 4;
+
+	ctx = EVP_MD_CTX_create();
+	if (!ctx)
+		return openssl_err("EVP context creation failed");
+
+	EVP_MD_CTX_init(ctx);
+	if (!EVP_DigestInit(ctx, EVP_sha256())) {
+		ret = openssl_err("Digest setup failed");
+		goto hash_err_ctx;
+	}
+
+	if (!EVP_DigestUpdate(ctx, pk->key, key_size)) {
+		ret = openssl_err("Hashing data failed");
+		goto hash_err_ctx;
+	}
+
+	if (!EVP_DigestFinal(ctx, hash->hash, &hash_size)) {
+		ret = openssl_err("Could not obtain hash");
+		goto hash_err_ctx;
+	}
+
+	EVP_MD_CTX_cleanup(ctx);
+
+hash_err_ctx:
+	EVP_MD_CTX_destroy(ctx);
+	return ret;
+}
+
+static int kwb_import_pubkey(RSA **key, struct pubkey_der_v1 *src, char *keyname)
+{
+	RSA *rsa;
+	const unsigned char *ptr;
+
+	if (!key || !src)
+		goto fail;
+
+	ptr = src->key;
+	rsa = d2i_RSAPublicKey(key, &ptr, sizeof(src->key));
+	if (!rsa) {
+		openssl_err("error decoding public key");
+		goto fail;
+	}
+
+	return 0;
+fail:
+	fprintf(stderr, "Failed to decode %s pubkey\n", keyname);
+	return -EINVAL;
+}
+
+static int kwb_export_pubkey(RSA *key, struct pubkey_der_v1 *dst, FILE *hashf,
+			     char *keyname)
+{
+	int size_exp, size_mod, size_seq;
+	const BIGNUM *key_e, *key_n;
+	uint8_t *cur;
+	char *errmsg = "Failed to encode %s\n";
+
+	RSA_get0_key(key, NULL, &key_e, NULL);
+	RSA_get0_key(key, &key_n, NULL, NULL);
+
+	if (!key || !key_e || !key_n || !dst) {
+		fprintf(stderr, "export pk failed: (%p, %p, %p, %p)",
+			key, key_e, key_n, dst);
+		fprintf(stderr, errmsg, keyname);
+		return -EINVAL;
+	}
+
+	/*
+	 * According to the specs, the key should be PKCS#1 DER encoded.
+	 * Unfortunately the encoding actually required differs: it violates
+	 * DER (but still conforms to BER). Lengths are always written in
+	 * long form with a 2-byte length code, and no leading zero is added
+	 * when the MSB of the first byte is set. So we cannot use the
+	 * encoding functions provided by OpenSSL and have to do the
+	 * encoding manually.
+	 */
+
+	size_exp = BN_num_bytes(key_e);
+	size_mod = BN_num_bytes(key_n);
+	size_seq = 4 + size_mod + 4 + size_exp;
+
+	if (size_mod > 256) {
+		fprintf(stderr, "export pk failed: wrong mod size: %d\n",
+			size_mod);
+		fprintf(stderr, errmsg, keyname);
+		return -EINVAL;
+	}
+
+	if (4 + size_seq > sizeof(dst->key)) {
+		fprintf(stderr, "export pk failed: seq too large (%d, %lu)\n",
+			4 + size_seq, sizeof(dst->key));
+		fprintf(stderr, errmsg, keyname);
+		return -ENOBUFS;
+	}
+
+	cur = dst->key;
+
+	/* PKCS#1 (RFC3447) RSAPublicKey structure */
+	*cur++ = 0x30;		/* SEQUENCE */
+	*cur++ = 0x82;
+	*cur++ = (size_seq >> 8) & 0xFF;
+	*cur++ = size_seq & 0xFF;
+	/* Modulus */
+	*cur++ = 0x02;		/* INTEGER */
+	*cur++ = 0x82;
+	*cur++ = (size_mod >> 8) & 0xFF;
+	*cur++ = size_mod & 0xFF;
+	BN_bn2bin(key_n, cur);
+	cur += size_mod;
+	/* Exponent */
+	*cur++ = 0x02;		/* INTEGER */
+	*cur++ = 0x82;
+	*cur++ = (size_exp >> 8) & 0xFF;
+	*cur++ = size_exp & 0xFF;
+	BN_bn2bin(key_e, cur);
+
+	if (hashf) {
+		struct hash_v1 pk_hash;
+		int i;
+		int ret = 0;
+
+		ret = kwb_compute_pubkey_hash(dst, &pk_hash);
+		if (ret < 0) {
+			fprintf(stderr, errmsg, keyname);
+			return ret;
+		}
+
+		fprintf(hashf, "SHA256 = ");
+		for (i = 0 ; i < sizeof(pk_hash.hash); ++i)
+			fprintf(hashf, "%02X", pk_hash.hash[i]);
+		fprintf(hashf, "\n");
+	}
+
+	return 0;
+}
+
+int kwb_sign(RSA *key, void *data, int datasz, struct sig_v1 *sig, char *signame)
+{
+	EVP_PKEY *evp_key;
+	EVP_MD_CTX *ctx;
+	unsigned int sig_size;
+	int size;
+	int ret = 0;
+
+	evp_key = EVP_PKEY_new();
+	if (!evp_key)
+		return openssl_err("EVP_PKEY object creation failed");
+
+	if (!EVP_PKEY_set1_RSA(evp_key, key)) {
+		ret = openssl_err("EVP key setup failed");
+		goto err_key;
+	}
+
+	size = EVP_PKEY_size(evp_key);
+	if (size > sizeof(sig->sig)) {
+		fprintf(stderr, "Buffer too small for signature (%d bytes)\n",
+			size);
+		ret = -ENOBUFS;
+		goto err_key;
+	}
+
+	ctx = EVP_MD_CTX_create();
+	if (!ctx) {
+		ret = openssl_err("EVP context creation failed");
+		goto err_key;
+	}
+	EVP_MD_CTX_init(ctx);
+	if (!EVP_SignInit(ctx, EVP_sha256())) {
+		ret = openssl_err("Signer setup failed");
+		goto err_ctx;
+	}
+
+	if (!EVP_SignUpdate(ctx, data, datasz)) {
+		ret = openssl_err("Signing data failed");
+		goto err_ctx;
+	}
+
+	if (!EVP_SignFinal(ctx, sig->sig, &sig_size, evp_key)) {
+		ret = openssl_err("Could not obtain signature");
+		goto err_ctx;
+	}
+
+	EVP_MD_CTX_cleanup(ctx);
+	EVP_MD_CTX_destroy(ctx);
+	EVP_PKEY_free(evp_key);
+
+	return 0;
+
+err_ctx:
+	EVP_MD_CTX_destroy(ctx);
+err_key:
+	EVP_PKEY_free(evp_key);
+	fprintf(stderr, "Failed to create %s signature\n", signame);
+	return ret;
+}
+
+int kwb_verify(RSA *key, void *data, int datasz, struct sig_v1 *sig,
+	       char *signame)
+{
+	EVP_PKEY *evp_key;
+	EVP_MD_CTX *ctx;
+	int size;
+	int ret = 0;
+
+	evp_key = EVP_PKEY_new();
+	if (!evp_key)
+		return openssl_err("EVP_PKEY object creation failed");
+
+	if (!EVP_PKEY_set1_RSA(evp_key, key)) {
+		ret = openssl_err("EVP key setup failed");
+		goto err_key;
+	}
+
+	size = EVP_PKEY_size(evp_key);
+	if (size > sizeof(sig->sig)) {
+		fprintf(stderr, "Invalid signature size (%d bytes)\n",
+			size);
+		ret = -EINVAL;
+		goto err_key;
+	}
+
+	ctx = EVP_MD_CTX_create();
+	if (!ctx) {
+		ret = openssl_err("EVP context creation failed");
+		goto err_key;
+	}
+	EVP_MD_CTX_init(ctx);
+	if (!EVP_VerifyInit(ctx, EVP_sha256())) {
+		ret = openssl_err("Verifier setup failed");
+		goto err_ctx;
+	}
+
+	if (!EVP_VerifyUpdate(ctx, data, datasz)) {
+		ret = openssl_err("Hashing data failed");
+		goto err_ctx;
+	}
+
+	if (!EVP_VerifyFinal(ctx, sig->sig, sizeof(sig->sig), evp_key)) {
+		ret = openssl_err("Could not verify signature");
+		goto err_ctx;
+	}
+
+	EVP_MD_CTX_cleanup(ctx);
+	EVP_MD_CTX_destroy(ctx);
+	EVP_PKEY_free(evp_key);
+
+	return 0;
+
+err_ctx:
+	EVP_MD_CTX_destroy(ctx);
+err_key:
+	EVP_PKEY_free(evp_key);
+	fprintf(stderr, "Failed to verify %s signature\n", signame);
+	return ret;
+}
+
+int kwb_sign_and_verify(RSA *key, void *data, int datasz, struct sig_v1 *sig,
+			char *signame)
+{
+	if (kwb_sign(key, data, datasz, sig, signame) < 0)
+		return -1;
+
+	if (kwb_verify(key, data, datasz, sig, signame) < 0)
+		return -1;
+
+	return 0;
+}
+
+
+int kwb_dump_fuse_cmds_38x(FILE *out, struct secure_hdr_v1 *sec_hdr)
+{
+	struct hash_v1 kak_pub_hash;
+	struct image_cfg_element *e;
+	unsigned int fuse_line;
+	int i, idx;
+	uint8_t *ptr;
+	uint32_t val;
+	int ret = 0;
+
+	if (!out || !sec_hdr)
+		return -EINVAL;
+
+	ret = kwb_compute_pubkey_hash(&sec_hdr->kak, &kak_pub_hash);
+	if (ret < 0)
+		goto done;
+
+	fprintf(out, "# burn KAK pub key hash\n");
+	ptr = kak_pub_hash.hash;
+	for (fuse_line = 26; fuse_line <= 30; ++fuse_line) {
+		fprintf(out, "fuse prog -y %u 0 ", fuse_line);
+
+		for (i = 4; i-- > 0;)
+			fprintf(out, "%02hx", (ushort)ptr[i]);
+		ptr += 4;
+		fprintf(out, " 00");
+
+		if (fuse_line < 30) {
+			for (i = 3; i-- > 0;)
+				fprintf(out, "%02hx", (ushort)ptr[i]);
+			ptr += 3;
+		} else {
+			fprintf(out, "000000");
+		}
+
+		fprintf(out, " 1\n");
+	}
+
+	fprintf(out, "# burn CSK selection\n");
+
+	idx = image_get_csk_index();
+	if (idx < 0 || idx > 15) {
+		ret = -EINVAL;
+		goto done;
+	}
+	if (idx > 0) {
+		for (fuse_line = 31; fuse_line < 31 + idx; ++fuse_line)
+			fprintf(out, "fuse prog -y %u 0 00000001 00000000 1\n",
+				fuse_line);
+	} else {
+		fprintf(out, "# CSK index is 0; no mods needed\n");
+	}
+
+	e = image_find_option(IMAGE_CFG_BOX_ID);
+	if (e) {
+		fprintf(out, "# set box ID\n");
+		fprintf(out, "fuse prog -y 48 0 %08x 00000000 1\n", e->boxid);
+	}
+
+	e = image_find_option(IMAGE_CFG_FLASH_ID);
+	if (e) {
+		fprintf(out, "# set flash ID\n");
+		fprintf(out, "fuse prog -y 47 0 %08x 00000000 1\n", e->flashid);
+	}
+
+	fprintf(out, "# enable secure mode ");
+	fprintf(out, "(must be the last fuse line written)\n");
+
+	val = 1;
+	e = image_find_option(IMAGE_CFG_SEC_BOOT_DEV);
+	if (!e) {
+		fprintf(stderr, "ERROR: secured mode boot device not given\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (e->sec_boot_dev > 0xff) {
+		fprintf(stderr, "ERROR: secured mode boot device invalid\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	val |= (e->sec_boot_dev << 8);
+
+	fprintf(out, "fuse prog -y 24 0 %08x 0103e0a9 1\n", val);
+
+	fprintf(out, "# lock (unused) fuse lines (0-23)\n");
+	for (fuse_line = 0; fuse_line < 24; ++fuse_line)
+		fprintf(out, "fuse prog -y %u 2 1\n", fuse_line);
+
+	fprintf(out, "# OK, that's all :-)\n");
+
+done:
+	return ret;
+}
+
+static int kwb_dump_fuse_cmds(struct secure_hdr_v1 *sec_hdr)
+{
+	int ret = 0;
+	struct image_cfg_element *e;
+
+	e = image_find_option(IMAGE_CFG_SEC_FUSE_DUMP);
+	if (!e)
+		return 0;
+
+	if (!strcmp(e->name, "a38x")) {
+		FILE *out = fopen("kwb_fuses_a38x.txt", "w+");
+
+		kwb_dump_fuse_cmds_38x(out, sec_hdr);
+		fclose(out);
+		goto done;
+	}
+
+	ret = -ENOSYS;
+
+done:
+	return ret;
+}
+
+#endif
+
+static void *image_create_v0(size_t *imagesz, struct image_tool_params *params,
+			     int payloadsz)
+{
+	struct image_cfg_element *e;
+	size_t headersz;
+	struct main_hdr_v0 *main_hdr;
+	uint8_t *image;
+	int has_ext = 0;
+
+	/*
+	 * Calculate the size of the header and the size of the
+	 * payload
+	 */
+	headersz  = sizeof(struct main_hdr_v0);
+
+	if (image_count_options(IMAGE_CFG_DATA) > 0) {
+		has_ext = 1;
+		headersz += sizeof(struct ext_hdr_v0);
+	}
+
+	if (image_count_options(IMAGE_CFG_PAYLOAD) > 1) {
+		fprintf(stderr, "More than one payload, not possible\n");
+		return NULL;
+	}
+
+	image = malloc(headersz);
+	if (!image) {
+		fprintf(stderr, "Cannot allocate memory for image\n");
+		return NULL;
+	}
+
+	memset(image, 0, headersz);
+
+	main_hdr = (struct main_hdr_v0 *)image;
+
+	/* Fill in the main header */
+	main_hdr->blocksize =
+		cpu_to_le32(payloadsz + sizeof(uint32_t) - headersz);
+	main_hdr->srcaddr   = cpu_to_le32(headersz);
+	main_hdr->ext       = has_ext;
+	main_hdr->destaddr  = cpu_to_le32(params->addr);
+	main_hdr->execaddr  = cpu_to_le32(params->ep);
+
+	e = image_find_option(IMAGE_CFG_BOOT_FROM);
+	if (e)
+		main_hdr->blockid = e->bootfrom;
+	e = image_find_option(IMAGE_CFG_NAND_ECC_MODE);
+	if (e)
+		main_hdr->nandeccmode = e->nandeccmode;
+	e = image_find_option(IMAGE_CFG_NAND_PAGESZ);
+	if (e)
+		main_hdr->nandpagesize = cpu_to_le16(e->nandpagesz);
+	main_hdr->checksum = image_checksum8(image,
+					     sizeof(struct main_hdr_v0));
+
+	/* Generate the ext header */
+	if (has_ext) {
+		struct ext_hdr_v0 *ext_hdr;
+		int cfgi, datai;
+
+		ext_hdr = (struct ext_hdr_v0 *)
+				(image + sizeof(struct main_hdr_v0));
+		ext_hdr->offset = cpu_to_le32(0x40);
+
+		for (cfgi = 0, datai = 0; cfgi < cfgn; cfgi++) {
+			e = &image_cfg[cfgi];
+			if (e->type != IMAGE_CFG_DATA)
+				continue;
+
+			ext_hdr->rcfg[datai].raddr =
+				cpu_to_le32(e->regdata.raddr);
+			ext_hdr->rcfg[datai].rdata =
+				cpu_to_le32(e->regdata.rdata);
+			datai++;
+		}
+
+		ext_hdr->checksum = image_checksum8(ext_hdr,
+						    sizeof(struct ext_hdr_v0));
+	}
+
+	*imagesz = headersz;
+	return image;
+}
+
+static size_t image_headersz_v1(int *hasext)
+{
+	struct image_cfg_element *binarye;
+	size_t headersz;
+
+	/*
+	 * Calculate the size of the header and the size of the
+	 * payload
+	 */
+	headersz = sizeof(struct main_hdr_v1);
+
+	if (image_count_options(IMAGE_CFG_BINARY) > 1) {
+		fprintf(stderr, "More than one binary blob, not supported\n");
+		return 0;
+	}
+
+	if (image_count_options(IMAGE_CFG_PAYLOAD) > 1) {
+		fprintf(stderr, "More than one payload, not possible\n");
+		return 0;
+	}
+
+	binarye = image_find_option(IMAGE_CFG_BINARY);
+	if (binarye) {
+		int ret;
+		struct stat s;
+
+		ret = stat(binarye->binary.file, &s);
+		if (ret < 0) {
+			char cwd[PATH_MAX];
+			char *dir = cwd;
+
+			memset(cwd, 0, sizeof(cwd));
+			if (!getcwd(cwd, sizeof(cwd))) {
+				dir = "current working directory";
+				perror("getcwd() failed");
+			}
+
+			fprintf(stderr,
+				"Didn't find the file '%s' in '%s' which is mandatory to generate the image\n"
+				"This file generally contains the DDR3 training code, and should be extracted from an existing bootable\n"
+				"image for your board. See 'kwbimage -x' to extract it from an existing image.\n",
+				binarye->binary.file, dir);
+			return 0;
+		}
+
+		headersz += sizeof(struct opt_hdr_v1) +
+			s.st_size +
+			(binarye->binary.nargs + 2) * sizeof(uint32_t);
+		if (hasext)
+			*hasext = 1;
+	}
+
+#if defined(CONFIG_KWB_SECURE)
+	if (image_get_csk_index() >= 0) {
+		headersz += sizeof(struct secure_hdr_v1);
+		if (hasext)
+			*hasext = 1;
+	}
+#endif
+
+#if defined(CONFIG_SYS_U_BOOT_OFFS)
+	if (headersz > CONFIG_SYS_U_BOOT_OFFS) {
+		fprintf(stderr,
+			"Error: Image header (incl. SPL image) too big!\n");
+		fprintf(stderr, "header=0x%x CONFIG_SYS_U_BOOT_OFFS=0x%x!\n",
+			(int)headersz, CONFIG_SYS_U_BOOT_OFFS);
+		fprintf(stderr, "Increase CONFIG_SYS_U_BOOT_OFFS!\n");
+		return 0;
+	}
+
+	headersz = CONFIG_SYS_U_BOOT_OFFS;
+#endif
+
+	/*
+	 * The payload should be aligned on some reasonable
+	 * boundary
+	 */
+	return ALIGN_SUP(headersz, 4096);
+}
+
+int add_binary_header_v1(uint8_t *cur)
+{
+	struct image_cfg_element *binarye;
+	struct opt_hdr_v1 *hdr = (struct opt_hdr_v1 *)cur;
+	uint32_t *args;
+	size_t binhdrsz;
+	struct stat s;
+	int argi;
+	FILE *bin;
+	int ret;
+
+	binarye = image_find_option(IMAGE_CFG_BINARY);
+
+	if (!binarye)
+		return 0;
+
+	hdr->headertype = OPT_HDR_V1_BINARY_TYPE;
+
+	bin = fopen(binarye->binary.file, "r");
+	if (!bin) {
+		fprintf(stderr, "Cannot open binary file %s\n",
+			binarye->binary.file);
+		return -1;
+	}
+
+	if (fstat(fileno(bin), &s)) {
+		fprintf(stderr, "Cannot stat binary file %s\n",
+			binarye->binary.file);
+		goto err_close;
+	}
+
+	binhdrsz = sizeof(struct opt_hdr_v1) +
+		(binarye->binary.nargs + 2) * sizeof(uint32_t) +
+		s.st_size;
+
+	/*
+	 * The header size includes the binary image size rounded up to a
+	 * 4-byte boundary, plus 4 bytes for the next-header byte and its
+	 * 3 bytes of trailing alignment.
+	 */
+	binhdrsz = ALIGN_SUP(binhdrsz, 4) + 4;
+	hdr->headersz_lsb = cpu_to_le16(binhdrsz & 0xFFFF);
+	hdr->headersz_msb = (binhdrsz & 0xFFFF0000) >> 16;
+
+	cur += sizeof(struct opt_hdr_v1);
+
+	args = (uint32_t *)cur;
+	*args = cpu_to_le32(binarye->binary.nargs);
+	args++;
+	for (argi = 0; argi < binarye->binary.nargs; argi++)
+		args[argi] = cpu_to_le32(binarye->binary.args[argi]);
+
+	cur += (binarye->binary.nargs + 1) * sizeof(uint32_t);
+
+	ret = fread(cur, s.st_size, 1, bin);
+	if (ret != 1) {
+		fprintf(stderr,
+			"Could not read binary image %s\n",
+			binarye->binary.file);
+		goto err_close;
+	}
+
+	fclose(bin);
+
+	cur += ALIGN_SUP(s.st_size, 4);
+
+	/*
+	 * For now we support neither more than one binary header nor any
+	 * other header type, so the binary header is necessarily the
+	 * last one.
+	 */
+	*((uint32_t *)cur) = 0x00000000;
+
+	cur += sizeof(uint32_t);
+
+	return 0;
+
+err_close:
+	fclose(bin);
+
+	return -1;
+}
+
+#if defined(CONFIG_KWB_SECURE)
+
+int export_pub_kak_hash(RSA *kak, struct secure_hdr_v1 *secure_hdr)
+{
+	FILE *hashf;
+	int res;
+
+	hashf = fopen("pub_kak_hash.txt", "w");
+
+	res = kwb_export_pubkey(kak, &secure_hdr->kak, hashf, "KAK");
+
+	fclose(hashf);
+
+	return res < 0 ? 1 : 0;
+}
+
+int kwb_sign_csk_with_kak(struct image_tool_params *params,
+			  struct secure_hdr_v1 *secure_hdr, RSA *csk)
+{
+	RSA *kak = NULL;
+	RSA *kak_pub = NULL;
+	int csk_idx = image_get_csk_index();
+	struct sig_v1 tmp_sig;
+
+	if (csk_idx >= 16) {
+		fprintf(stderr, "Invalid CSK index %d\n", csk_idx);
+		return 1;
+	}
+
+	if (kwb_load_kak(params, &kak) < 0)
+		return 1;
+
+	if (export_pub_kak_hash(kak, secure_hdr))
+		return 1;
+
+	if (kwb_import_pubkey(&kak_pub, &secure_hdr->kak, "KAK") < 0)
+		return 1;
+
+	if (kwb_export_pubkey(csk, &secure_hdr->csk[csk_idx], NULL, "CSK") < 0)
+		return 1;
+
+	if (kwb_sign_and_verify(kak, &secure_hdr->csk,
+				sizeof(secure_hdr->csk) +
+				sizeof(secure_hdr->csksig),
+				&tmp_sig, "CSK") < 0)
+		return 1;
+
+	if (kwb_verify(kak_pub, &secure_hdr->csk,
+		       sizeof(secure_hdr->csk) +
+		       sizeof(secure_hdr->csksig),
+		       &tmp_sig, "CSK (2)") < 0)
+		return 1;
+
+	secure_hdr->csksig = tmp_sig;
+
+	return 0;
+}
+
+int add_secure_header_v1(struct image_tool_params *params, uint8_t *ptr,
+			 int payloadsz, size_t headersz, uint8_t *image,
+			 struct secure_hdr_v1 *secure_hdr)
+{
+	struct image_cfg_element *e_jtagdelay;
+	struct image_cfg_element *e_boxid;
+	struct image_cfg_element *e_flashid;
+	RSA *csk = NULL;
+	unsigned char *image_ptr;
+	size_t image_size;
+	struct sig_v1 tmp_sig;
+	bool specialized_img = image_get_spezialized_img();
+
+	kwb_msg("Create secure header content\n");
+
+	e_jtagdelay = image_find_option(IMAGE_CFG_JTAG_DELAY);
+	e_boxid = image_find_option(IMAGE_CFG_BOX_ID);
+	e_flashid = image_find_option(IMAGE_CFG_FLASH_ID);
+
+	if (kwb_load_csk(params, &csk) < 0)
+		return 1;
+
+	secure_hdr->headertype = OPT_HDR_V1_SECURE_TYPE;
+	secure_hdr->headersz_msb = 0;
+	secure_hdr->headersz_lsb = cpu_to_le16(sizeof(struct secure_hdr_v1));
+	if (e_jtagdelay)
+		secure_hdr->jtag_delay = e_jtagdelay->jtag_delay;
+	if (e_boxid && specialized_img)
+		secure_hdr->boxid = cpu_to_le32(e_boxid->boxid);
+	if (e_flashid && specialized_img)
+		secure_hdr->flashid = cpu_to_le32(e_flashid->flashid);
+
+	if (kwb_sign_csk_with_kak(params, secure_hdr, csk))
+		return 1;
+
+	image_ptr = ptr + headersz;
+	image_size = payloadsz - headersz;
+
+	if (kwb_sign_and_verify(csk, image_ptr, image_size,
+				&secure_hdr->imgsig, "image") < 0)
+		return 1;
+
+	if (kwb_sign_and_verify(csk, image, headersz, &tmp_sig, "header") < 0)
+		return 1;
+
+	secure_hdr->hdrsig = tmp_sig;
+
+	kwb_dump_fuse_cmds(secure_hdr);
+
+	return 0;
+}
+#endif
+
+static void *image_create_v1(size_t *imagesz, struct image_tool_params *params,
+			     uint8_t *ptr, int payloadsz)
+{
+	struct image_cfg_element *e;
+	struct main_hdr_v1 *main_hdr;
+#if defined(CONFIG_KWB_SECURE)
+	struct secure_hdr_v1 *secure_hdr = NULL;
+#endif
+	size_t headersz;
+	uint8_t *image, *cur;
+	int hasext = 0;
+	uint8_t *next_ext = NULL;
+
+	/*
+	 * Calculate the size of the header and the size of the
+	 * payload
+	 */
+	headersz = image_headersz_v1(&hasext);
+	if (headersz == 0)
+		return NULL;
+
+	image = malloc(headersz);
+	if (!image) {
+		fprintf(stderr, "Cannot allocate memory for image\n");
+		return NULL;
+	}
+
+	memset(image, 0, headersz);
+
+	main_hdr = (struct main_hdr_v1 *)image;
+	cur = image;
+	cur += sizeof(struct main_hdr_v1);
+	next_ext = &main_hdr->ext;
+
+	/* Fill the main header */
+	main_hdr->blocksize    =
+		cpu_to_le32(payloadsz - headersz + sizeof(uint32_t));
+	main_hdr->headersz_lsb = cpu_to_le16(headersz & 0xFFFF);
+	main_hdr->headersz_msb = (headersz & 0xFFFF0000) >> 16;
+	main_hdr->destaddr     = cpu_to_le32(params->addr)
+				 - sizeof(image_header_t);
+	main_hdr->execaddr     = cpu_to_le32(params->ep);
+	main_hdr->srcaddr      = cpu_to_le32(headersz);
+	main_hdr->ext          = hasext;
+	main_hdr->version      = 1;
+	e = image_find_option(IMAGE_CFG_BOOT_FROM);
+	if (e)
+		main_hdr->blockid = e->bootfrom;
+	e = image_find_option(IMAGE_CFG_NAND_BLKSZ);
+	if (e)
+		main_hdr->nandblocksize = e->nandblksz / (64 * 1024);
+	e = image_find_option(IMAGE_CFG_NAND_BADBLK_LOCATION);
+	if (e)
+		main_hdr->nandbadblklocation = e->nandbadblklocation;
+	e = image_find_option(IMAGE_CFG_BAUDRATE);
+	if (e)
+		main_hdr->options = baudrate_to_option(e->baudrate);
+	e = image_find_option(IMAGE_CFG_DEBUG);
+	if (e)
+		main_hdr->flags = e->debug ? 0x1 : 0;
+
+#if defined(CONFIG_KWB_SECURE)
+	if (image_get_csk_index() >= 0) {
+		/*
+		 * only reserve the space here; we fill the header later since
+		 * we need the header to be complete to compute the signatures
+		 */
+		secure_hdr = (struct secure_hdr_v1 *)cur;
+		cur += sizeof(struct secure_hdr_v1);
+		next_ext = &secure_hdr->next;
+	}
+#endif
+	*next_ext = 1;
+
+	if (add_binary_header_v1(cur))
+		return NULL;
+
+#if defined(CONFIG_KWB_SECURE)
+	if (secure_hdr && add_secure_header_v1(params, ptr, payloadsz,
+					       headersz, image, secure_hdr))
+		return NULL;
+#endif
+
+	/* Calculate and set the header checksum */
+	main_hdr->checksum = image_checksum8(main_hdr, headersz);
+
+	*imagesz = headersz;
+	return image;
+}
+
+int recognize_keyword(char *keyword)
+{
+	int kw_id;
+
+	for (kw_id = 1; kw_id < IMAGE_CFG_COUNT; ++kw_id)
+		if (!strcmp(keyword, id_strs[kw_id]))
+			return kw_id;
+
+	return 0;
+}
+
+static int image_create_config_parse_oneline(char *line,
+					     struct image_cfg_element *el)
+{
+	char *keyword, *saveptr, *value1, *value2;
+	char delimiters[] = " \t";
+	int keyword_id, ret, argi;
+	char *unknown_msg = "Ignoring unknown line '%s'\n";
+
+	keyword = strtok_r(line, delimiters, &saveptr);
+	keyword_id = recognize_keyword(keyword);
+
+	if (!keyword_id) {
+		fprintf(stderr, unknown_msg, line);
+		return 0;
+	}
+
+	el->type = keyword_id;
+
+	value1 = strtok_r(NULL, delimiters, &saveptr);
+
+	if (!value1) {
+		fprintf(stderr, "Parameter missing in line '%s'\n", line);
+		return -1;
+	}
+
+	switch (keyword_id) {
+	case IMAGE_CFG_VERSION:
+		el->version = atoi(value1);
+		break;
+	case IMAGE_CFG_BOOT_FROM:
+		ret = image_boot_mode_id(value1);
+
+		if (ret < 0) {
+			fprintf(stderr, "Invalid boot media '%s'\n", value1);
+			return -1;
+		}
+		el->bootfrom = ret;
+		break;
+	case IMAGE_CFG_NAND_BLKSZ:
+		el->nandblksz = strtoul(value1, NULL, 16);
+		break;
+	case IMAGE_CFG_NAND_BADBLK_LOCATION:
+		el->nandbadblklocation = strtoul(value1, NULL, 16);
+		break;
+	case IMAGE_CFG_NAND_ECC_MODE:
+		ret = image_nand_ecc_mode_id(value1);
+
+		if (ret < 0) {
+			fprintf(stderr, "Invalid NAND ECC mode '%s'\n", value1);
+			return -1;
+		}
+		el->nandeccmode = ret;
+		break;
+	case IMAGE_CFG_NAND_PAGESZ:
+		el->nandpagesz = strtoul(value1, NULL, 16);
+		break;
+	case IMAGE_CFG_BINARY:
+		argi = 0;
+
+		el->binary.file = strdup(value1);
+		while (1) {
+			char *value = strtok_r(NULL, delimiters, &saveptr);
+
+			if (!value)
+				break;
+			el->binary.args[argi] = strtoul(value, NULL, 16);
+			argi++;
+			if (argi >= BINARY_MAX_ARGS) {
+				fprintf(stderr,
+					"Too many arguments for BINARY\n");
+				return -1;
+			}
+		}
+		el->binary.nargs = argi;
+		break;
+	case IMAGE_CFG_DATA:
+		value2 = strtok_r(NULL, delimiters, &saveptr);
+
+		if (!value1 || !value2) {
+			fprintf(stderr,
+				"Invalid number of arguments for DATA\n");
+			return -1;
+		}
+
+		el->regdata.raddr = strtoul(value1, NULL, 16);
+		el->regdata.rdata = strtoul(value2, NULL, 16);
+		break;
+	case IMAGE_CFG_BAUDRATE:
+		el->baudrate = strtoul(value1, NULL, 10);
+		break;
+	case IMAGE_CFG_DEBUG:
+		el->debug = strtoul(value1, NULL, 10);
+		break;
+	case IMAGE_CFG_KAK:
+		el->key_name = strdup(value1);
+		break;
+	case IMAGE_CFG_CSK:
+		el->key_name = strdup(value1);
+		break;
+	case IMAGE_CFG_CSK_INDEX:
+		el->csk_idx = strtol(value1, NULL, 0);
+		break;
+	case IMAGE_CFG_JTAG_DELAY:
+		el->jtag_delay = strtoul(value1, NULL, 0);
+		break;
+	case IMAGE_CFG_BOX_ID:
+		el->boxid = strtoul(value1, NULL, 0);
+		break;
+	case IMAGE_CFG_FLASH_ID:
+		el->flashid = strtoul(value1, NULL, 0);
+		break;
+	case IMAGE_CFG_SEC_SPECIALIZED_IMG:
+		el->sec_specialized_img = true;
+		break;
+	case IMAGE_CFG_SEC_COMMON_IMG:
+		el->sec_specialized_img = false;
+		break;
+	case IMAGE_CFG_SEC_BOOT_DEV:
+		el->sec_boot_dev = strtoul(value1, NULL, 0);
+		break;
+	case IMAGE_CFG_SEC_FUSE_DUMP:
+		el->name = strdup(value1);
+		break;
+	default:
+		fprintf(stderr, unknown_msg, line);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse the configuration file 'fcfg' into the array of configuration
+ * elements 'image_cfg', and return the number of configuration
+ * elements in 'cfgn'.
+ */
+static int image_create_config_parse(FILE *fcfg)
+{
+	int ret;
+	int cfgi = 0;
+
+	/* Parse the configuration file */
+	while (!feof(fcfg)) {
+		char *line;
+		char buf[256];
+
+		/* Read the current line */
+		memset(buf, 0, sizeof(buf));
+		line = fgets(buf, sizeof(buf), fcfg);
+		if (!line)
+			break;
+
+		/* Ignore useless lines */
+		if (line[0] == '\n' || line[0] == '#')
+			continue;
+
+		/* Strip final newline */
+		if (line[strlen(line) - 1] == '\n')
+			line[strlen(line) - 1] = 0;
+
+		/* Parse the current line */
+		ret = image_create_config_parse_oneline(line,
+							&image_cfg[cfgi]);
+		if (ret)
+			return ret;
+
+		cfgi++;
+
+		if (cfgi >= IMAGE_CFG_ELEMENT_MAX) {
+			fprintf(stderr,
+				"Too many configuration elements in .cfg file\n");
+			return -1;
+		}
+	}
+
+	cfgn = cfgi;
+	return 0;
+}
+
+static int image_get_version(void)
+{
+	struct image_cfg_element *e;
+
+	e = image_find_option(IMAGE_CFG_VERSION);
+	if (!e)
+		return -1;
+
+	return e->version;
+}
+
+static void kwbimage_set_header(void *ptr, struct stat *sbuf, int ifd,
+				struct image_tool_params *params)
+{
+	FILE *fcfg;
+	void *image = NULL;
+	int version;
+	size_t headersz = 0;
+	uint32_t checksum;
+	int ret;
+	int size;
+
+	fcfg = fopen(params->imagename, "r");
+	if (!fcfg) {
+		fprintf(stderr, "Could not open input file %s\n",
+			params->imagename);
+		exit(EXIT_FAILURE);
+	}
+
+	image_cfg = malloc(IMAGE_CFG_ELEMENT_MAX *
+			   sizeof(struct image_cfg_element));
+	if (!image_cfg) {
+		fprintf(stderr, "Cannot allocate memory\n");
+		fclose(fcfg);
+		exit(EXIT_FAILURE);
+	}
+
+	memset(image_cfg, 0,
+	       IMAGE_CFG_ELEMENT_MAX * sizeof(struct image_cfg_element));
+	rewind(fcfg);
+
+	ret = image_create_config_parse(fcfg);
+	fclose(fcfg);
+	if (ret) {
+		free(image_cfg);
+		exit(EXIT_FAILURE);
+	}
+
+	/* The MVEBU BootROM does not allow non word aligned payloads */
+	sbuf->st_size = ALIGN_SUP(sbuf->st_size, 4);
+
+	version = image_get_version();
+	switch (version) {
+		/*
+		 * Fall back to version 0 if no version is provided in the
+		 * cfg file
+		 */
+	case -1:
+	case 0:
+		image = image_create_v0(&headersz, params, sbuf->st_size);
+		break;
+
+	case 1:
+		image = image_create_v1(&headersz, params, ptr, sbuf->st_size);
+		break;
+
+	default:
+		fprintf(stderr, "Unsupported version %d\n", version);
+		free(image_cfg);
+		exit(EXIT_FAILURE);
+	}
+
+	if (!image) {
+		fprintf(stderr, "Could not create image\n");
+		free(image_cfg);
+		exit(EXIT_FAILURE);
+	}
+
+	free(image_cfg);
+
+	/* Build and add image checksum header */
+	checksum =
+		cpu_to_le32(image_checksum32((uint32_t *)ptr, sbuf->st_size));
+	size = write(ifd, &checksum, sizeof(uint32_t));
+	if (size != sizeof(uint32_t)) {
+		fprintf(stderr, "Error:%s - Checksum write %d bytes %s\n",
+			params->cmdname, size, params->imagefile);
+		exit(EXIT_FAILURE);
+	}
+
+	sbuf->st_size += sizeof(uint32_t);
+
+	/* Finally copy the header into the image area */
+	memcpy(ptr, image, headersz);
+
+	free(image);
+}
+
+static void kwbimage_print_header(const void *ptr)
+{
+	struct main_hdr_v0 *mhdr = (struct main_hdr_v0 *)ptr;
+
+	printf("Image Type:   MVEBU Boot from %s Image\n",
+	       image_boot_mode_name(mhdr->blockid));
+	printf("Image version:%d\n", image_version((void *)ptr));
+	printf("Data Size:    ");
+	genimg_print_size(mhdr->blocksize - sizeof(uint32_t));
+	printf("Load Address: %08x\n", mhdr->destaddr);
+	printf("Entry Point:  %08x\n", mhdr->execaddr);
+}
+
+static int kwbimage_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_KWBIMAGE)
+		return EXIT_SUCCESS;
+
+	return EXIT_FAILURE;
+}
+
+static int kwbimage_verify_header(unsigned char *ptr, int image_size,
+				  struct image_tool_params *params)
+{
+	uint8_t checksum;
+	size_t header_size = kwbimage_header_size(ptr);
+
+	if (header_size > image_size)
+		return -FDT_ERR_BADSTRUCTURE;
+
+	if (!main_hdr_checksum_ok(ptr))
+		return -FDT_ERR_BADSTRUCTURE;
+
+	/* Only version 0 extended header has checksum */
+	if (image_version((void *)ptr) == 0) {
+		struct ext_hdr_v0 *ext_hdr;
+
+		ext_hdr = (struct ext_hdr_v0 *)
+				(ptr + sizeof(struct main_hdr_v0));
+		checksum = image_checksum8(ext_hdr,
+					   sizeof(struct ext_hdr_v0)
+					   - sizeof(uint8_t));
+		if (checksum != ext_hdr->checksum)
+			return -FDT_ERR_BADSTRUCTURE;
+	}
+
+	return 0;
+}
+
+static int kwbimage_generate(struct image_tool_params *params,
+			     struct image_type_params *tparams)
+{
+	FILE *fcfg;
+	int alloc_len;
+	int version;
+	void *hdr;
+	int ret;
+
+	fcfg = fopen(params->imagename, "r");
+	if (!fcfg) {
+		fprintf(stderr, "Could not open input file %s\n",
+			params->imagename);
+		exit(EXIT_FAILURE);
+	}
+
+	image_cfg = malloc(IMAGE_CFG_ELEMENT_MAX *
+			   sizeof(struct image_cfg_element));
+	if (!image_cfg) {
+		fprintf(stderr, "Cannot allocate memory\n");
+		fclose(fcfg);
+		exit(EXIT_FAILURE);
+	}
+
+	memset(image_cfg, 0,
+	       IMAGE_CFG_ELEMENT_MAX * sizeof(struct image_cfg_element));
+	rewind(fcfg);
+
+	ret = image_create_config_parse(fcfg);
+	fclose(fcfg);
+	if (ret) {
+		free(image_cfg);
+		exit(EXIT_FAILURE);
+	}
+
+	version = image_get_version();
+	switch (version) {
+		/*
+		 * Fall back to version 0 if no version is provided in the
+		 * cfg file
+		 */
+	case -1:
+	case 0:
+		alloc_len = sizeof(struct main_hdr_v0) +
+			sizeof(struct ext_hdr_v0);
+		break;
+
+	case 1:
+		alloc_len = image_headersz_v1(NULL);
+		break;
+
+	default:
+		fprintf(stderr, "Unsupported version %d\n", version);
+		free(image_cfg);
+		exit(EXIT_FAILURE);
+	}
+
+	free(image_cfg);
+
+	hdr = malloc(alloc_len);
+	if (!hdr) {
+		fprintf(stderr, "%s: malloc() failed: %s\n",
+			params->cmdname, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	memset(hdr, 0, alloc_len);
+	tparams->header_size = alloc_len;
+	tparams->hdr = hdr;
+
+	/*
+	 * The resulting image needs to be 4-byte aligned; at least the
+	 * Marvell hdrparser tool complains if it is unaligned. By
+	 * returning 1 from this function, which is called via
+	 * tparams->vrec_header() in mkimage.c, mkimage will automatically
+	 * pad the resulting image to a 4-byte size if necessary.
+	 */
+	return 1;
+}
+
+/*
+ * Report an error if xflag is set in addition to the defaults
+ */
+static int kwbimage_check_params(struct image_tool_params *params)
+{
+	if (!strlen(params->imagename)) {
+		char *msg = "Configuration file for kwbimage creation omitted";
+
+		fprintf(stderr, "Error:%s - %s\n", params->cmdname, msg);
+		return CFG_INVALID;
+	}
+
+	return (params->dflag && (params->fflag || params->lflag)) ||
+		(params->fflag && (params->dflag || params->lflag)) ||
+		(params->lflag && (params->dflag || params->fflag)) ||
+		(params->xflag) || !(strlen(params->imagename));
+}
+
+/*
+ * kwbimage type parameters definition
+ */
+U_BOOT_IMAGE_TYPE(
+	kwbimage,
+	"Marvell MVEBU Boot Image support",
+	0,
+	NULL,
+	kwbimage_check_params,
+	kwbimage_verify_header,
+	kwbimage_print_header,
+	kwbimage_set_header,
+	NULL,
+	kwbimage_check_image_types,
+	NULL,
+	kwbimage_generate
+);
diff --git a/tools/u-boot-tools/kwbimage.h b/tools/u-boot-tools/kwbimage.h
new file mode 100644
index 0000000000000000000000000000000000000000..25bc08c5ce5514a46378cae7f7b6a8be8ead03ac
--- /dev/null
+++ b/tools/u-boot-tools/kwbimage.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2008
+ * Marvell Semiconductor <www.marvell.com>
+ * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
+ */
+
+#ifndef _KWBIMAGE_H_
+#define _KWBIMAGE_H_
+
+#include <compiler.h>
+#include <stdint.h>
+
+#define KWBIMAGE_MAX_CONFIG	((0x1dc - 0x20)/sizeof(struct reg_config))
+#define MAX_TEMPBUF_LEN		32
+
+/* NAND ECC Mode */
+#define IBR_HDR_ECC_DEFAULT		0x00
+#define IBR_HDR_ECC_FORCED_HAMMING	0x01
+#define IBR_HDR_ECC_FORCED_RS  		0x02
+#define IBR_HDR_ECC_DISABLED  		0x03
+
+/* Boot Type - block ID */
+#define IBR_HDR_I2C_ID			0x4D
+#define IBR_HDR_SPI_ID			0x5A
+#define IBR_HDR_NAND_ID			0x8B
+#define IBR_HDR_SATA_ID			0x78
+#define IBR_HDR_PEX_ID			0x9C
+#define IBR_HDR_UART_ID			0x69
+#define IBR_DEF_ATTRIB	 		0x00
+
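+/* round x up to the next multiple of a (a must be a power of two) */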
+#define ALIGN_SUP(x, a) (((x) + (a - 1)) & ~(a - 1))
+
+/* Structure of the main header, version 0 (Kirkwood, Dove) */
+struct main_hdr_v0 {
+	uint8_t  blockid;		/* 0x0       */
+	uint8_t  nandeccmode;		/* 0x1       */
+	uint16_t nandpagesize;		/* 0x2-0x3   */
+	uint32_t blocksize;		/* 0x4-0x7   */
+	uint32_t rsvd1;			/* 0x8-0xB   */
+	uint32_t srcaddr;		/* 0xC-0xF   */
+	uint32_t destaddr;		/* 0x10-0x13 */
+	uint32_t execaddr;		/* 0x14-0x17 */
+	uint8_t  satapiomode;		/* 0x18      */
+	uint8_t  rsvd3;			/* 0x19      */
+	uint16_t ddrinitdelay;		/* 0x1A-0x1B */
+	uint16_t rsvd2;			/* 0x1C-0x1D */
+	uint8_t  ext;			/* 0x1E      */
+	uint8_t  checksum;		/* 0x1F      */
+};
+
+struct ext_hdr_v0_reg {
+	uint32_t raddr;
+	uint32_t rdata;
+};
+
+#define EXT_HDR_V0_REG_COUNT ((0x1dc - 0x20) / sizeof(struct ext_hdr_v0_reg))
+
+struct ext_hdr_v0 {
+	uint32_t              offset;
+	uint8_t               reserved[0x20 - sizeof(uint32_t)];
+	struct ext_hdr_v0_reg rcfg[EXT_HDR_V0_REG_COUNT];
+	uint8_t               reserved2[7];
+	uint8_t               checksum;
+};
+
+struct kwb_header {
+	struct main_hdr_v0	kwb_hdr;
+	struct ext_hdr_v0	kwb_exthdr;
+};
+
+/* Structure of the main header, version 1 (Armada 370/38x/XP) */
+struct main_hdr_v1 {
+	uint8_t  blockid;               /* 0x0       */
+	uint8_t  flags;                 /* 0x1       */
+	uint16_t reserved2;             /* 0x2-0x3   */
+	uint32_t blocksize;             /* 0x4-0x7   */
+	uint8_t  version;               /* 0x8       */
+	uint8_t  headersz_msb;          /* 0x9       */
+	uint16_t headersz_lsb;          /* 0xA-0xB   */
+	uint32_t srcaddr;               /* 0xC-0xF   */
+	uint32_t destaddr;              /* 0x10-0x13 */
+	uint32_t execaddr;              /* 0x14-0x17 */
+	uint8_t  options;               /* 0x18      */
+	uint8_t  nandblocksize;         /* 0x19      */
+	uint8_t  nandbadblklocation;    /* 0x1A      */
+	uint8_t  reserved4;             /* 0x1B      */
+	uint16_t reserved5;             /* 0x1C-0x1D */
+	uint8_t  ext;                   /* 0x1E      */
+	uint8_t  checksum;              /* 0x1F      */
+};
+
+/*
+ * Main header options
+ */
+#define MAIN_HDR_V1_OPT_BAUD_DEFAULT	0
+#define MAIN_HDR_V1_OPT_BAUD_2400	0x1
+#define MAIN_HDR_V1_OPT_BAUD_4800	0x2
+#define MAIN_HDR_V1_OPT_BAUD_9600	0x3
+#define MAIN_HDR_V1_OPT_BAUD_19200	0x4
+#define MAIN_HDR_V1_OPT_BAUD_38400	0x5
+#define MAIN_HDR_V1_OPT_BAUD_57600	0x6
+#define MAIN_HDR_V1_OPT_BAUD_115200	0x7
+
+/*
+ * Header for the optional headers, version 1 (Armada 370, Armada XP)
+ */
+struct opt_hdr_v1 {
+	uint8_t  headertype;
+	uint8_t  headersz_msb;
+	uint16_t headersz_lsb;
+	char     data[0];
+};
+
+/*
+ * Public Key data in DER format
+ */
+struct pubkey_der_v1 {
+	uint8_t key[524];
+};
+
+/*
+ * Signature (RSA 2048)
+ */
+struct sig_v1 {
+	uint8_t sig[256];
+};
+
+/*
+ * Structure of secure header (Armada 38x)
+ */
+struct secure_hdr_v1 {
+	uint8_t  headertype;		/* 0x0 */
+	uint8_t  headersz_msb;		/* 0x1 */
+	uint16_t headersz_lsb;		/* 0x2 - 0x3 */
+	uint32_t reserved1;		/* 0x4 - 0x7 */
+	struct pubkey_der_v1 kak;	/* 0x8 - 0x213 */
+	uint8_t  jtag_delay;		/* 0x214 */
+	uint8_t  reserved2;		/* 0x215 */
+	uint16_t reserved3;		/* 0x216 - 0x217 */
+	uint32_t boxid;			/* 0x218 - 0x21B */
+	uint32_t flashid;		/* 0x21C - 0x21F */
+	struct sig_v1 hdrsig;		/* 0x220 - 0x31F */
+	struct sig_v1 imgsig;		/* 0x320 - 0x41F */
+	struct pubkey_der_v1 csk[16];	/* 0x420 - 0x24DF */
+	struct sig_v1 csksig;		/* 0x24E0 - 0x25DF */
+	uint8_t  next;			/* 0x25E0 */
+	uint8_t  reserved4;		/* 0x25E1 */
+	uint16_t reserved5;		/* 0x25E2 - 0x25E3 */
+};
+
+/*
+ * Various values for the opt_hdr_v1->headertype field, describing the
+ * different types of optional headers. The "secure" header contains
+ * information related to secure boot (encryption keys, etc.). The
+ * "binary" header contains ARM binary code to be executed prior to the
+ * main payload (usually the bootloader); this is typically used to run
+ * DDR3 training code. The "register" header describes a set of
+ * (address, value) tuples that are generally used to configure the
+ * DRAM controller.
+ */
+#define OPT_HDR_V1_SECURE_TYPE   0x1
+#define OPT_HDR_V1_BINARY_TYPE   0x2
+#define OPT_HDR_V1_REGISTER_TYPE 0x3
+
+#define KWBHEADER_V1_SIZE(hdr) \
+	(((hdr)->headersz_msb << 16) | le16_to_cpu((hdr)->headersz_lsb))
+
+enum kwbimage_cmd {
+	CMD_INVALID,
+	CMD_BOOT_FROM,
+	CMD_NAND_ECC_MODE,
+	CMD_NAND_PAGE_SIZE,
+	CMD_SATA_PIO_MODE,
+	CMD_DDR_INIT_DELAY,
+	CMD_DATA
+};
+
+enum kwbimage_cmd_types {
+	CFG_INVALID = -1,
+	CFG_COMMAND,
+	CFG_DATA0,
+	CFG_DATA1
+};
+
+/*
+ * functions
+ */
+void init_kwb_image_type (void);
+
+/*
+ * Byte 8 of the image header contains the version number. In the v0
+ * header, byte 8 was reserved, and always set to 0. In the v1 header,
+ * byte 8 has been changed to a proper field, set to 1.
+ */
+static inline unsigned int image_version(void *header)
+{
+	unsigned char *ptr = header;
+	return ptr[8];
+}
+
+#endif /* _KWBIMAGE_H_ */
diff --git a/tools/u-boot-tools/kwbimage.o b/tools/u-boot-tools/kwbimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..5bddaa2ae5368bd770a51bbf2c7389b4a85f2d9b
Binary files /dev/null and b/tools/u-boot-tools/kwbimage.o differ
diff --git a/tools/u-boot-tools/kwboot.c b/tools/u-boot-tools/kwboot.c
new file mode 100644
index 0000000000000000000000000000000000000000..4be094c9c8d81ebca06170a386718527880a587e
--- /dev/null
+++ b/tools/u-boot-tools/kwboot.c
@@ -0,0 +1,867 @@
+/*
+ * Boot a Marvell SoC, with Xmodem over UART0.
+ *  supports Kirkwood, Dove, Armada 370, Armada XP
+ *
+ * (c) 2012 Daniel Stodden <daniel.stodden@gmail.com>
+ *
+ * References: marvell.com, "88F6180, 88F6190, 88F6192, and 88F6281
+ *   Integrated Controller: Functional Specifications" December 2,
+ *   2008. Chapter 24.2 "BootROM Firmware".
+ */
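+
+/*
+ * Typical invocation (illustrative only; the image name and serial device
+ * below are examples, not requirements):
+ *
+ *   kwboot -t -B 115200 -b u-boot.kwb /dev/ttyUSB0
+ *
+ * This keeps sending the boot pattern until the BootROM answers, pushes
+ * the image over Xmodem and finally drops into the mini terminal (-t).
+ */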
+
+#include "kwbimage.h"
+#include "mkimage.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <image.h>
+#include <libgen.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <termios.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+
+#ifdef __GNUC__
+#define PACKED __attribute((packed))
+#else
+#define PACKED
+#endif
+
+/*
+ * Marvell BootROM UART Sensing
+ */
+
+static unsigned char kwboot_msg_boot[] = {
+	0xBB, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77
+};
+
+static unsigned char kwboot_msg_debug[] = {
+	0xDD, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77
+};
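+
+/*
+ * The BootROM watches the UART for one of the eight-byte patterns above
+ * while it starts up; kwboot therefore resends the chosen pattern in a
+ * loop and, for the boot pattern, treats the first NAK read back as the
+ * cue to start the Xmodem transfer (see kwboot_bootmsg() below).
+ */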
+
+/* Defines known to work on Kirkwood */
+#define KWBOOT_MSG_REQ_DELAY	10 /* ms */
+#define KWBOOT_MSG_RSP_TIMEO	50 /* ms */
+
+/* Defines known to work on Armada XP */
+#define KWBOOT_MSG_REQ_DELAY_AXP	1000 /* ms */
+#define KWBOOT_MSG_RSP_TIMEO_AXP	1000 /* ms */
+
+/*
+ * Xmodem Transfers
+ */
+
+#define SOH	1	/* sender start of block header */
+#define EOT	4	/* sender end of block transfer */
+#define ACK	6	/* target block ack */
+#define NAK	21	/* target block negative ack */
+#define CAN	24	/* target/sender transfer cancellation */
+
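+/*
+ * Layout of one transfer unit: a classic 128-byte Xmodem block, 132 bytes
+ * in total: SOH, the block number, its one's complement, 128 bytes of
+ * payload (zero-padded in the final block) and an 8-bit additive checksum
+ * over the payload (filled in by kwboot_xm_makeblock() below).
+ */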
+struct kwboot_block {
+	uint8_t soh;
+	uint8_t pnum;
+	uint8_t _pnum;
+	uint8_t data[128];
+	uint8_t csum;
+} PACKED;
+
+#define KWBOOT_BLK_RSP_TIMEO 1000 /* ms */
+
+static int kwboot_verbose;
+
+static int msg_req_delay = KWBOOT_MSG_REQ_DELAY;
+static int msg_rsp_timeo = KWBOOT_MSG_RSP_TIMEO;
+static int blk_rsp_timeo = KWBOOT_BLK_RSP_TIMEO;
+
+static void
+kwboot_printv(const char *fmt, ...)
+{
+	va_list ap;
+
+	if (kwboot_verbose) {
+		va_start(ap, fmt);
+		vprintf(fmt, ap);
+		va_end(ap);
+		fflush(stdout);
+	}
+}
+
+static void
+__spinner(void)
+{
+	const char seq[] = { '-', '\\', '|', '/' };
+	const int div = 8;
+	static int state, bs;
+
+	if (state % div == 0) {
+		fputc(bs, stdout);
+		fputc(seq[state / div % sizeof(seq)], stdout);
+		fflush(stdout);
+	}
+
+	bs = '\b';
+	state++;
+}
+
+static void
+kwboot_spinner(void)
+{
+	if (kwboot_verbose)
+		__spinner();
+}
+
+static void
+__progress(int pct, char c)
+{
+	const int width = 70;
+	static const char *nl = "";
+	static int pos;
+
+	if (pos % width == 0)
+		printf("%s%3d %% [", nl, pct);
+
+	fputc(c, stdout);
+
+	nl = "]\n";
+	pos++;
+
+	if (pct == 100) {
+		while (pos++ < width)
+			fputc(' ', stdout);
+		fputs(nl, stdout);
+	}
+
+	fflush(stdout);
+
+}
+
+static void
+kwboot_progress(int _pct, char c)
+{
+	static int pct;
+
+	if (_pct != -1)
+		pct = _pct;
+
+	if (kwboot_verbose)
+		__progress(pct, c);
+}
+
+static int
+kwboot_tty_recv(int fd, void *buf, size_t len, int timeo)
+{
+	int rc, nfds;
+	fd_set rfds;
+	struct timeval tv;
+	ssize_t n;
+
+	rc = -1;
+
+	FD_ZERO(&rfds);
+	FD_SET(fd, &rfds);
+
+	tv.tv_sec = 0;
+	tv.tv_usec = timeo * 1000;
+	if (tv.tv_usec > 1000000) {
+		tv.tv_sec += tv.tv_usec / 1000000;
+		tv.tv_usec %= 1000000;
+	}
+
+	do {
+		nfds = select(fd + 1, &rfds, NULL, NULL, &tv);
+		if (nfds < 0)
+			goto out;
+		if (!nfds) {
+			errno = ETIMEDOUT;
+			goto out;
+		}
+
+		n = read(fd, buf, len);
+		if (n <= 0)
+			goto out;
+
+		buf = (char *)buf + n;
+		len -= n;
+	} while (len > 0);
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static int
+kwboot_tty_send(int fd, const void *buf, size_t len)
+{
+	int rc;
+	ssize_t n;
+
+	if (!buf)
+		return 0;
+
+	rc = -1;
+
+	do {
+		n = write(fd, buf, len);
+		if (n < 0)
+			goto out;
+
+		buf = (char *)buf + n;
+		len -= n;
+	} while (len > 0);
+
+	rc = tcdrain(fd);
+out:
+	return rc;
+}
+
+static int
+kwboot_tty_send_char(int fd, unsigned char c)
+{
+	return kwboot_tty_send(fd, &c, 1);
+}
+
+static speed_t
+kwboot_tty_speed(int baudrate)
+{
+	switch (baudrate) {
+	case 115200:
+		return B115200;
+	case 57600:
+		return B57600;
+	case 38400:
+		return B38400;
+	case 19200:
+		return B19200;
+	case 9600:
+		return B9600;
+	}
+
+	return -1;
+}
+
+static int
+kwboot_open_tty(const char *path, speed_t speed)
+{
+	int rc, fd;
+	struct termios tio;
+
+	rc = -1;
+
+	fd = open(path, O_RDWR|O_NOCTTY|O_NDELAY);
+	if (fd < 0)
+		goto out;
+
+	memset(&tio, 0, sizeof(tio));
+
+	tio.c_iflag = 0;
+	tio.c_cflag = CREAD|CLOCAL|CS8;
+
+	tio.c_cc[VMIN] = 1;
+	tio.c_cc[VTIME] = 10;
+
+	cfsetospeed(&tio, speed);
+	cfsetispeed(&tio, speed);
+
+	rc = tcsetattr(fd, TCSANOW, &tio);
+	if (rc)
+		goto out;
+
+	rc = fd;
+out:
+	if (rc < 0) {
+		if (fd >= 0)
+			close(fd);
+	}
+
+	return rc;
+}
+
+static int
+kwboot_bootmsg(int tty, void *msg)
+{
+	int rc;
+	char c;
+	int count;
+
+	if (msg == NULL)
+		kwboot_printv("Please reboot the target into UART boot mode...");
+	else
+		kwboot_printv("Sending boot message. Please reboot the target...");
+
+	do {
+		rc = tcflush(tty, TCIOFLUSH);
+		if (rc)
+			break;
+
+		for (count = 0; count < 128; count++) {
+			rc = kwboot_tty_send(tty, msg, 8);
+			if (rc) {
+				usleep(msg_req_delay * 1000);
+				continue;
+			}
+		}
+
+		rc = kwboot_tty_recv(tty, &c, 1, msg_rsp_timeo);
+
+		kwboot_spinner();
+
+	} while (rc || c != NAK);
+
+	kwboot_printv("\n");
+
+	return rc;
+}
+
+static int
+kwboot_debugmsg(int tty, void *msg)
+{
+	int rc;
+
+	kwboot_printv("Sending debug message. Please reboot the target...");
+
+	do {
+		char buf[16];
+
+		rc = tcflush(tty, TCIOFLUSH);
+		if (rc)
+			break;
+
+		rc = kwboot_tty_send(tty, msg, 8);
+		if (rc) {
+			usleep(msg_req_delay * 1000);
+			continue;
+		}
+
+		rc = kwboot_tty_recv(tty, buf, 16, msg_rsp_timeo);
+
+		kwboot_spinner();
+
+	} while (rc);
+
+	kwboot_printv("\n");
+
+	return rc;
+}
+
+static int
+kwboot_xm_makeblock(struct kwboot_block *block, const void *data,
+		    size_t size, int pnum)
+{
+	const size_t blksz = sizeof(block->data);
+	size_t n;
+	int i;
+
+	block->soh = SOH;
+	block->pnum = pnum;
+	block->_pnum = ~block->pnum;
+
+	n = size < blksz ? size : blksz;
+	memcpy(&block->data[0], data, n);
+	memset(&block->data[n], 0, blksz - n);
+
+	block->csum = 0;
+	for (i = 0; i < n; i++)
+		block->csum += block->data[i];
+
+	return n;
+}
+
+static int
+kwboot_xm_sendblock(int fd, struct kwboot_block *block)
+{
+	int rc, retries;
+	char c;
+
+	retries = 16;
+	do {
+		rc = kwboot_tty_send(fd, block, sizeof(*block));
+		if (rc)
+			break;
+
+		do {
+			rc = kwboot_tty_recv(fd, &c, 1, blk_rsp_timeo);
+			if (rc)
+				break;
+
+			if (c != ACK && c != NAK && c != CAN)
+				printf("%c", c);
+
+		} while (c != ACK && c != NAK && c != CAN);
+
+		if (c != ACK)
+			kwboot_progress(-1, '+');
+
+	} while (c == NAK && retries-- > 0);
+
+	rc = -1;
+
+	switch (c) {
+	case ACK:
+		rc = 0;
+		break;
+	case NAK:
+		errno = EBADMSG;
+		break;
+	case CAN:
+		errno = ECANCELED;
+		break;
+	default:
+		errno = EPROTO;
+		break;
+	}
+
+	return rc;
+}
+
+static int
+kwboot_xmodem(int tty, const void *_data, size_t size)
+{
+	const uint8_t *data = _data;
+	int rc, pnum, N, err;
+
+	pnum = 1;
+	N = 0;
+
+	kwboot_printv("Sending boot image...\n");
+
+	sleep(2); /* flush isn't effective without it */
+	tcflush(tty, TCIOFLUSH);
+
+	do {
+		struct kwboot_block block;
+		int n;
+
+		n = kwboot_xm_makeblock(&block,
+					data + N, size - N,
+					pnum++);
+		if (n < 0)
+			goto can;
+
+		if (!n)
+			break;
+
+		rc = kwboot_xm_sendblock(tty, &block);
+		if (rc)
+			goto out;
+
+		N += n;
+		kwboot_progress(N * 100 / size, '.');
+	} while (1);
+
+	rc = kwboot_tty_send_char(tty, EOT);
+
+out:
+	return rc;
+
+can:
+	err = errno;
+	kwboot_tty_send_char(tty, CAN);
+	errno = err;
+	goto out;
+}
+
+static int
+kwboot_term_pipe(int in, int out, char *quit, int *s)
+{
+	ssize_t nin, nout;
+	char _buf[128], *buf = _buf;
+
+	nin = read(in, buf, sizeof(buf));
+	if (nin <= 0)
+		return -1;
+
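+
+	/*
+	 * Scan the input for the quit sequence: characters that match so far
+	 * are held back (tracked via *s) and replayed to the output only if
+	 * the sequence later fails to complete.
+	 */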
+	if (quit) {
+		int i;
+
+		for (i = 0; i < nin; i++) {
+			if (*buf == quit[*s]) {
+				(*s)++;
+				if (!quit[*s])
+					return 0;
+				buf++;
+				nin--;
+			} else
+				while (*s > 0) {
+					nout = write(out, quit, *s);
+					if (nout <= 0)
+						return -1;
+					(*s) -= nout;
+				}
+		}
+	}
+
+	while (nin > 0) {
+		nout = write(out, buf, nin);
+		if (nout <= 0)
+			return -1;
+		nin -= nout;
+	}
+
+	return 0;
+}
+
+static int
+kwboot_terminal(int tty)
+{
+	int rc, in, s;
+	char *quit = "\34c";
+	struct termios otio, tio;
+
+	rc = -1;
+
+	in = STDIN_FILENO;
+	if (isatty(in)) {
+		rc = tcgetattr(in, &otio);
+		if (!rc) {
+			tio = otio;
+			cfmakeraw(&tio);
+			rc = tcsetattr(in, TCSANOW, &tio);
+		}
+		if (rc) {
+			perror("tcsetattr");
+			goto out;
+		}
+
+		kwboot_printv("[Type Ctrl-%c + %c to quit]\r\n",
+			      quit[0]|0100, quit[1]);
+	} else
+		in = -1;
+
+	rc = 0;
+	s = 0;
+
+	do {
+		fd_set rfds;
+		int nfds = 0;
+
+		FD_SET(tty, &rfds);
+		nfds = nfds < tty ? tty : nfds;
+
+		if (in >= 0) {
+			FD_SET(in, &rfds);
+			nfds = nfds < in ? in : nfds;
+		}
+
+		nfds = select(nfds + 1, &rfds, NULL, NULL, NULL);
+		if (nfds < 0)
+			break;
+
+		if (FD_ISSET(tty, &rfds)) {
+			rc = kwboot_term_pipe(tty, STDOUT_FILENO, NULL, NULL);
+			if (rc)
+				break;
+		}
+
+		if (FD_ISSET(in, &rfds)) {
+			rc = kwboot_term_pipe(in, tty, quit, &s);
+			if (rc)
+				break;
+		}
+	} while (quit[s] != 0);
+
+	tcsetattr(in, TCSANOW, &otio);
+out:
+	return rc;
+}
+
+static void *
+kwboot_mmap_image(const char *path, size_t *size, int prot)
+{
+	int rc, fd, flags;
+	struct stat st;
+	void *img;
+
+	rc = -1;
+	img = NULL;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		goto out;
+
+	rc = fstat(fd, &st);
+	if (rc)
+		goto out;
+
+	flags = (prot & PROT_WRITE) ? MAP_PRIVATE : MAP_SHARED;
+
+	img = mmap(NULL, st.st_size, prot, flags, fd, 0);
+	if (img == MAP_FAILED) {
+		img = NULL;
+		goto out;
+	}
+
+	rc = 0;
+	*size = st.st_size;
+out:
+	if (rc && img) {
+		munmap(img, st.st_size);
+		img = NULL;
+	}
+	if (fd >= 0)
+		close(fd);
+
+	return img;
+}
+
+static uint8_t
+kwboot_img_csum8(void *_data, size_t size)
+{
+	uint8_t *data = _data, csum;
+
+	for (csum = 0; size-- > 0; data++)
+		csum += *data;
+
+	return csum;
+}
+
+static int
+kwboot_img_patch_hdr(void *img, size_t size)
+{
+	int rc;
+	struct main_hdr_v1 *hdr;
+	uint8_t csum;
+	size_t hdrsz = sizeof(*hdr);
+	int image_ver;
+
+	rc = -1;
+	hdr = img;
+
+	if (size < hdrsz) {
+		errno = EINVAL;
+		goto out;
+	}
+
+	image_ver = image_version(img);
+	if (image_ver < 0) {
+		fprintf(stderr, "Invalid image header version\n");
+		errno = EINVAL;
+		goto out;
+	}
+
+	if (image_ver == 0)
+		hdrsz = sizeof(*hdr);
+	else
+		hdrsz = KWBHEADER_V1_SIZE(hdr);
+
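+	/*
+	 * Summing the whole header and subtracting the stored checksum byte
+	 * yields the checksum over everything else; for a valid image that
+	 * must match the stored value. The same trick is used below when the
+	 * checksum is recomputed after patching.
+	 */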
+	csum = kwboot_img_csum8(hdr, hdrsz) - hdr->checksum;
+	if (csum != hdr->checksum) {
+		errno = EINVAL;
+		goto out;
+	}
+
+	if (hdr->blockid == IBR_HDR_UART_ID) {
+		rc = 0;
+		goto out;
+	}
+
+	hdr->blockid = IBR_HDR_UART_ID;
+
+	if (image_ver == 0) {
+		struct main_hdr_v0 *hdr_v0 = img;
+
+		hdr_v0->nandeccmode = IBR_HDR_ECC_DISABLED;
+		hdr_v0->nandpagesize = 0;
+
+		hdr_v0->srcaddr = hdr_v0->ext
+			? sizeof(struct kwb_header)
+			: sizeof(*hdr_v0);
+	}
+
+	hdr->checksum = kwboot_img_csum8(hdr, hdrsz) - csum;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static void
+kwboot_usage(FILE *stream, char *progname)
+{
+	fprintf(stream,
+		"Usage: %s [OPTIONS] [-b <image> | -D <image> ] [-B <baud> ] <TTY>\n",
+		progname);
+	fprintf(stream, "\n");
+	fprintf(stream,
+		"  -b <image>: boot <image> with preamble (Kirkwood, Armada 370/XP)\n");
+	fprintf(stream, "  -p: patch <image> to type 0x69 (uart boot)\n");
+	fprintf(stream,
+		"  -D <image>: boot <image> without preamble (Dove)\n");
+	fprintf(stream, "  -d: enter debug mode\n");
+	fprintf(stream, "  -a: use timings for Armada XP\n");
+	fprintf(stream, "  -q <req-delay>:  use specific request-delay\n");
+	fprintf(stream, "  -s <resp-timeo>: use specific response-timeout\n");
+	fprintf(stream,
+		"  -o <block-timeo>: use specific xmodem block timeout\n");
+	fprintf(stream, "\n");
+	fprintf(stream, "  -t: mini terminal\n");
+	fprintf(stream, "\n");
+	fprintf(stream, "  -B <baud>: set baud rate\n");
+	fprintf(stream, "\n");
+}
+
+int
+main(int argc, char **argv)
+{
+	const char *ttypath, *imgpath;
+	int rv, rc, tty, term, prot, patch;
+	void *bootmsg;
+	void *debugmsg;
+	void *img;
+	size_t size;
+	speed_t speed;
+
+	rv = 1;
+	tty = -1;
+	bootmsg = NULL;
+	debugmsg = NULL;
+	imgpath = NULL;
+	img = NULL;
+	term = 0;
+	patch = 0;
+	size = 0;
+	speed = B115200;
+
+	kwboot_verbose = isatty(STDOUT_FILENO);
+
+	do {
+		int c = getopt(argc, argv, "hb:ptaB:dD:q:s:o:");
+		if (c < 0)
+			break;
+
+		switch (c) {
+		case 'b':
+			bootmsg = kwboot_msg_boot;
+			imgpath = optarg;
+			break;
+
+		case 'D':
+			bootmsg = NULL;
+			imgpath = optarg;
+			break;
+
+		case 'd':
+			debugmsg = kwboot_msg_debug;
+			break;
+
+		case 'p':
+			patch = 1;
+			break;
+
+		case 't':
+			term = 1;
+			break;
+
+		case 'a':
+			msg_req_delay = KWBOOT_MSG_REQ_DELAY_AXP;
+			msg_rsp_timeo = KWBOOT_MSG_RSP_TIMEO_AXP;
+			break;
+
+		case 'q':
+			msg_req_delay = atoi(optarg);
+			break;
+
+		case 's':
+			msg_rsp_timeo = atoi(optarg);
+			break;
+
+		case 'o':
+			blk_rsp_timeo = atoi(optarg);
+			break;
+
+		case 'B':
+			speed = kwboot_tty_speed(atoi(optarg));
+			if (speed == -1)
+				goto usage;
+			break;
+
+		case 'h':
+			rv = 0;
+		default:
+			goto usage;
+		}
+	} while (1);
+
+	if (!bootmsg && !term && !debugmsg)
+		goto usage;
+
+	if (patch && !imgpath)
+		goto usage;
+
+	if (argc - optind < 1)
+		goto usage;
+
+	ttypath = argv[optind++];
+
+	tty = kwboot_open_tty(ttypath, speed);
+	if (tty < 0) {
+		perror(ttypath);
+		goto out;
+	}
+
+	if (imgpath) {
+		prot = PROT_READ | (patch ? PROT_WRITE : 0);
+
+		img = kwboot_mmap_image(imgpath, &size, prot);
+		if (!img) {
+			perror(imgpath);
+			goto out;
+		}
+	}
+
+	if (patch) {
+		rc = kwboot_img_patch_hdr(img, size);
+		if (rc) {
+			fprintf(stderr, "%s: Invalid image.\n", imgpath);
+			goto out;
+		}
+	}
+
+	if (debugmsg) {
+		rc = kwboot_debugmsg(tty, debugmsg);
+		if (rc) {
+			perror("debugmsg");
+			goto out;
+		}
+	} else if (bootmsg) {
+		rc = kwboot_bootmsg(tty, bootmsg);
+		if (rc) {
+			perror("bootmsg");
+			goto out;
+		}
+	}
+
+	if (img) {
+		rc = kwboot_xmodem(tty, img, size);
+		if (rc) {
+			perror("xmodem");
+			goto out;
+		}
+	}
+
+	if (term) {
+		rc = kwboot_terminal(tty);
+		if (rc && !(errno == EINTR)) {
+			perror("terminal");
+			goto out;
+		}
+	}
+
+	rv = 0;
+out:
+	if (tty >= 0)
+		close(tty);
+
+	if (img)
+		munmap(img, size);
+
+	return rv;
+
+usage:
+	kwboot_usage(rv ? stderr : stdout, basename(argv[0]));
+	goto out;
+}
diff --git a/tools/u-boot-tools/lib/.crc16.o.cmd b/tools/u-boot-tools/lib/.crc16.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..889fc898477d12b7e81045383059db93760f177b
--- /dev/null
+++ b/tools/u-boot-tools/lib/.crc16.o.cmd
@@ -0,0 +1,105 @@
+cmd_tools/lib/crc16.o := cc -Wp,-MD,tools/lib/.crc16.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/lib/crc16.o tools/lib/crc16.c
+
+source_tools/lib/crc16.o := tools/lib/crc16.c
+
+deps_tools/lib/crc16.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../lib/crc16.c \
+  /usr/include/arpa/inet.h \
+  /usr/include/netinet/in.h \
+  /usr/include/x86_64-linux-gnu/sys/socket.h \
+  /usr/include/x86_64-linux-gnu/bits/socket.h \
+  /usr/include/x86_64-linux-gnu/bits/socket_type.h \
+  /usr/include/x86_64-linux-gnu/bits/sockaddr.h \
+  /usr/include/x86_64-linux-gnu/asm/socket.h \
+  /usr/include/asm-generic/socket.h \
+  /usr/include/x86_64-linux-gnu/asm/sockios.h \
+  /usr/include/asm-generic/sockios.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_osockaddr.h \
+  /usr/include/x86_64-linux-gnu/bits/in.h \
+  include/u-boot/crc.h \
+
+tools/lib/crc16.o: $(deps_tools/lib/crc16.o)
+
+$(deps_tools/lib/crc16.o):
diff --git a/tools/u-boot-tools/lib/.crc32.o.cmd b/tools/u-boot-tools/lib/.crc32.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..4352529cbbd1eec5f2181b26860457492419f3a0
--- /dev/null
+++ b/tools/u-boot-tools/lib/.crc32.o.cmd
@@ -0,0 +1,110 @@
+cmd_tools/lib/crc32.o := cc -Wp,-MD,tools/lib/.crc32.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE -pedantic -c -o tools/lib/crc32.o tools/lib/crc32.c
+
+source_tools/lib/crc32.o := tools/lib/crc32.c
+
+deps_tools/lib/crc32.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../lib/crc32.c \
+    $(wildcard include/config/hw/watchdog.h) \
+    $(wildcard include/config/watchdog.h) \
+    $(wildcard include/config/dynamic/crc/table.h) \
+  /usr/include/arpa/inet.h \
+  /usr/include/netinet/in.h \
+  /usr/include/x86_64-linux-gnu/sys/socket.h \
+  /usr/include/x86_64-linux-gnu/bits/socket.h \
+  /usr/include/x86_64-linux-gnu/bits/socket_type.h \
+  /usr/include/x86_64-linux-gnu/bits/sockaddr.h \
+  /usr/include/x86_64-linux-gnu/asm/socket.h \
+  /usr/include/asm-generic/socket.h \
+  /usr/include/x86_64-linux-gnu/asm/sockios.h \
+  /usr/include/asm-generic/sockios.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_osockaddr.h \
+  /usr/include/x86_64-linux-gnu/bits/in.h \
+  include/compiler.h \
+  include/u-boot/crc.h \
+  include/u-boot/zlib.h \
+
+tools/lib/crc32.o: $(deps_tools/lib/crc32.o)
+
+$(deps_tools/lib/crc32.o):
diff --git a/tools/u-boot-tools/lib/.crc8.o.cmd b/tools/u-boot-tools/lib/.crc8.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..8f1cfeaed767e98e88faae01cd46d81115fc9888
--- /dev/null
+++ b/tools/u-boot-tools/lib/.crc8.o.cmd
@@ -0,0 +1,93 @@
+cmd_tools/lib/crc8.o := cc -Wp,-MD,tools/lib/.crc8.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE -pedantic -c -o tools/lib/crc8.o tools/lib/crc8.c
+
+source_tools/lib/crc8.o := tools/lib/crc8.c
+
+deps_tools/lib/crc8.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../lib/crc8.c \
+  include/linux/crc8.h \
+
+tools/lib/crc8.o: $(deps_tools/lib/crc8.o)
+
+$(deps_tools/lib/crc8.o):
diff --git a/tools/u-boot-tools/lib/.fdtdec.o.cmd b/tools/u-boot-tools/lib/.fdtdec.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..e9bffc31be266044dc9ddd0484a6ada4ce76cd6e
--- /dev/null
+++ b/tools/u-boot-tools/lib/.fdtdec.o.cmd
@@ -0,0 +1,113 @@
+cmd_tools/lib/fdtdec.o := cc -Wp,-MD,tools/lib/.fdtdec.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/lib/fdtdec.o tools/lib/fdtdec.c
+
+source_tools/lib/fdtdec.o := tools/lib/fdtdec.c
+
+deps_tools/lib/fdtdec.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../lib/fdtdec.c \
+    $(wildcard include/config/of/translate.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/dm/pci.h) \
+    $(wildcard include/config/spl/build.h) \
+    $(wildcard include/config/of/embed.h) \
+    $(wildcard include/config/nr/dram/banks.h) \
+    $(wildcard include/config/multi/dtb/fit.h) \
+    $(wildcard include/config/multi/dtb/fit/gzip.h) \
+    $(wildcard include/config/multi/dtb/fit/lzo.h) \
+    $(wildcard include/config/spl/multi/dtb/fit/uncompress/sz.h) \
+    $(wildcard include/config/gzip.h) \
+    $(wildcard include/config/lzo.h) \
+    $(wildcard include/config/multi/dtb/fit/dyn/alloc.h) \
+    $(wildcard include/config/multi/dtb/fit/user/defined/area.h) \
+    $(wildcard include/config/multi/dtb/fit/user/def/addr.h) \
+    $(wildcard include/config/of/board.h) \
+    $(wildcard include/config/of/separate.h) \
+    $(wildcard include/config/spl/separate/bss.h) \
+    $(wildcard include/config/of/control.h) \
+    $(wildcard include/config/of/hostfile.h) \
+    $(wildcard include/config/of/prior/stage.h) \
+
+tools/lib/fdtdec.o: $(deps_tools/lib/fdtdec.o)
+
+$(deps_tools/lib/fdtdec.o):
diff --git a/tools/u-boot-tools/lib/.fdtdec_common.o.cmd b/tools/u-boot-tools/lib/.fdtdec_common.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..77ba37b56a39e2996d451661324dae85360b4f38
--- /dev/null
+++ b/tools/u-boot-tools/lib/.fdtdec_common.o.cmd
@@ -0,0 +1,108 @@
+cmd_tools/lib/fdtdec_common.o := cc -Wp,-MD,tools/lib/.fdtdec_common.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/lib/fdtdec_common.o tools/lib/fdtdec_common.c
+
+source_tools/lib/fdtdec_common.o := tools/lib/fdtdec_common.c
+
+deps_tools/lib/fdtdec_common.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../lib/fdtdec_common.c \
+  scripts/dtc/libfdt/libfdt.h \
+  scripts/dtc/libfdt/libfdt_env.h \
+  scripts/dtc/libfdt/fdt.h \
+  include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+
+tools/lib/fdtdec_common.o: $(deps_tools/lib/fdtdec_common.o)
+
+$(deps_tools/lib/fdtdec_common.o):
diff --git a/tools/u-boot-tools/lib/.md5.o.cmd b/tools/u-boot-tools/lib/.md5.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..5bcde5efccea1cbaa694beace88f85e3865576b2
--- /dev/null
+++ b/tools/u-boot-tools/lib/.md5.o.cmd
@@ -0,0 +1,96 @@
+cmd_tools/lib/md5.o := cc -Wp,-MD,tools/lib/.md5.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE -pedantic -c -o tools/lib/md5.o tools/lib/md5.c
+
+source_tools/lib/md5.o := tools/lib/md5.c
+
+deps_tools/lib/md5.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../lib/md5.c \
+    $(wildcard include/config/hw/watchdog.h) \
+    $(wildcard include/config/watchdog.h) \
+  include/compiler.h \
+  include/u-boot/md5.h \
+
+tools/lib/md5.o: $(deps_tools/lib/md5.o)
+
+$(deps_tools/lib/md5.o):
diff --git a/tools/u-boot-tools/lib/.rc4.o.cmd b/tools/u-boot-tools/lib/.rc4.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..09cf31a1d5fd5924e4ff1749f7318c93ae3f95b5
--- /dev/null
+++ b/tools/u-boot-tools/lib/.rc4.o.cmd
@@ -0,0 +1,93 @@
+cmd_tools/lib/rc4.o := cc -Wp,-MD,tools/lib/.rc4.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/lib/rc4.o tools/lib/rc4.c
+
+source_tools/lib/rc4.o := tools/lib/rc4.c
+
+deps_tools/lib/rc4.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../lib/rc4.c \
+  include/rc4.h \
+
+tools/lib/rc4.o: $(deps_tools/lib/rc4.o)
+
+$(deps_tools/lib/rc4.o):
diff --git a/tools/u-boot-tools/lib/.sha1.o.cmd b/tools/u-boot-tools/lib/.sha1.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..137f340fba169ba34043f9c9a22ceb9e381020f1
--- /dev/null
+++ b/tools/u-boot-tools/lib/.sha1.o.cmd
@@ -0,0 +1,97 @@
+cmd_tools/lib/sha1.o := cc -Wp,-MD,tools/lib/.sha1.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE -pedantic -c -o tools/lib/sha1.o tools/lib/sha1.c
+
+source_tools/lib/sha1.o := tools/lib/sha1.c
+
+deps_tools/lib/sha1.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../lib/sha1.c \
+    $(wildcard include/config/hw/watchdog.h) \
+    $(wildcard include/config/watchdog.h) \
+  include/watchdog.h \
+    $(wildcard include/config/mpc85xx.h) \
+  include/u-boot/sha1.h \
+
+tools/lib/sha1.o: $(deps_tools/lib/sha1.o)
+
+$(deps_tools/lib/sha1.o):
diff --git a/tools/u-boot-tools/lib/.sha256.o.cmd b/tools/u-boot-tools/lib/.sha256.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..880821ade0d94f6a93a3f715be167b0f596f6105
--- /dev/null
+++ b/tools/u-boot-tools/lib/.sha256.o.cmd
@@ -0,0 +1,97 @@
+cmd_tools/lib/sha256.o := cc -Wp,-MD,tools/lib/.sha256.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE -pedantic -c -o tools/lib/sha256.o tools/lib/sha256.c
+
+source_tools/lib/sha256.o := tools/lib/sha256.c
+
+deps_tools/lib/sha256.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../lib/sha256.c \
+    $(wildcard include/config/hw/watchdog.h) \
+    $(wildcard include/config/watchdog.h) \
+  include/watchdog.h \
+    $(wildcard include/config/mpc85xx.h) \
+  include/u-boot/sha256.h \
+
+tools/lib/sha256.o: $(deps_tools/lib/sha256.o)
+
+$(deps_tools/lib/sha256.o):
diff --git a/tools/u-boot-tools/lib/crc16.c b/tools/u-boot-tools/lib/crc16.c
new file mode 100644
index 0000000000000000000000000000000000000000..2e9e81a3f070be2e5cd36c45b122f874affd6ad7
--- /dev/null
+++ b/tools/u-boot-tools/lib/crc16.c
@@ -0,0 +1 @@
+#include <../lib/crc16.c>
diff --git a/tools/u-boot-tools/lib/crc16.o b/tools/u-boot-tools/lib/crc16.o
new file mode 100644
index 0000000000000000000000000000000000000000..2b6d0759e83eba679dd2d4325fd40ad243ff6a2a
Binary files /dev/null and b/tools/u-boot-tools/lib/crc16.o differ
diff --git a/tools/u-boot-tools/lib/crc32.c b/tools/u-boot-tools/lib/crc32.c
new file mode 100644
index 0000000000000000000000000000000000000000..5fa5f67afd4e22daafdbbe77014431014e8555f1
--- /dev/null
+++ b/tools/u-boot-tools/lib/crc32.c
@@ -0,0 +1 @@
+#include <../lib/crc32.c>
diff --git a/tools/u-boot-tools/lib/crc32.o b/tools/u-boot-tools/lib/crc32.o
new file mode 100644
index 0000000000000000000000000000000000000000..475c7445713fd50607b48d83137d897c701d9769
Binary files /dev/null and b/tools/u-boot-tools/lib/crc32.o differ
diff --git a/tools/u-boot-tools/lib/crc8.c b/tools/u-boot-tools/lib/crc8.c
new file mode 100644
index 0000000000000000000000000000000000000000..f50097ad9eded46e8fd66b22654b2bb3720d6f2f
--- /dev/null
+++ b/tools/u-boot-tools/lib/crc8.c
@@ -0,0 +1 @@
+#include <../lib/crc8.c>
diff --git a/tools/u-boot-tools/lib/crc8.o b/tools/u-boot-tools/lib/crc8.o
new file mode 100644
index 0000000000000000000000000000000000000000..237f1efb10d9d3d99cf45911a231cfb6cf117954
Binary files /dev/null and b/tools/u-boot-tools/lib/crc8.o differ
diff --git a/tools/u-boot-tools/lib/fdtdec.c b/tools/u-boot-tools/lib/fdtdec.c
new file mode 100644
index 0000000000000000000000000000000000000000..9568094d12a1fc6dac98bac4e791cc9864027039
--- /dev/null
+++ b/tools/u-boot-tools/lib/fdtdec.c
@@ -0,0 +1 @@
+#include <../lib/fdtdec.c>
diff --git a/tools/u-boot-tools/lib/fdtdec.o b/tools/u-boot-tools/lib/fdtdec.o
new file mode 100644
index 0000000000000000000000000000000000000000..4ff70e2a444864c8af648efa41dda3fbca12bd32
Binary files /dev/null and b/tools/u-boot-tools/lib/fdtdec.o differ
diff --git a/tools/u-boot-tools/lib/fdtdec_common.c b/tools/u-boot-tools/lib/fdtdec_common.c
new file mode 100644
index 0000000000000000000000000000000000000000..04843b78867e225174b902858b34aa8462386f2a
--- /dev/null
+++ b/tools/u-boot-tools/lib/fdtdec_common.c
@@ -0,0 +1 @@
+#include <../lib/fdtdec_common.c>
diff --git a/tools/u-boot-tools/lib/fdtdec_common.o b/tools/u-boot-tools/lib/fdtdec_common.o
new file mode 100644
index 0000000000000000000000000000000000000000..aafea987d198015f45b2d143224a2ba164104933
Binary files /dev/null and b/tools/u-boot-tools/lib/fdtdec_common.o differ
diff --git a/tools/u-boot-tools/lib/libfdt/.fdt_region.o.cmd b/tools/u-boot-tools/lib/libfdt/.fdt_region.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..b4aecbc60cb0210acd698e7ef2cca8eba04a9e63
--- /dev/null
+++ b/tools/u-boot-tools/lib/libfdt/.fdt_region.o.cmd
@@ -0,0 +1,112 @@
+cmd_tools/lib/libfdt/fdt_region.o := cc -Wp,-MD,tools/lib/libfdt/.fdt_region.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/lib/libfdt/fdt_region.o tools/lib/libfdt/fdt_region.c
+
+source_tools/lib/libfdt/fdt_region.o := tools/lib/libfdt/fdt_region.c
+
+deps_tools/lib/libfdt/fdt_region.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../lib/libfdt/fdt_region.c \
+  include/linux/libfdt_env.h \
+  tools/../scripts/dtc/libfdt/libfdt_env.h \
+  tools/fdt_host.h \
+  tools/../include/linux/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt_env.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/fdt.h \
+  tools/../include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+
+tools/lib/libfdt/fdt_region.o: $(deps_tools/lib/libfdt/fdt_region.o)
+
+$(deps_tools/lib/libfdt/fdt_region.o):
diff --git a/tools/u-boot-tools/lib/libfdt/.fdt_ro.o.cmd b/tools/u-boot-tools/lib/libfdt/.fdt_ro.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..1458c907ad782d0471620dedf16c5b8ff6d24768
--- /dev/null
+++ b/tools/u-boot-tools/lib/libfdt/.fdt_ro.o.cmd
@@ -0,0 +1,115 @@
+cmd_tools/lib/libfdt/fdt_ro.o := cc -Wp,-MD,tools/lib/libfdt/.fdt_ro.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/lib/libfdt/fdt_ro.o tools/lib/libfdt/fdt_ro.c
+
+source_tools/lib/libfdt/fdt_ro.o := tools/lib/libfdt/fdt_ro.c
+
+deps_tools/lib/libfdt/fdt_ro.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/../lib/libfdt/fdt_ro.c \
+  include/linux/libfdt_env.h \
+  tools/../scripts/dtc/libfdt/libfdt_env.h \
+  tools/fdt_host.h \
+  tools/../include/linux/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt_env.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/fdt.h \
+  tools/../include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+  tools/../lib/libfdt/libfdt_internal.h \
+  tools/../lib/libfdt/../../scripts/dtc/libfdt/libfdt_internal.h \
+  scripts/dtc/libfdt/fdt.h \
+
+tools/lib/libfdt/fdt_ro.o: $(deps_tools/lib/libfdt/fdt_ro.o)
+
+$(deps_tools/lib/libfdt/fdt_ro.o):
diff --git a/tools/u-boot-tools/lib/libfdt/fdt_region.c b/tools/u-boot-tools/lib/libfdt/fdt_region.c
new file mode 100644
index 0000000000000000000000000000000000000000..d85be27d6d54a3acc52ba87259b55e8851a19a63
--- /dev/null
+++ b/tools/u-boot-tools/lib/libfdt/fdt_region.c
@@ -0,0 +1 @@
+#include <../lib/libfdt/fdt_region.c>
diff --git a/tools/u-boot-tools/lib/libfdt/fdt_region.o b/tools/u-boot-tools/lib/libfdt/fdt_region.o
new file mode 100644
index 0000000000000000000000000000000000000000..d185c6b1edda497ec92735552c09bca808194ffa
Binary files /dev/null and b/tools/u-boot-tools/lib/libfdt/fdt_region.o differ
diff --git a/tools/u-boot-tools/lib/libfdt/fdt_ro.c b/tools/u-boot-tools/lib/libfdt/fdt_ro.c
new file mode 100644
index 0000000000000000000000000000000000000000..fd0a499fd0c94e8af90e6a24f2a2f91bcc9dc71f
--- /dev/null
+++ b/tools/u-boot-tools/lib/libfdt/fdt_ro.c
@@ -0,0 +1 @@
+#include <../lib/libfdt/fdt_ro.c>
diff --git a/tools/u-boot-tools/lib/libfdt/fdt_ro.o b/tools/u-boot-tools/lib/libfdt/fdt_ro.o
new file mode 100644
index 0000000000000000000000000000000000000000..5a925b0a25f3e535fb5d29b26910ba86fe3b1d35
Binary files /dev/null and b/tools/u-boot-tools/lib/libfdt/fdt_ro.o differ
diff --git a/tools/u-boot-tools/lib/md5.c b/tools/u-boot-tools/lib/md5.c
new file mode 100644
index 0000000000000000000000000000000000000000..d12053d00905acd79816aefd76f6e559fb10f1a1
--- /dev/null
+++ b/tools/u-boot-tools/lib/md5.c
@@ -0,0 +1 @@
+#include <../lib/md5.c>
diff --git a/tools/u-boot-tools/lib/md5.o b/tools/u-boot-tools/lib/md5.o
new file mode 100644
index 0000000000000000000000000000000000000000..9d765b8c8a2025e353c3873badb9adaa1d36fe0d
Binary files /dev/null and b/tools/u-boot-tools/lib/md5.o differ
diff --git a/tools/u-boot-tools/lib/rc4.c b/tools/u-boot-tools/lib/rc4.c
new file mode 100644
index 0000000000000000000000000000000000000000..79dd7ded1c617cd0eb191a48864d6006177319d8
--- /dev/null
+++ b/tools/u-boot-tools/lib/rc4.c
@@ -0,0 +1 @@
+#include <../lib/rc4.c>
diff --git a/tools/u-boot-tools/lib/rc4.o b/tools/u-boot-tools/lib/rc4.o
new file mode 100644
index 0000000000000000000000000000000000000000..0efe03e515935fbf572960ab55f0fca8b82fd2b0
Binary files /dev/null and b/tools/u-boot-tools/lib/rc4.o differ
diff --git a/tools/u-boot-tools/lib/sha1.c b/tools/u-boot-tools/lib/sha1.c
new file mode 100644
index 0000000000000000000000000000000000000000..c868d26de31134fc1fbd6fd77f38a0060c1a68ca
--- /dev/null
+++ b/tools/u-boot-tools/lib/sha1.c
@@ -0,0 +1 @@
+#include <../lib/sha1.c>
diff --git a/tools/u-boot-tools/lib/sha1.o b/tools/u-boot-tools/lib/sha1.o
new file mode 100644
index 0000000000000000000000000000000000000000..23e859dfa76d2ad7dd76704a3aa47a3f11d1730f
Binary files /dev/null and b/tools/u-boot-tools/lib/sha1.o differ
diff --git a/tools/u-boot-tools/lib/sha256.c b/tools/u-boot-tools/lib/sha256.c
new file mode 100644
index 0000000000000000000000000000000000000000..85b3f7f3c82ffb3ade039967432f6d1296b7e216
--- /dev/null
+++ b/tools/u-boot-tools/lib/sha256.c
@@ -0,0 +1 @@
+#include <../lib/sha256.c>
diff --git a/tools/u-boot-tools/lib/sha256.o b/tools/u-boot-tools/lib/sha256.o
new file mode 100644
index 0000000000000000000000000000000000000000..839b461cf71702b5109478bb0f0f6cee3e79be79
Binary files /dev/null and b/tools/u-boot-tools/lib/sha256.o differ
diff --git a/tools/u-boot-tools/libfdt/.fdt.o.cmd b/tools/u-boot-tools/libfdt/.fdt.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..3c7470d024e1673840657e01773b3f31aad5808b
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/.fdt.o.cmd
@@ -0,0 +1,114 @@
+cmd_tools/libfdt/fdt.o := cc -Wp,-MD,tools/libfdt/.fdt.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/libfdt/fdt.o tools/libfdt/fdt.c
+
+source_tools/libfdt/fdt.o := tools/libfdt/fdt.c
+
+deps_tools/libfdt/fdt.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/fdt_host.h \
+  tools/../include/linux/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt_env.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/fdt.h \
+  tools/../include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+  tools/../scripts/dtc/libfdt/fdt.c \
+  tools/../scripts/dtc/libfdt/libfdt_env.h \
+  scripts/dtc/libfdt/fdt.h \
+  scripts/dtc/libfdt/libfdt.h \
+  tools/../scripts/dtc/libfdt/libfdt_internal.h \
+
+tools/libfdt/fdt.o: $(deps_tools/libfdt/fdt.o)
+
+$(deps_tools/libfdt/fdt.o):
diff --git a/tools/u-boot-tools/libfdt/.fdt_addresses.o.cmd b/tools/u-boot-tools/libfdt/.fdt_addresses.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..ebcbf3e8c08a41fcd6072b85c1bee0ee6d6631a0
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/.fdt_addresses.o.cmd
@@ -0,0 +1,114 @@
+cmd_tools/libfdt/fdt_addresses.o := cc -Wp,-MD,tools/libfdt/.fdt_addresses.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/libfdt/fdt_addresses.o tools/libfdt/fdt_addresses.c
+
+source_tools/libfdt/fdt_addresses.o := tools/libfdt/fdt_addresses.c
+
+deps_tools/libfdt/fdt_addresses.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/fdt_host.h \
+  tools/../include/linux/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt_env.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/fdt.h \
+  tools/../include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+  tools/../scripts/dtc/libfdt/fdt_addresses.c \
+  tools/../scripts/dtc/libfdt/libfdt_env.h \
+  scripts/dtc/libfdt/fdt.h \
+  scripts/dtc/libfdt/libfdt.h \
+  tools/../scripts/dtc/libfdt/libfdt_internal.h \
+
+tools/libfdt/fdt_addresses.o: $(deps_tools/libfdt/fdt_addresses.o)
+
+$(deps_tools/libfdt/fdt_addresses.o):
diff --git a/tools/u-boot-tools/libfdt/.fdt_empty_tree.o.cmd b/tools/u-boot-tools/libfdt/.fdt_empty_tree.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..5f1dea73873a04e9b9cb5bc3936793a5d57e1cdd
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/.fdt_empty_tree.o.cmd
@@ -0,0 +1,114 @@
+cmd_tools/libfdt/fdt_empty_tree.o := cc -Wp,-MD,tools/libfdt/.fdt_empty_tree.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/libfdt/fdt_empty_tree.o tools/libfdt/fdt_empty_tree.c
+
+source_tools/libfdt/fdt_empty_tree.o := tools/libfdt/fdt_empty_tree.c
+
+deps_tools/libfdt/fdt_empty_tree.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/fdt_host.h \
+  tools/../include/linux/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt_env.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/fdt.h \
+  tools/../include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+  tools/../scripts/dtc/libfdt/fdt_empty_tree.c \
+  tools/../scripts/dtc/libfdt/libfdt_env.h \
+  scripts/dtc/libfdt/fdt.h \
+  scripts/dtc/libfdt/libfdt.h \
+  tools/../scripts/dtc/libfdt/libfdt_internal.h \
+
+tools/libfdt/fdt_empty_tree.o: $(deps_tools/libfdt/fdt_empty_tree.o)
+
+$(deps_tools/libfdt/fdt_empty_tree.o):
diff --git a/tools/u-boot-tools/libfdt/.fdt_overlay.o.cmd b/tools/u-boot-tools/libfdt/.fdt_overlay.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..70e8e2cae6ec2bb91ee0b9f105577063150bf0e9
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/.fdt_overlay.o.cmd
@@ -0,0 +1,114 @@
+cmd_tools/libfdt/fdt_overlay.o := cc -Wp,-MD,tools/libfdt/.fdt_overlay.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/libfdt/fdt_overlay.o tools/libfdt/fdt_overlay.c
+
+source_tools/libfdt/fdt_overlay.o := tools/libfdt/fdt_overlay.c
+
+deps_tools/libfdt/fdt_overlay.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/fdt_host.h \
+  tools/../include/linux/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt_env.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/fdt.h \
+  tools/../include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+  tools/../scripts/dtc/libfdt/fdt_overlay.c \
+  tools/../scripts/dtc/libfdt/libfdt_env.h \
+  scripts/dtc/libfdt/fdt.h \
+  scripts/dtc/libfdt/libfdt.h \
+  tools/../scripts/dtc/libfdt/libfdt_internal.h \
+
+tools/libfdt/fdt_overlay.o: $(deps_tools/libfdt/fdt_overlay.o)
+
+$(deps_tools/libfdt/fdt_overlay.o):
diff --git a/tools/u-boot-tools/libfdt/.fdt_rw.o.cmd b/tools/u-boot-tools/libfdt/.fdt_rw.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..61f53f4e2ea7b14f563456098f164dd12c9f54b5
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/.fdt_rw.o.cmd
@@ -0,0 +1,114 @@
+cmd_tools/libfdt/fdt_rw.o := cc -Wp,-MD,tools/libfdt/.fdt_rw.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/libfdt/fdt_rw.o tools/libfdt/fdt_rw.c
+
+source_tools/libfdt/fdt_rw.o := tools/libfdt/fdt_rw.c
+
+deps_tools/libfdt/fdt_rw.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/fdt_host.h \
+  tools/../include/linux/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt_env.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/fdt.h \
+  tools/../include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+  tools/libfdt/../../scripts/dtc/libfdt/fdt_rw.c \
+  tools/libfdt/../../scripts/dtc/libfdt/libfdt_env.h \
+  scripts/dtc/libfdt/fdt.h \
+  scripts/dtc/libfdt/libfdt.h \
+  tools/libfdt/../../scripts/dtc/libfdt/libfdt_internal.h \
+
+tools/libfdt/fdt_rw.o: $(deps_tools/libfdt/fdt_rw.o)
+
+$(deps_tools/libfdt/fdt_rw.o):
diff --git a/tools/u-boot-tools/libfdt/.fdt_strerror.o.cmd b/tools/u-boot-tools/libfdt/.fdt_strerror.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..1338e0e566a735b6896a35dee8b3d236247bdc20
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/.fdt_strerror.o.cmd
@@ -0,0 +1,114 @@
+cmd_tools/libfdt/fdt_strerror.o := cc -Wp,-MD,tools/libfdt/.fdt_strerror.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/libfdt/fdt_strerror.o tools/libfdt/fdt_strerror.c
+
+source_tools/libfdt/fdt_strerror.o := tools/libfdt/fdt_strerror.c
+
+deps_tools/libfdt/fdt_strerror.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/fdt_host.h \
+  tools/../include/linux/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt_env.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/fdt.h \
+  tools/../include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+  tools/../scripts/dtc/libfdt/fdt_strerror.c \
+  tools/../scripts/dtc/libfdt/libfdt_env.h \
+  scripts/dtc/libfdt/fdt.h \
+  scripts/dtc/libfdt/libfdt.h \
+  tools/../scripts/dtc/libfdt/libfdt_internal.h \
+
+tools/libfdt/fdt_strerror.o: $(deps_tools/libfdt/fdt_strerror.o)
+
+$(deps_tools/libfdt/fdt_strerror.o):
diff --git a/tools/u-boot-tools/libfdt/.fdt_sw.o.cmd b/tools/u-boot-tools/libfdt/.fdt_sw.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..aaa346441d49c86bd972f71dc7efe28555782998
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/.fdt_sw.o.cmd
@@ -0,0 +1,114 @@
+cmd_tools/libfdt/fdt_sw.o := cc -Wp,-MD,tools/libfdt/.fdt_sw.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/libfdt/fdt_sw.o tools/libfdt/fdt_sw.c
+
+source_tools/libfdt/fdt_sw.o := tools/libfdt/fdt_sw.c
+
+deps_tools/libfdt/fdt_sw.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/fdt_host.h \
+  tools/../include/linux/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt_env.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/fdt.h \
+  tools/../include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+  tools/../scripts/dtc/libfdt/fdt_sw.c \
+  tools/../scripts/dtc/libfdt/libfdt_env.h \
+  scripts/dtc/libfdt/fdt.h \
+  scripts/dtc/libfdt/libfdt.h \
+  tools/../scripts/dtc/libfdt/libfdt_internal.h \
+
+tools/libfdt/fdt_sw.o: $(deps_tools/libfdt/fdt_sw.o)
+
+$(deps_tools/libfdt/fdt_sw.o):
diff --git a/tools/u-boot-tools/libfdt/.fdt_wip.o.cmd b/tools/u-boot-tools/libfdt/.fdt_wip.o.cmd
new file mode 100644
index 0000000000000000000000000000000000000000..4793e4895d8b9aa9850894a910d3aa23f47b6bad
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/.fdt_wip.o.cmd
@@ -0,0 +1,114 @@
+cmd_tools/libfdt/fdt_wip.o := cc -Wp,-MD,tools/libfdt/.fdt_wip.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer  -std=gnu11   -include ./include/compiler.h -idirafterinclude -idirafter./arch/arm/include -I./scripts/dtc/libfdt -I./tools -DUSE_HOSTCC -D__KERNEL_STRICT_NAMES -D_GNU_SOURCE  -c -o tools/libfdt/fdt_wip.o tools/libfdt/fdt_wip.c
+
+source_tools/libfdt/fdt_wip.o := tools/libfdt/fdt_wip.c
+
+deps_tools/libfdt/fdt_wip.o := \
+  /usr/include/stdc-predef.h \
+  include/compiler.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stddef.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdint.h \
+  /usr/include/stdint.h \
+  /usr/include/x86_64-linux-gnu/bits/libc-header-start.h \
+  /usr/include/features.h \
+  /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+  /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+  /usr/include/x86_64-linux-gnu/bits/long-double.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+  /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+  /usr/include/x86_64-linux-gnu/bits/types.h \
+  /usr/include/x86_64-linux-gnu/bits/typesizes.h \
+  /usr/include/x86_64-linux-gnu/bits/wchar.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-intn.h \
+  /usr/include/x86_64-linux-gnu/bits/stdint-uintn.h \
+  /usr/include/errno.h \
+  /usr/include/x86_64-linux-gnu/bits/errno.h \
+  /usr/include/linux/errno.h \
+  /usr/include/x86_64-linux-gnu/asm/errno.h \
+  /usr/include/asm-generic/errno.h \
+  /usr/include/asm-generic/errno-base.h \
+  /usr/include/x86_64-linux-gnu/bits/types/error_t.h \
+  /usr/include/stdlib.h \
+  /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+  /usr/include/x86_64-linux-gnu/bits/waitstatus.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn.h \
+  /usr/include/x86_64-linux-gnu/bits/floatn-common.h \
+  /usr/include/x86_64-linux-gnu/bits/types/locale_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__locale_t.h \
+  /usr/include/x86_64-linux-gnu/sys/types.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clock_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/clockid_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/time_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/timer_t.h \
+  /usr/include/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/endian.h \
+  /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+  /usr/include/x86_64-linux-gnu/bits/uintn-identity.h \
+  /usr/include/x86_64-linux-gnu/sys/select.h \
+  /usr/include/x86_64-linux-gnu/bits/select.h \
+  /usr/include/x86_64-linux-gnu/bits/types/sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__sigset_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timeval.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h \
+  /usr/include/x86_64-linux-gnu/bits/thread-shared-types.h \
+  /usr/include/x86_64-linux-gnu/bits/pthreadtypes-arch.h \
+  /usr/include/alloca.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-bsearch.h \
+  /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+  /usr/include/stdio.h \
+  /usr/lib/gcc/x86_64-linux-gnu/8/include/stdarg.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__mbstate_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__fpos64_t.h \
+  /usr/include/x86_64-linux-gnu/bits/types/__FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h \
+  /usr/include/x86_64-linux-gnu/bits/types/cookie_io_functions_t.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+  /usr/include/x86_64-linux-gnu/bits/sys_errlist.h \
+  /usr/include/x86_64-linux-gnu/bits/stdio.h \
+  /usr/include/string.h \
+  /usr/include/strings.h \
+  /usr/include/x86_64-linux-gnu/sys/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/mman-shared.h \
+  /usr/include/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl.h \
+  /usr/include/x86_64-linux-gnu/bits/fcntl-linux.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_iovec.h \
+  /usr/include/linux/falloc.h \
+  /usr/include/x86_64-linux-gnu/bits/stat.h \
+  /usr/include/byteswap.h \
+  /usr/include/time.h \
+  /usr/include/x86_64-linux-gnu/bits/time.h \
+  /usr/include/x86_64-linux-gnu/bits/timex.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_tm.h \
+  /usr/include/x86_64-linux-gnu/bits/types/struct_itimerspec.h \
+  tools/fdt_host.h \
+  tools/../include/linux/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/libfdt_env.h \
+  tools/../include/linux/../../scripts/dtc/libfdt/fdt.h \
+  tools/../include/fdt_support.h \
+    $(wildcard include/config/of/libfdt.h) \
+    $(wildcard include/config/arch/fixup/fdt/memory.h) \
+    $(wildcard include/config/usb/ehci/fsl.h) \
+    $(wildcard include/config/usb/xhci/fsl.h) \
+    $(wildcard include/config/sys/fsl/sec/compat.h) \
+    $(wildcard include/config/pci.h) \
+    $(wildcard include/config/sys/fdt/pad.h) \
+    $(wildcard include/config/of/board/setup.h) \
+    $(wildcard include/config/of/system/setup.h) \
+    $(wildcard include/config/fdt/fixup/partitions.h) \
+    $(wildcard include/config/fman/enet.h) \
+    $(wildcard include/config/fsl/mc/enet.h) \
+  tools/../scripts/dtc/libfdt/fdt_wip.c \
+  tools/../scripts/dtc/libfdt/libfdt_env.h \
+  scripts/dtc/libfdt/fdt.h \
+  scripts/dtc/libfdt/libfdt.h \
+  tools/../scripts/dtc/libfdt/libfdt_internal.h \
+
+tools/libfdt/fdt_wip.o: $(deps_tools/libfdt/fdt_wip.o)
+
+$(deps_tools/libfdt/fdt_wip.o):
diff --git a/tools/u-boot-tools/libfdt/fdt.c b/tools/u-boot-tools/libfdt/fdt.c
new file mode 100644
index 0000000000000000000000000000000000000000..8ba809192110c9df8634b403b3c464381b70f242
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/fdt.c
@@ -0,0 +1,2 @@
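+/*
+ * Host-build wrapper: fdt_host.h is pulled in first so the shared libfdt
+ * implementation below is compiled in the host-tools environment.  The other
+ * libfdt/*.c wrappers in this directory follow the same two-line pattern.
+ */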
+#include "fdt_host.h"
+#include "../scripts/dtc/libfdt/fdt.c"
diff --git a/tools/u-boot-tools/libfdt/fdt.o b/tools/u-boot-tools/libfdt/fdt.o
new file mode 100644
index 0000000000000000000000000000000000000000..4072bffc3b0f4bf11861258cd708e9cc89ea557e
Binary files /dev/null and b/tools/u-boot-tools/libfdt/fdt.o differ
diff --git a/tools/u-boot-tools/libfdt/fdt_addresses.c b/tools/u-boot-tools/libfdt/fdt_addresses.c
new file mode 100644
index 0000000000000000000000000000000000000000..242a2c083ba1343c2536c90eca5e8b4f93eb6352
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/fdt_addresses.c
@@ -0,0 +1,2 @@
+#include "fdt_host.h"
+#include "../scripts/dtc/libfdt/fdt_addresses.c"
diff --git a/tools/u-boot-tools/libfdt/fdt_addresses.o b/tools/u-boot-tools/libfdt/fdt_addresses.o
new file mode 100644
index 0000000000000000000000000000000000000000..182d78a2b083e3dd8e3f48ef140c90b2200020fd
Binary files /dev/null and b/tools/u-boot-tools/libfdt/fdt_addresses.o differ
diff --git a/tools/u-boot-tools/libfdt/fdt_empty_tree.c b/tools/u-boot-tools/libfdt/fdt_empty_tree.c
new file mode 100644
index 0000000000000000000000000000000000000000..9ccbb1f94eae7d7a75d9e6c8a76badef789020aa
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/fdt_empty_tree.c
@@ -0,0 +1,2 @@
+#include "fdt_host.h"
+#include "../scripts/dtc/libfdt/fdt_empty_tree.c"
diff --git a/tools/u-boot-tools/libfdt/fdt_empty_tree.o b/tools/u-boot-tools/libfdt/fdt_empty_tree.o
new file mode 100644
index 0000000000000000000000000000000000000000..281d85aa8cb78815ab1a5ed3cf34b44c4382a391
Binary files /dev/null and b/tools/u-boot-tools/libfdt/fdt_empty_tree.o differ
diff --git a/tools/u-boot-tools/libfdt/fdt_overlay.c b/tools/u-boot-tools/libfdt/fdt_overlay.c
new file mode 100644
index 0000000000000000000000000000000000000000..801ec374b15801af8c863745ec46f497e58e1145
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/fdt_overlay.c
@@ -0,0 +1,2 @@
+#include "fdt_host.h"
+#include "../scripts/dtc/libfdt/fdt_overlay.c"
diff --git a/tools/u-boot-tools/libfdt/fdt_overlay.o b/tools/u-boot-tools/libfdt/fdt_overlay.o
new file mode 100644
index 0000000000000000000000000000000000000000..81e3226f31a89a89318b4a0f4708d959a5c45b8a
Binary files /dev/null and b/tools/u-boot-tools/libfdt/fdt_overlay.o differ
diff --git a/tools/u-boot-tools/libfdt/fdt_rw.c b/tools/u-boot-tools/libfdt/fdt_rw.c
new file mode 100644
index 0000000000000000000000000000000000000000..68fc7c8c887fafb9721e4d4f3a2b573bfd9a1db0
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/fdt_rw.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier:	GPL-2.0+ OR BSD-2-Clause */
+#include "fdt_host.h"
+#include "../../scripts/dtc/libfdt/fdt_rw.c"
+
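+/*
+ * Copy device tree 'old' into 'new' while rebuilding the strings block so it
+ * only holds names still referenced by a property.  'new' must be at least
+ * fdt_totalsize(old) bytes; a caller would typically fdt_pack() the result
+ * afterwards to reclaim the freed string space (usage sketch inferred from
+ * the code below, not from this helper's callers).
+ */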
+int fdt_remove_unused_strings(const void *old, void *new)
+{
+	const struct fdt_property *old_prop;
+	struct fdt_property *new_prop;
+	int size = fdt_totalsize(old);
+	int next_offset, offset;
+	const char *str;
+	int ret;
+	int tag = FDT_PROP;
+
+	/* Make a copy and remove the strings */
+	memcpy(new, old, size);
+	fdt_set_size_dt_strings(new, 0);
+
+	/* Add every property name back into the new string table */
+	for (offset = 0; tag != FDT_END; offset = next_offset) {
+		tag = fdt_next_tag(old, offset, &next_offset);
+		if (tag != FDT_PROP)
+			continue;
+		old_prop = fdt_get_property_by_offset(old, offset, NULL);
+		new_prop = (struct fdt_property *)(unsigned long)
+			fdt_get_property_by_offset(new, offset, NULL);
+		str = fdt_string(old, fdt32_to_cpu(old_prop->nameoff));
+		ret = fdt_find_add_string_(new, str);
+		if (ret < 0)
+			return ret;
+		new_prop->nameoff = cpu_to_fdt32(ret);
+	}
+
+	return 0;
+}
diff --git a/tools/u-boot-tools/libfdt/fdt_rw.o b/tools/u-boot-tools/libfdt/fdt_rw.o
new file mode 100644
index 0000000000000000000000000000000000000000..2b20ccfc8b3fe7f2d64449e6b6aafa0e85f12157
Binary files /dev/null and b/tools/u-boot-tools/libfdt/fdt_rw.o differ
diff --git a/tools/u-boot-tools/libfdt/fdt_strerror.c b/tools/u-boot-tools/libfdt/fdt_strerror.c
new file mode 100644
index 0000000000000000000000000000000000000000..d7ed70bea47b828ddfb6e343f54195f9b8fa5986
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/fdt_strerror.c
@@ -0,0 +1,2 @@
+#include "fdt_host.h"
+#include "../scripts/dtc/libfdt/fdt_strerror.c"
diff --git a/tools/u-boot-tools/libfdt/fdt_strerror.o b/tools/u-boot-tools/libfdt/fdt_strerror.o
new file mode 100644
index 0000000000000000000000000000000000000000..1aceaa2500a05a25b361b0926de837064303eeee
Binary files /dev/null and b/tools/u-boot-tools/libfdt/fdt_strerror.o differ
diff --git a/tools/u-boot-tools/libfdt/fdt_sw.c b/tools/u-boot-tools/libfdt/fdt_sw.c
new file mode 100644
index 0000000000000000000000000000000000000000..ed6b3275730e217359e8b2c12e1b5b955d97ffb5
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/fdt_sw.c
@@ -0,0 +1,2 @@
+#include "fdt_host.h"
+#include "../scripts/dtc/libfdt/fdt_sw.c"
diff --git a/tools/u-boot-tools/libfdt/fdt_sw.o b/tools/u-boot-tools/libfdt/fdt_sw.o
new file mode 100644
index 0000000000000000000000000000000000000000..7eca52ee1ed2dd33a13b7023ad2605d14f134d9e
Binary files /dev/null and b/tools/u-boot-tools/libfdt/fdt_sw.o differ
diff --git a/tools/u-boot-tools/libfdt/fdt_wip.c b/tools/u-boot-tools/libfdt/fdt_wip.c
new file mode 100644
index 0000000000000000000000000000000000000000..bad73ed9e76864c763e8680ed48ef5ce4b4dc763
--- /dev/null
+++ b/tools/u-boot-tools/libfdt/fdt_wip.c
@@ -0,0 +1,2 @@
+#include "fdt_host.h"
+#include "../scripts/dtc/libfdt/fdt_wip.c"
diff --git a/tools/u-boot-tools/libfdt/fdt_wip.o b/tools/u-boot-tools/libfdt/fdt_wip.o
new file mode 100644
index 0000000000000000000000000000000000000000..23c66d0bcb025fb283ad3d340896685799bbc629
Binary files /dev/null and b/tools/u-boot-tools/libfdt/fdt_wip.o differ
diff --git a/tools/u-boot-tools/logos/atmel.bmp b/tools/u-boot-tools/logos/atmel.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..5c659ce87abd93be1090d8f54291edec5271544b
Binary files /dev/null and b/tools/u-boot-tools/logos/atmel.bmp differ
diff --git a/tools/u-boot-tools/logos/compulab.bmp b/tools/u-boot-tools/logos/compulab.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..df5435cb596ee93496557b78fbbb0c2b97299b4b
Binary files /dev/null and b/tools/u-boot-tools/logos/compulab.bmp differ
diff --git a/tools/u-boot-tools/logos/denx-comp.bmp b/tools/u-boot-tools/logos/denx-comp.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..89d0f471c85cf734b5f0e2bb2508d39f5b9e72b8
Binary files /dev/null and b/tools/u-boot-tools/logos/denx-comp.bmp differ
diff --git a/tools/u-boot-tools/logos/denx.bmp b/tools/u-boot-tools/logos/denx.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..c4cde09d59d60aac35a91fd15ff81d0d1c7c024d
Binary files /dev/null and b/tools/u-boot-tools/logos/denx.bmp differ
diff --git a/tools/u-boot-tools/logos/engicam.bmp b/tools/u-boot-tools/logos/engicam.bmp
new file mode 100755
index 0000000000000000000000000000000000000000..f6c60fb810d29830941e18d6725ef95785d95967
Binary files /dev/null and b/tools/u-boot-tools/logos/engicam.bmp differ
diff --git a/tools/u-boot-tools/logos/esd.bmp b/tools/u-boot-tools/logos/esd.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..a6b403024fbd92952c6fac576d215b49afdb6626
Binary files /dev/null and b/tools/u-boot-tools/logos/esd.bmp differ
diff --git a/tools/u-boot-tools/logos/freescale.bmp b/tools/u-boot-tools/logos/freescale.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..1589e8073d133b61d5dedb99ff393ef9aa68a07f
Binary files /dev/null and b/tools/u-boot-tools/logos/freescale.bmp differ
diff --git a/tools/u-boot-tools/logos/gateworks.bmp b/tools/u-boot-tools/logos/gateworks.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..01edb7cedf46c3441619213b6baf33410055601f
Binary files /dev/null and b/tools/u-boot-tools/logos/gateworks.bmp differ
diff --git a/tools/u-boot-tools/logos/intercontrol.bmp b/tools/u-boot-tools/logos/intercontrol.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..cf2a884b2df9e6fd63d89f929b64724d0fa10426
Binary files /dev/null and b/tools/u-boot-tools/logos/intercontrol.bmp differ
diff --git a/tools/u-boot-tools/logos/linux_logo_ttcontrol.bmp b/tools/u-boot-tools/logos/linux_logo_ttcontrol.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..031d3a41ecd3c2d1a732af0a3c53b430545f61e8
Binary files /dev/null and b/tools/u-boot-tools/logos/linux_logo_ttcontrol.bmp differ
diff --git a/tools/u-boot-tools/logos/linux_logo_ttcontrol_palfin.bmp b/tools/u-boot-tools/logos/linux_logo_ttcontrol_palfin.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..e3e38d1bf6423c7373b6e41530c24ec4d8f6da0c
Binary files /dev/null and b/tools/u-boot-tools/logos/linux_logo_ttcontrol_palfin.bmp differ
diff --git a/tools/u-boot-tools/logos/microchip.bmp b/tools/u-boot-tools/logos/microchip.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..bcecbe972f4e5f088f458df835396fc44293687f
Binary files /dev/null and b/tools/u-boot-tools/logos/microchip.bmp differ
diff --git a/tools/u-boot-tools/logos/ronetix.bmp b/tools/u-boot-tools/logos/ronetix.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..f956813915c7822ba11a3ab81ad9f48fa2849a5c
Binary files /dev/null and b/tools/u-boot-tools/logos/ronetix.bmp differ
diff --git a/tools/u-boot-tools/logos/siemens.bmp b/tools/u-boot-tools/logos/siemens.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..bff2b190e0c88ece91484ca82f0705ff28ed9b2c
Binary files /dev/null and b/tools/u-boot-tools/logos/siemens.bmp differ
diff --git a/tools/u-boot-tools/logos/solidrun.bmp b/tools/u-boot-tools/logos/solidrun.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..93db1f8f1649b1c2d81751b4403a9534b68d4167
Binary files /dev/null and b/tools/u-boot-tools/logos/solidrun.bmp differ
diff --git a/tools/u-boot-tools/logos/syteco.bmp b/tools/u-boot-tools/logos/syteco.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..14031f2c8c5eaee2e961a89b684ac3c986329673
Binary files /dev/null and b/tools/u-boot-tools/logos/syteco.bmp differ
diff --git a/tools/u-boot-tools/logos/toradex.bmp b/tools/u-boot-tools/logos/toradex.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..3e2dcf23358dd46fc7b1bb0dae70d3ba985606ee
Binary files /dev/null and b/tools/u-boot-tools/logos/toradex.bmp differ
diff --git a/tools/u-boot-tools/logos/u-boot_logo.bmp b/tools/u-boot-tools/logos/u-boot_logo.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..40245dd40db38e0f68ef9943e9598f1a47b84ac9
Binary files /dev/null and b/tools/u-boot-tools/logos/u-boot_logo.bmp differ
diff --git a/tools/u-boot-tools/logos/u-boot_logo.svg b/tools/u-boot-tools/logos/u-boot_logo.svg
new file mode 100644
index 0000000000000000000000000000000000000000..e45ef2ef773cda13967a17dd01ca2d2a86851cf1
--- /dev/null
+++ b/tools/u-boot-tools/logos/u-boot_logo.svg
@@ -0,0 +1,248 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- SPDX-License-Identifier: CC-BY-SA-4.0 -->
+
+<!-- Copyright (c) 2018, Heinrich Schuchardt <xypron.glpk@gmx.de> -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="186"
+   height="186"
+   viewBox="0 0 186 186"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.92.3 (2405546, 2018-03-11)"
+   sodipodi:docname="u-boot_logo.svg"
+   inkscape:export-filename="tools/logos/u-boot_logo.png"
+   inkscape:export-xdpi="41.290001"
+   inkscape:export-ydpi="41.290001">
+  <title
+     id="title30">U-Boot Logo</title>
+  <metadata
+     id="metadata31">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title>U-Boot Logo</dc:title>
+        <cc:license
+           rdf:resource="http://creativecommons.org/licenses/by-sa/4.0/" />
+        <dc:creator>
+          <cc:Agent>
+            <dc:title>Heinrich Schuchardt &lt;xypron.glpk@gmx.de&gt;</dc:title>
+          </cc:Agent>
+        </dc:creator>
+        <dc:date>May 21st, 2018</dc:date>
+      </cc:Work>
+      <cc:License
+         rdf:about="http://creativecommons.org/licenses/by-sa/4.0/">
+        <cc:permits
+           rdf:resource="http://creativecommons.org/ns#Reproduction" />
+        <cc:permits
+           rdf:resource="http://creativecommons.org/ns#Distribution" />
+        <cc:requires
+           rdf:resource="http://creativecommons.org/ns#Notice" />
+        <cc:requires
+           rdf:resource="http://creativecommons.org/ns#Attribution" />
+        <cc:permits
+           rdf:resource="http://creativecommons.org/ns#DerivativeWorks" />
+        <cc:requires
+           rdf:resource="http://creativecommons.org/ns#ShareAlike" />
+      </cc:License>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs29" />
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1440"
+     inkscape:window-height="871"
+     id="namedview27"
+     showgrid="false"
+     inkscape:zoom="3"
+     inkscape:cx="93"
+     inkscape:cy="93"
+     inkscape:window-x="0"
+     inkscape:window-y="0"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1" />
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(0,0)">
+    <rect
+       style="fill:#000000;fill-opacity:1;stroke:none;stroke-width:0"
+       id="rect31"
+       width="186"
+       height="186"
+       x="0"
+       y="0" />
+    <circle
+       style="fill:#004466;fill-opacity:1;stroke-width:0;stroke:none"
+       id="path835"
+       cx="93"
+       cy="93"
+       r="93" />
+    <path
+       inkscape:connector-curvature="0"
+       style="fill:#ffcc88;fill-opacity:1;stroke:none;stroke-width:0"
+       d="m 116,76 a 20,20 0 0 1 -20,-20 20,20 0 0 1 20,-20 v 11 a 9,9 0 0 0 -9,9 9,9 0 0 0 9,9 z"
+       id="path4136-6-6-1-6-3-5" />
+    <path
+       inkscape:connector-curvature="0"
+       style="fill:#ffcc88;fill-opacity:1;stroke:none;stroke-width:0"
+       d="m 116,66 a 10,10 0 0 1 -10,-10 10,10 0 0 1 10,-10 v 1 a 9,9 0 0 0 -9,9 9,9 0 0 0 9,9 z"
+       id="path4136-6-6-1-6-3-5-1-9" />
+    <ellipse
+       style="fill:#dd9955;fill-opacity:1;stroke:none;stroke-width:0"
+       id="path4136-6-6-1-6-3-5-1-2"
+       cx="116"
+       cy="41.5"
+       rx="4"
+       ry="5.5" />
+    <circle
+       style="fill:#dd9955;fill-opacity:1;stroke:none;stroke-width:0"
+       id="path4352"
+       cx="86"
+       cy="66"
+       r="10" />
+    <circle
+       style="fill:#dd9955;fill-opacity:1;stroke:none;stroke-width:0"
+       id="path4352-1"
+       cx="126"
+       cy="66"
+       r="10" />
+    <rect
+       style="fill:#dd9955;fill-opacity:1;stroke:none;stroke-width:0"
+       id="rect4399"
+       width="39"
+       height="20"
+       x="86.5"
+       y="56" />
+    <rect
+       style="fill:#dd9955;fill-opacity:1;stroke:none;stroke-width:0"
+       id="rect4660"
+       width="60"
+       height="9.5"
+       x="76"
+       y="66.5" />
+    <circle
+       style="fill:#dd9955;fill-opacity:1;stroke:none;stroke-width:0"
+       id="path4549-5"
+       cx="36"
+       cy="81"
+       r="15" />
+    <circle
+       style="fill:#dd9955;fill-opacity:1;stroke:none;stroke-width:0"
+       id="path4549-5-5"
+       cx="36"
+       cy="121"
+       r="15" />
+    <ellipse
+       style="fill:#ffcc88;fill-opacity:1;stroke:#000000;stroke-width:0"
+       id="path4136-6-6"
+       cx="15"
+       cy="91"
+       rx="4"
+       ry="10" />
+    <ellipse
+       style="fill:#ffcc88;fill-opacity:1;stroke:#000000;stroke-width:0"
+       id="path4136-6-6-1"
+       cx="15"
+       cy="111"
+       rx="4"
+       ry="10" />
+    <rect
+       style="fill:#dd9955;fill-opacity:1;stroke:#000000;stroke-width:0"
+       id="rect4213"
+       width="65"
+       height="4"
+       x="11"
+       y="99" />
+    <ellipse
+       style="fill:#ffcc88;fill-opacity:1;stroke:#000000;stroke-width:0"
+       id="path4136"
+       cx="100.5"
+       cy="100.5"
+       rx="74.5"
+       ry="34.5" />
+    <ellipse
+       style="fill:#dd9955;fill-opacity:1;stroke:none;stroke-width:0"
+       id="path4416"
+       cx="70"
+       cy="95.5"
+       rx="15"
+       ry="12.5" />
+    <ellipse
+       style="fill:#ffcc88;fill-opacity:1;stroke:none;stroke-width:0"
+       id="path4416-9"
+       cx="70"
+       cy="95.5"
+       rx="14"
+       ry="11.5" />
+    <ellipse
+       style="fill:#dd9955;fill-opacity:1;stroke:none;stroke-width:0"
+       id="path4416-0"
+       cx="70"
+       cy="95.5"
+       rx="11"
+       ry="8.5" />
+    <ellipse
+       style="fill:#dd9955;fill-opacity:1;stroke:none;stroke-width:0"
+       id="path4416-94"
+       cx="110"
+       cy="95.5"
+       rx="15"
+       ry="12.5" />
+    <ellipse
+       style="fill:#ffcc88;fill-opacity:1;stroke:none;stroke-width:0"
+       id="path4416-9-1"
+       cx="110"
+       cy="95.5"
+       rx="14"
+       ry="11.5" />
+    <ellipse
+       style="fill:#dd9955;fill-opacity:1;stroke:none;stroke-width:0"
+       id="path4416-0-1"
+       cx="110"
+       cy="95.5"
+       rx="11"
+       ry="8.5" />
+    <ellipse
+       style="fill:#dd9955;fill-opacity:1;stroke:none;stroke-width:0"
+       id="path4416-94-2"
+       cx="150"
+       cy="95.5"
+       rx="15"
+       ry="12.5" />
+    <ellipse
+       style="fill:#ffcc88;fill-opacity:1;stroke:none;stroke-width:0"
+       id="path4416-9-1-2"
+       cx="150"
+       cy="95.5"
+       rx="14"
+       ry="11.5" />
+    <ellipse
+       style="fill:#dd9955;fill-opacity:1;stroke:none;stroke-width:0"
+       id="path4416-0-1-9"
+       cx="150"
+       cy="95.5"
+       rx="11"
+       ry="8.5" />
+  </g>
+</svg>
diff --git a/tools/u-boot-tools/logos/wandboard.bmp b/tools/u-boot-tools/logos/wandboard.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..7f288a8e8eee052c96b9c2c2699e2e3d3efe1758
Binary files /dev/null and b/tools/u-boot-tools/logos/wandboard.bmp differ
diff --git a/tools/u-boot-tools/lpc32xximage.c b/tools/u-boot-tools/lpc32xximage.c
new file mode 100644
index 0000000000000000000000000000000000000000..37931f91840cea11a98c9b3a4670f3e9ab30b9b4
--- /dev/null
+++ b/tools/u-boot-tools/lpc32xximage.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Image manipulator for LPC32XX SoCs
+ *
+ * (C) Copyright 2015  DENX Software Engineering GmbH
+ * Written-by: Albert ARIBAUD <albert.aribaud@3adev.fr>
+ *
+ * Derived from omapimage.c:
+ *
+ * (C) Copyright 2010
+ * Linaro LTD, www.linaro.org
+ * Author: John Rigby <john.rigby@linaro.org>
+ * Based on TI's signGP.c
+ *
+ * (C) Copyright 2009
+ * Stefano Babic, DENX Software Engineering, sbabic@denx.de.
+ *
+ * (C) Copyright 2008
+ * Marvell Semiconductor <www.marvell.com>
+ * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
+ */
+
+#include "imagetool.h"
+#include <compiler.h>
+#include <image.h>
+
+/*
+ * NAND page 0 boot header
+ */
+
+struct nand_page_0_boot_header {
+	uint32_t data[129];
+	uint32_t pad[383];
+};
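+
+/*
+ * Page-0 layout as implied by the verify/set routines below: data[0..3]
+ * alternate the ICR value and its complement, data[4..11] repeat the image
+ * size in NAND pages and its complement, data[12] and data[128] hold the
+ * 0xaa "readable block" marker, and pad[] fills the structure out to
+ * 512 words (2 KiB).
+ */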
+
+/*
+ * Default ICR (interface configuration data [sic]) if none specified
+ * in board config
+ */
+
+#ifndef LPC32XX_BOOT_ICR
+#define LPC32XX_BOOT_ICR 0x00000096
+#endif
+
+/*
+ * Default boot NAND page size if none specified in board config
+ */
+
+#ifndef LPC32XX_BOOT_NAND_PAGESIZE
+#define LPC32XX_BOOT_NAND_PAGESIZE 2048
+#endif
+
+/*
+ * Default boot NAND pages per sector if none specified in board config
+ */
+
+#ifndef LPC32XX_BOOT_NAND_PAGES_PER_SECTOR
+#define LPC32XX_BOOT_NAND_PAGES_PER_SECTOR 64
+#endif
+
+/*
+ * Maximum size for boot code is 56K unless defined in board config
+ */
+
+#ifndef LPC32XX_BOOT_CODESIZE
+#define LPC32XX_BOOT_CODESIZE (56*1024)
+#endif
+
+/* signature byte for a readable block */
+
+#define LPC32XX_BOOT_BLOCK_OK 0xaa
+
+static struct nand_page_0_boot_header lpc32xximage_header;
+
+static int lpc32xximage_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_LPC32XXIMAGE)
+		return EXIT_SUCCESS;
+	return EXIT_FAILURE;
+}
+
+static int lpc32xximage_verify_header(unsigned char *ptr, int image_size,
+			struct image_tool_params *params)
+{
+	struct nand_page_0_boot_header *hdr =
+		(struct nand_page_0_boot_header *)ptr;
+
+	/* turn image size from bytes to NAND pages, page 0 included */
+	int image_size_in_pages = ((image_size - 1)
+				  / LPC32XX_BOOT_NAND_PAGESIZE);
+
+	if (hdr->data[0] != (0xff & LPC32XX_BOOT_ICR))
+		return -1;
+	if (hdr->data[1] != (0xff & ~LPC32XX_BOOT_ICR))
+		return -1;
+	if (hdr->data[2] != (0xff & LPC32XX_BOOT_ICR))
+		return -1;
+	if (hdr->data[3] != (0xff & ~LPC32XX_BOOT_ICR))
+		return -1;
+	if (hdr->data[4] != (0xff & image_size_in_pages))
+		return -1;
+	if (hdr->data[5] != (0xff & ~image_size_in_pages))
+		return -1;
+	if (hdr->data[6] != (0xff & image_size_in_pages))
+		return -1;
+	if (hdr->data[7] != (0xff & ~image_size_in_pages))
+		return -1;
+	if (hdr->data[8] != (0xff & image_size_in_pages))
+		return -1;
+	if (hdr->data[9] != (0xff & ~image_size_in_pages))
+		return -1;
+	if (hdr->data[10] != (0xff & image_size_in_pages))
+		return -1;
+	if (hdr->data[11] != (0xff & ~image_size_in_pages))
+		return -1;
+	if (hdr->data[12] != LPC32XX_BOOT_BLOCK_OK)
+		return -1;
+	if (hdr->data[128] != LPC32XX_BOOT_BLOCK_OK)
+		return -1;
+	return 0;
+}
+
+static void print_hdr_byte(struct nand_page_0_boot_header *hdr, int ofs)
+{
+	printf("header[%d] = %02x\n", ofs, hdr->data[ofs]);
+}
+
+static void lpc32xximage_print_header(const void *ptr)
+{
+	struct nand_page_0_boot_header *hdr =
+		(struct nand_page_0_boot_header *)ptr;
+	int ofs;
+
+	for (ofs = 0; ofs <= 12; ofs++)
+		print_hdr_byte(hdr, ofs);
+	print_hdr_byte(hdr, 128);
+}
+
+static void lpc32xximage_set_header(void *ptr, struct stat *sbuf, int ifd,
+				struct image_tool_params *params)
+{
+	struct nand_page_0_boot_header *hdr =
+		(struct nand_page_0_boot_header *)ptr;
+
+	/* turn image size from bytes to NAND pages, page 0 included */
+	int image_size_in_pages = ((sbuf->st_size
+				  + LPC32XX_BOOT_NAND_PAGESIZE - 1)
+				  / LPC32XX_BOOT_NAND_PAGESIZE);
+
+	/* fill header -- default byte value is 0x00, not 0xFF */
+	memset((void *)hdr, 0, sizeof(*hdr));
+	hdr->data[0] = (hdr->data[2] = 0xff & LPC32XX_BOOT_ICR);
+	hdr->data[1] = (hdr->data[3] = 0xff & ~LPC32XX_BOOT_ICR);
+	hdr->data[4] = (hdr->data[6] = (hdr->data[8]
+		       = (hdr->data[10] = 0xff & image_size_in_pages)));
+	hdr->data[5] = (hdr->data[7] = (hdr->data[9]
+		       = (hdr->data[11] = 0xff & ~image_size_in_pages)));
+	hdr->data[12] = (hdr->data[128] = LPC32XX_BOOT_BLOCK_OK);
+}
+
+/*
+ * lpc32xximage parameters
+ */
+U_BOOT_IMAGE_TYPE(
+	lpc32xximage,
+	"LPC32XX Boot Image",
+	sizeof(lpc32xximage_header),
+	(void *)&lpc32xximage_header,
+	NULL,
+	lpc32xximage_verify_header,
+	lpc32xximage_print_header,
+	lpc32xximage_set_header,
+	NULL,
+	lpc32xximage_check_image_types,
+	NULL,
+	NULL
+);
diff --git a/tools/u-boot-tools/lpc32xximage.o b/tools/u-boot-tools/lpc32xximage.o
new file mode 100644
index 0000000000000000000000000000000000000000..21f547ecd5b9490351692b05d8df50645074d2c2
Binary files /dev/null and b/tools/u-boot-tools/lpc32xximage.o differ
diff --git a/tools/u-boot-tools/microcode-tool b/tools/u-boot-tools/microcode-tool
new file mode 120000
index 0000000000000000000000000000000000000000..8be8507dd06c6fc2e3f2046b9c4b2e103c8fe3b9
--- /dev/null
+++ b/tools/u-boot-tools/microcode-tool
@@ -0,0 +1 @@
+microcode-tool.py
\ No newline at end of file
diff --git a/tools/u-boot-tools/microcode-tool.py b/tools/u-boot-tools/microcode-tool.py
new file mode 100755
index 0000000000000000000000000000000000000000..249a33b8cac440a1bf021dc71074f8d3a96243fc
--- /dev/null
+++ b/tools/u-boot-tools/microcode-tool.py
@@ -0,0 +1,316 @@
+#!/usr/bin/env python2
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (c) 2014 Google, Inc
+#
+# Intel microcode update tool
+
+from optparse import OptionParser
+import os
+import re
+import struct
+import sys
+
+MICROCODE_DIR = 'arch/x86/dts/microcode'
+
+class Microcode:
+    """Holds information about the microcode for a particular model of CPU.
+
+    Attributes:
+        name:  Name of the CPU this microcode is for, including any version
+                   information (e.g. 'm12206a7_00000029')
+        model: Model code string (this is cpuid(1).eax, e.g. '206a7')
+        words: List of hex words containing the microcode. The first 16 words
+                   are the public header.
+    """
+    def __init__(self, name, data):
+        self.name = name
+        # Convert data into a list of hex words
+        self.words = []
+        for value in ''.join(data).split(','):
+            hexval = value.strip()
+            if hexval:
+                self.words.append(int(hexval, 0))
+
+        # The model is in the 4th hex word
+        self.model = '%x' % self.words[3]
+
+def ParseFile(fname):
+    """Parse a micrcode.dat file and return the component parts
+
+    Args:
+        fname: Filename to parse
+    Returns:
+        3-Tuple:
+            date:         String containing date from the file's header
+            license_text: List of text lines for the license file
+            microcodes:   List of Microcode objects from the file
+    """
+    re_date = re.compile('/\* *(.* [0-9]{4}) *\*/$')
+    re_license = re.compile('/[^-*+] *(.*)$')
+    re_name = re.compile('/\* *(.*)\.inc *\*/', re.IGNORECASE)
+    microcodes = {}
+    license_text = []
+    date = ''
+    data = []
+    name = None
+    with open(fname) as fd:
+        for line in fd:
+            line = line.rstrip()
+            m_date = re_date.match(line)
+            m_license = re_license.match(line)
+            m_name = re_name.match(line)
+            if m_name:
+                if name:
+                    microcodes[name] = Microcode(name, data)
+                name = m_name.group(1).lower()
+                data = []
+            elif m_license:
+                license_text.append(m_license.group(1))
+            elif m_date:
+                date = m_date.group(1)
+            else:
+                data.append(line)
+    if name:
+        microcodes[name] = Microcode(name, data)
+    return date, license_text, microcodes
+
+def ParseHeaderFiles(fname_list):
+    """Parse a list of header files and return the component parts
+
+    Args:
+        fname_list: List of files to parse
+    Returns:
+            date:         String containing date from the file's header
+            license_text: List of text lines for the license file
+            microcodes:   List of Microcode objects from the file
+    """
+    microcodes = {}
+    license_text = []
+    date = ''
+    name = None
+    for fname in fname_list:
+        name = os.path.basename(fname).lower()
+        name = os.path.splitext(name)[0]
+        data = []
+        with open(fname) as fd:
+            license_start = False
+            license_end = False
+            for line in fd:
+                line = line.rstrip()
+
+                if len(line) >= 2:
+                    if line[0] == '/' and line[1] == '*':
+                        license_start = True
+                        continue
+                    if line[0] == '*' and line[1] == '/':
+                        license_end = True
+                        continue
+                if license_start and not license_end:
+                    # Ignore blank line
+                    if len(line) > 0:
+                        license_text.append(line)
+                    continue
+                # Omit anything after the last comma
+                words = line.split(',')[:-1]
+                data += [word + ',' for word in words]
+        microcodes[name] = Microcode(name, data)
+    return date, license_text, microcodes
+
+
+def List(date, microcodes, model):
+    """List the available microcode chunks
+
+    Args:
+        date:           Date of the microcode file
+        microcodes:     Dict of Microcode objects indexed by name
+        model:          Model string to search for, or None
+    """
+    print 'Date: %s' % date
+    if model:
+        mcode_list, tried = FindMicrocode(microcodes, model.lower())
+        print 'Matching models %s:' % (', '.join(tried))
+    else:
+        print 'All models:'
+        mcode_list = [microcodes[m] for m in microcodes.keys()]
+    for mcode in mcode_list:
+        print '%-20s: model %s' % (mcode.name, mcode.model)
+
+def FindMicrocode(microcodes, model):
+    """Find all the microcode chunks which match the given model.
+
+    This model is something like 306a9 (the value returned in eax from
+    cpuid(1) when running on Intel CPUs). But we allow a partial match,
+    omitting the last 1 or two characters to allow many families to have the
+    same microcode.
+
+    If the model name is ambiguous we return a list of matches.
+
+    Args:
+        microcodes: Dict of Microcode objects indexed by name
+        model:      String containing model name to find
+    Returns:
+        Tuple:
+            List of matching Microcode objects
+            List of abbreviations we tried
+    """
+    # Allow a full name to be used
+    mcode = microcodes.get(model)
+    if mcode:
+        return [mcode], []
+
+    tried = []
+    found = []
+    for i in range(3):
+        abbrev = model[:-i] if i else model
+        tried.append(abbrev)
+        for mcode in microcodes.values():
+            if mcode.model.startswith(abbrev):
+                found.append(mcode)
+        if found:
+            break
+    return found, tried
+
+def CreateFile(date, license_text, mcodes, outfile):
+    """Create a microcode file in U-Boot's .dtsi format
+
+    Args:
+        date:       String containing date of original microcode file
+        license_text: List of text lines for the license file
+        mcodes:       Microcode objects to write (normally only 1)
+        outfile:    Filename to write to ('-' for stdout)
+    """
+    out = '''/*%s
+ * ---
+ * This is a device tree fragment. Use #include to add these properties to a
+ * node.
+ *
+ * Date: %s
+ */
+
+compatible = "intel,microcode";
+intel,header-version = <%d>;
+intel,update-revision = <%#x>;
+intel,date-code = <%#x>;
+intel,processor-signature = <%#x>;
+intel,checksum = <%#x>;
+intel,loader-revision = <%d>;
+intel,processor-flags = <%#x>;
+
+/* The first 48 bytes are the public header which repeats the above data */
+data = <%s
+\t>;'''
+    words = ''
+    add_comments = len(mcodes) > 1
+    for mcode in mcodes:
+        if add_comments:
+            words += '\n/* %s */' % mcode.name
+        for i in range(len(mcode.words)):
+            if not (i & 3):
+                words += '\n'
+            val = mcode.words[i]
+            # Change each word so it will be little-endian in the FDT
+            # This data is needed before RAM is available on some platforms so
+            # we cannot do an endianness swap on boot.
+            val = struct.unpack("<I", struct.pack(">I", val))[0]
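+            # e.g. a word parsed as 0x12345678 is stored as 0x78563412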
+            words += '\t%#010x' % val
+
+    # Use the first microcode for the headers
+    mcode = mcodes[0]
+
+    # Take care to avoid adding a space before a tab
+    text = ''
+    for line in license_text:
+        if line[0] == '\t':
+            text += '\n *' + line
+        else:
+            text += '\n * ' + line
+    args = [text, date]
+    args += [mcode.words[i] for i in range(7)]
+    args.append(words)
+    if outfile == '-':
+        print out % tuple(args)
+    else:
+        if not outfile:
+            if not os.path.exists(MICROCODE_DIR):
+                print >> sys.stderr, "Creating directory '%s'" % MICROCODE_DIR
+                os.makedirs(MICROCODE_DIR)
+            outfile = os.path.join(MICROCODE_DIR, mcode.name + '.dtsi')
+        print >> sys.stderr, "Writing microcode for '%s' to '%s'" % (
+                ', '.join([mcode.name for mcode in mcodes]), outfile)
+        with open(outfile, 'w') as fd:
+            print >> fd, out % tuple(args)
+
+def MicrocodeTool():
+    """Run the microcode tool"""
+    commands = 'create,license,list'.split(',')
+    parser = OptionParser()
+    parser.add_option('-d', '--mcfile', type='string', action='store',
+                    help='Name of microcode.dat file')
+    parser.add_option('-H', '--headerfile', type='string', action='append',
+                    help='Name of .h file containing microcode')
+    parser.add_option('-m', '--model', type='string', action='store',
+                    help="Model name to extract ('all' for all)")
+    parser.add_option('-M', '--multiple', type='string', action='store',
+                    help="Allow output of multiple models")
+    parser.add_option('-o', '--outfile', type='string', action='store',
+                    help='Filename to use for output (- for stdout), default is'
+                    ' %s/<name>.dtsi' % MICROCODE_DIR)
+    parser.usage += """ command
+
+    Process an Intel microcode file (use -h for help). Commands:
+
+       create     Create microcode .dtsi file for a model
+       list       List available models in microcode file
+       license    Print the license
+
+    Typical usage:
+
+       ./tools/microcode-tool -d microcode.dat -m 306a create
+
+    This will find the appropriate file and write it to %s.""" % MICROCODE_DIR
+
+    (options, args) = parser.parse_args()
+    if not args:
+        parser.error('Please specify a command')
+    cmd = args[0]
+    if cmd not in commands:
+        parser.error("Unknown command '%s'" % cmd)
+
+    if options.headerfile and options.mcfile:
+        parser.error("You must specify either header files or a microcode file, not both")
+    if options.headerfile:
+        date, license_text, microcodes = ParseHeaderFiles(options.headerfile)
+    elif options.mcfile:
+        date, license_text, microcodes = ParseFile(options.mcfile)
+    else:
+        parser.error('You must specify a microcode file (or header files)')
+
+    if cmd == 'list':
+        List(date, microcodes, options.model)
+    elif cmd == 'license':
+        print '\n'.join(license_text)
+    elif cmd == 'create':
+        if not options.model:
+            parser.error('You must specify a model to create')
+        model = options.model.lower()
+        if options.model == 'all':
+            options.multiple = True
+            mcode_list = microcodes.values()
+            tried = []
+        else:
+            mcode_list, tried = FindMicrocode(microcodes, model)
+        if not mcode_list:
+            parser.error("Unknown model '%s' (%s) - try 'list' to list" %
+                        (model, ', '.join(tried)))
+        if not options.multiple and len(mcode_list) > 1:
+            parser.error("Ambiguous model '%s' (%s) matched %s - try 'list' "
+                        "to list or specify a particular file" %
+                        (model, ', '.join(tried),
+                        ', '.join([m.name for m in mcode_list])))
+        CreateFile(date, license_text, mcode_list, options.outfile)
+    else:
+        parser.error("Unknown command '%s'" % cmd)
+
+if __name__ == "__main__":
+    MicrocodeTool()
diff --git a/tools/u-boot-tools/mingw_support.c b/tools/u-boot-tools/mingw_support.c
new file mode 100644
index 0000000000000000000000000000000000000000..2b17bf7dd28b6f78d4a1d81aea69fc3f800a8e05
--- /dev/null
+++ b/tools/u-boot-tools/mingw_support.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2008 Extreme Engineering Solutions, Inc.
+ *
+ * mmap/munmap implementation derived from:
+ * Clamav Native Windows Port : mmap win32 compatibility layer
+ * Copyright (c) 2005-2006 Gianluigi Tiesi <sherpya@netfarm.it>
+ * Parts by Kees Zeelenberg <kzlg@users.sourceforge.net> (LibGW32C)
+ */
+
+#include "mingw_support.h"
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <io.h>
+
+int fsync(int fd)
+{
+	return _commit(fd);
+}
+
+void *mmap(void *addr, size_t len, int prot, int flags, int fd, int offset)
+{
+	void *map = NULL;
+	HANDLE handle = INVALID_HANDLE_VALUE;
+	DWORD cfm_flags = 0, mvf_flags = 0;
+
+	switch (prot) {
+	case PROT_READ | PROT_WRITE:
+		cfm_flags = PAGE_READWRITE;
+		mvf_flags = FILE_MAP_ALL_ACCESS;
+		break;
+	case PROT_WRITE:
+		cfm_flags = PAGE_READWRITE;
+		mvf_flags = FILE_MAP_WRITE;
+		break;
+	case PROT_READ:
+		cfm_flags = PAGE_READONLY;
+		mvf_flags = FILE_MAP_READ;
+		break;
+	default:
+		return MAP_FAILED;
+	}
+
+	handle = CreateFileMappingA((HANDLE) _get_osfhandle(fd), NULL,
+				cfm_flags, HIDWORD(len), LODWORD(len), NULL);
+	if (!handle)
+		return MAP_FAILED;
+
+	map = MapViewOfFile(handle, mvf_flags, HIDWORD(offset),
+			LODWORD(offset), len);
+	CloseHandle(handle);
+
+	if (!map)
+		return MAP_FAILED;
+
+	return map;
+}
+
+int munmap(void *addr, size_t len)
+{
+	if (!UnmapViewOfFile(addr))
+		return -1;
+
+	return 0;
+}
+
+/* Reentrant string tokenizer.  Generic version.
+   Copyright (C) 1991,1996-1999,2001,2004,2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+ */
+
+/* Parse S into tokens separated by characters in DELIM.
+   If S is NULL, the saved pointer in SAVE_PTR is used as
+   the next starting point.  For example:
+	char s[] = "-abc-=-def";
+	char *sp;
+	x = strtok_r(s, "-", &sp);	// x = "abc", sp = "=-def"
+	x = strtok_r(NULL, "-=", &sp);	// x = "def", sp = NULL
+	x = strtok_r(NULL, "=", &sp);	// x = NULL
+		// s = "abc\0-def\0"
+*/
+char *strtok_r(char *s, const char *delim, char **save_ptr)
+{
+	char *token;
+
+	if (s == NULL)
+		s = *save_ptr;
+
+	/* Scan leading delimiters.  */
+	s += strspn(s, delim);
+	if (*s == '\0') {
+		*save_ptr = s;
+		return NULL;
+	}
+
+	/* Find the end of the token.  */
+	token = s;
+	s = strpbrk (token, delim);
+	if (s == NULL) {
+		/* This token finishes the string.  */
+		*save_ptr = memchr(token, '\0', strlen(token));
+	} else {
+		/* Terminate the token and make *SAVE_PTR point past it.  */
+		*s = '\0';
+		*save_ptr = s + 1;
+	}
+	return token;
+}
+
+#include "getline.c"
diff --git a/tools/u-boot-tools/mingw_support.h b/tools/u-boot-tools/mingw_support.h
new file mode 100644
index 0000000000000000000000000000000000000000..e0b8ac3ebc9cd8cd31273f9d822bf80442763294
--- /dev/null
+++ b/tools/u-boot-tools/mingw_support.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: LGPL-2.0+ */
+/*
+ * Copyright 2008 Extreme Engineering Solutions, Inc.
+ */
+
+#ifndef __MINGW_SUPPORT_H_
+#define __MINGW_SUPPORT_H_	1
+
+/* Defining __INSIDE_MSYS__ helps to prevent u-boot/mingw overlap */
+#define __INSIDE_MSYS__	1
+
+#include <windows.h>
+
+/* mmap protections */
+#define PROT_READ	0x1		/* Page can be read */
+#define PROT_WRITE	0x2		/* Page can be written */
+#define PROT_EXEC	0x4		/* Page can be executed */
+#define PROT_NONE	0x0		/* Page can not be accessed */
+
+/* Sharing types (must choose one and only one of these) */
+#define MAP_SHARED	0x01		/* Share changes */
+#define MAP_PRIVATE	0x02		/* Changes are private */
+
+/* File perms */
+#ifndef S_IRGRP
+# define S_IRGRP 0
+#endif
+#ifndef S_IWGRP
+# define S_IWGRP 0
+#endif
+
+/* Windows 64-bit access macros */
+#define LODWORD(x) ((DWORD)((DWORDLONG)(x)))
+#define HIDWORD(x) ((DWORD)(((DWORDLONG)(x) >> 32) & 0xffffffff))
+
+typedef	UINT	uint;
+typedef	ULONG	ulong;
+
+int fsync(int fd);
+void *mmap(void *, size_t, int, int, int, int);
+int munmap(void *, size_t);
+char *strtok_r(char *s, const char *delim, char **save_ptr);
+#include "getline.h"
+
+#endif /* __MINGW_SUPPORT_H_ */
diff --git a/tools/u-boot-tools/mips-relocs.c b/tools/u-boot-tools/mips-relocs.c
new file mode 100644
index 0000000000000000000000000000000000000000..625258085b608b17a54342db5ee90affd89f0ca0
--- /dev/null
+++ b/tools/u-boot-tools/mips-relocs.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MIPS Relocation Data Generator
+ *
+ * Copyright (c) 2017 Imagination Technologies Ltd.
+ */
+
+#include <assert.h>
+#include <elf.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <asm/relocs.h>
+
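+/*
+ * hdr_field()/set_hdr_field() access one field of an ELF header array (e*, p*
+ * or s* prefix), selecting the 32- or 64-bit layout at run time and converting
+ * between target and host byte order according to the field's size.
+ */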
+#define hdr_field(pfx, idx, field) ({				\
+	uint64_t _val;						\
+	unsigned int _size;					\
+								\
+	if (is_64) {						\
+		_val = pfx##hdr64[idx].field;			\
+		_size = sizeof(pfx##hdr64[0].field);		\
+	} else {						\
+		_val = pfx##hdr32[idx].field;			\
+		_size = sizeof(pfx##hdr32[0].field);		\
+	}							\
+								\
+	switch (_size) {					\
+	case 1:							\
+		break;						\
+	case 2:							\
+		_val = is_be ? be16toh(_val) : le16toh(_val);	\
+		break;						\
+	case 4:							\
+		_val = is_be ? be32toh(_val) : le32toh(_val);	\
+		break;						\
+	case 8:							\
+		_val = is_be ? be64toh(_val) : le64toh(_val);	\
+		break;						\
+	}							\
+								\
+	_val;							\
+})
+
+#define set_hdr_field(pfx, idx, field, val) ({			\
+	uint64_t _val;						\
+	unsigned int _size;					\
+								\
+	if (is_64)						\
+		_size = sizeof(pfx##hdr64[0].field);		\
+	else							\
+		_size = sizeof(pfx##hdr32[0].field);		\
+								\
+	switch (_size) {					\
+	case 1:							\
+		_val = val;					\
+		break;						\
+	case 2:							\
+		_val = is_be ? htobe16(val) : htole16(val);	\
+		break;						\
+	case 4:							\
+		_val = is_be ? htobe32(val) : htole32(val);	\
+		break;						\
+	case 8:							\
+		_val = is_be ? htobe64(val) : htole64(val);	\
+		break;						\
+	default:						\
+		/* We should never reach here */		\
+		_val = 0;					\
+		assert(0);					\
+		break;						\
+	}							\
+								\
+	if (is_64)						\
+		pfx##hdr64[idx].field = _val;			\
+	else							\
+		pfx##hdr32[idx].field = _val;			\
+})
+
+#define ehdr_field(field) \
+	hdr_field(e, 0, field)
+#define phdr_field(idx, field) \
+	hdr_field(p, idx, field)
+#define shdr_field(idx, field) \
+	hdr_field(s, idx, field)
+
+#define set_phdr_field(idx, field, val) \
+	set_hdr_field(p, idx, field, val)
+#define set_shdr_field(idx, field, val) \
+	set_hdr_field(s, idx, field, val)
+
+#define shstr(idx) (&shstrtab[idx])
+
+bool is_64, is_be;
+uint64_t text_base;
+
+struct mips_reloc {
+	uint8_t type;
+	uint64_t offset;
+} *relocs;
+size_t relocs_sz, relocs_idx;
+
+static int add_reloc(unsigned int type, uint64_t off)
+{
+	struct mips_reloc *new;
+	size_t new_sz;
+
+	switch (type) {
+	case R_MIPS_NONE:
+	case R_MIPS_LO16:
+	case R_MIPS_PC16:
+	case R_MIPS_HIGHER:
+	case R_MIPS_HIGHEST:
+	case R_MIPS_PC21_S2:
+	case R_MIPS_PC26_S2:
+		/* Skip these relocs */
+		return 0;
+
+	default:
+		break;
+	}
+
+	if (relocs_idx == relocs_sz) {
+		new_sz = relocs_sz ? relocs_sz * 2 : 128;
+		new = realloc(relocs, new_sz * sizeof(*relocs));
+		if (!new) {
+			fprintf(stderr, "Out of memory\n");
+			return -ENOMEM;
+		}
+
+		relocs = new;
+		relocs_sz = new_sz;
+	}
+
+	relocs[relocs_idx++] = (struct mips_reloc){
+		.type = type,
+		.offset = off,
+	};
+
+	return 0;
+}
+
+static int parse_mips32_rel(const void *_rel)
+{
+	const Elf32_Rel *rel = _rel;
+	uint32_t off, type;
+
+	off = is_be ? be32toh(rel->r_offset) : le32toh(rel->r_offset);
+	off -= text_base;
+
+	type = is_be ? be32toh(rel->r_info) : le32toh(rel->r_info);
+	type = ELF32_R_TYPE(type);
+
+	return add_reloc(type, off);
+}
+
+static int parse_mips64_rela(const void *_rel)
+{
+	const Elf64_Rela *rel = _rel;
+	uint64_t off, type;
+
+	off = is_be ? be64toh(rel->r_offset) : le64toh(rel->r_offset);
+	off -= text_base;
+
+	type = rel->r_info >> (64 - 8);
+
+	return add_reloc(type, off);
+}
+
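+/*
+ * Emit 'val' in a little-endian base-128 (LEB128-style) encoding: seven
+ * payload bits per output byte, with the top bit set on all bytes except the
+ * last.  For example, the value 300 (0x12c) is written as the two bytes
+ * 0xac 0x02.
+ */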
+static void output_uint(uint8_t **buf, uint64_t val)
+{
+	uint64_t tmp;
+
+	do {
+		tmp = val & 0x7f;
+		val >>= 7;
+		tmp |= !!val << 7;
+		*(*buf)++ = tmp;
+	} while (val);
+}
+
+static int compare_relocs(const void *a, const void *b)
+{
+	const struct mips_reloc *ra = a, *rb = b;
+
+	return ra->offset - rb->offset;
+}
+
+int main(int argc, char *argv[])
+{
+	unsigned int i, j, i_rel_shdr, sh_type, sh_entsize, sh_entries;
+	size_t rel_size, rel_actual_size;
+	const char *shstrtab, *sh_name, *rel_pfx;
+	int (*parse_fn)(const void *rel);
+	uint8_t *buf_start, *buf;
+	const Elf32_Ehdr *ehdr32;
+	const Elf64_Ehdr *ehdr64;
+	uintptr_t sh_offset;
+	Elf32_Shdr *shdr32;
+	Elf64_Shdr *shdr64;
+	struct stat st;
+	int err, fd;
+	void *elf;
+	bool skip;
+
+	fd = open(argv[1], O_RDWR);
+	if (fd == -1) {
+		fprintf(stderr, "Unable to open input file %s\n", argv[1]);
+		err = errno;
+		goto out_ret;
+	}
+
+	err = fstat(fd, &st);
+	if (err) {
+		fprintf(stderr, "Unable to fstat() input file\n");
+		goto out_close_fd;
+	}
+
+	elf = mmap(NULL, st.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	if (elf == MAP_FAILED) {
+		fprintf(stderr, "Unable to mmap() input file\n");
+		err = errno;
+		goto out_close_fd;
+	}
+
+	ehdr32 = elf;
+	ehdr64 = elf;
+
+	if (memcmp(&ehdr32->e_ident[EI_MAG0], ELFMAG, SELFMAG)) {
+		fprintf(stderr, "Input file is not an ELF\n");
+		err = -EINVAL;
+		goto out_free_relocs;
+	}
+
+	if (ehdr32->e_ident[EI_VERSION] != EV_CURRENT) {
+		fprintf(stderr, "Unrecognised ELF version\n");
+		err = -EINVAL;
+		goto out_free_relocs;
+	}
+
+	switch (ehdr32->e_ident[EI_CLASS]) {
+	case ELFCLASS32:
+		is_64 = false;
+		break;
+	case ELFCLASS64:
+		is_64 = true;
+		break;
+	default:
+		fprintf(stderr, "Unrecognised ELF class\n");
+		err = -EINVAL;
+		goto out_free_relocs;
+	}
+
+	switch (ehdr32->e_ident[EI_DATA]) {
+	case ELFDATA2LSB:
+		is_be = false;
+		break;
+	case ELFDATA2MSB:
+		is_be = true;
+		break;
+	default:
+		fprintf(stderr, "Unrecognised ELF data encoding\n");
+		err = -EINVAL;
+		goto out_free_relocs;
+	}
+
+	if (ehdr_field(e_type) != ET_EXEC) {
+		fprintf(stderr, "Input ELF is not an executable\n");
+		printf("type 0x%lx\n", ehdr_field(e_type));
+		err = -EINVAL;
+		goto out_free_relocs;
+	}
+
+	if (ehdr_field(e_machine) != EM_MIPS) {
+		fprintf(stderr, "Input ELF does not target MIPS\n");
+		err = -EINVAL;
+		goto out_free_relocs;
+	}
+
+	shdr32 = elf + ehdr_field(e_shoff);
+	shdr64 = elf + ehdr_field(e_shoff);
+	shstrtab = elf + shdr_field(ehdr_field(e_shstrndx), sh_offset);
+
+	i_rel_shdr = UINT_MAX;
+	for (i = 0; i < ehdr_field(e_shnum); i++) {
+		sh_name = shstr(shdr_field(i, sh_name));
+
+		if (!strcmp(sh_name, ".data.reloc")) {
+			i_rel_shdr = i;
+			continue;
+		}
+
+		if (!strcmp(sh_name, ".text")) {
+			text_base = shdr_field(i, sh_addr);
+			continue;
+		}
+	}
+	if (i_rel_shdr == UINT_MAX) {
+		fprintf(stderr, "Unable to find .rel section\n");
+		err = -EINVAL;
+		goto out_free_relocs;
+	}
+	if (!text_base) {
+		fprintf(stderr, "Unable to find .text base address\n");
+		err = -EINVAL;
+		goto out_free_relocs;
+	}
+
+	rel_pfx = is_64 ? ".rela." : ".rel.";
+
+	for (i = 0; i < ehdr_field(e_shnum); i++) {
+		sh_type = shdr_field(i, sh_type);
+		if ((sh_type != SHT_REL) && (sh_type != SHT_RELA))
+			continue;
+
+		sh_name = shstr(shdr_field(i, sh_name));
+		if (strncmp(sh_name, rel_pfx, strlen(rel_pfx))) {
+			if (strcmp(sh_name, ".rel") && strcmp(sh_name, ".rel.dyn"))
+				fprintf(stderr, "WARNING: Unexpected reloc section name '%s'\n", sh_name);
+			continue;
+		}
+
+		/*
+		 * Skip reloc sections which either don't correspond to another
+		 * section in the ELF, or whose corresponding section isn't
+		 * loaded as part of the U-Boot binary (i.e. doesn't have the
+		 * SHF_ALLOC flag set).
+		 */
+		skip = true;
+		for (j = 0; j < ehdr_field(e_shnum); j++) {
+			if (strcmp(&sh_name[strlen(rel_pfx) - 1], shstr(shdr_field(j, sh_name))))
+				continue;
+
+			skip = !(shdr_field(j, sh_flags) & SHF_ALLOC);
+			break;
+		}
+		if (skip)
+			continue;
+
+		sh_offset = shdr_field(i, sh_offset);
+		sh_entsize = shdr_field(i, sh_entsize);
+		sh_entries = shdr_field(i, sh_size) / sh_entsize;
+
+		if (sh_type == SHT_REL) {
+			if (is_64) {
+				fprintf(stderr, "REL-style reloc in MIPS64 ELF?\n");
+				err = -EINVAL;
+				goto out_free_relocs;
+			} else {
+				parse_fn = parse_mips32_rel;
+			}
+		} else {
+			if (is_64) {
+				parse_fn = parse_mips64_rela;
+			} else {
+				fprintf(stderr, "RELA-style reloc in MIPS32 ELF?\n");
+				err = -EINVAL;
+				goto out_free_relocs;
+			}
+		}
+
+		for (j = 0; j < sh_entries; j++) {
+			err = parse_fn(elf + sh_offset + (j * sh_entsize));
+			if (err)
+				goto out_free_relocs;
+		}
+	}
+
+	/* Sort relocs in ascending order of offset */
+	qsort(relocs, relocs_idx, sizeof(*relocs), compare_relocs);
+
+	/* Make reloc offsets relative to their predecessor */
+	for (i = relocs_idx - 1; i > 0; i--)
+		relocs[i].offset -= relocs[i - 1].offset;
+
+	/* Write the relocations to the .rel section */
+	buf = buf_start = elf + shdr_field(i_rel_shdr, sh_offset);
+	for (i = 0; i < relocs_idx; i++) {
+		output_uint(&buf, relocs[i].type);
+		output_uint(&buf, relocs[i].offset >> 2);
+	}
+
+	/* Write a terminating R_MIPS_NONE (0) */
+	output_uint(&buf, R_MIPS_NONE);
+
+	/* Ensure the relocs didn't overflow the .rel section */
+	rel_size = shdr_field(i_rel_shdr, sh_size);
+	rel_actual_size = buf - buf_start;
+	if (rel_actual_size > rel_size) {
+		fprintf(stderr, "Relocations overflow available space of 0x%lx (required 0x%lx)!\n",
+			rel_size, rel_actual_size);
+		fprintf(stderr, "Please adjust CONFIG_MIPS_RELOCATION_TABLE_SIZE to at least 0x%lx\n",
+			(rel_actual_size + 0x100) & ~0xFF);
+		err = -ENOMEM;
+		goto out_free_relocs;
+	}
+
+	/* Make sure data is written back to the file */
+	err = msync(elf, st.st_size, MS_SYNC);
+	if (err) {
+		fprintf(stderr, "Failed to msync: %d\n", errno);
+		goto out_free_relocs;
+	}
+
+out_free_relocs:
+	free(relocs);
+	munmap(elf, st.st_size);
+out_close_fd:
+	close(fd);
+out_ret:
+	return err;
+}
diff --git a/tools/u-boot-tools/mkenvimage b/tools/u-boot-tools/mkenvimage
new file mode 100755
index 0000000000000000000000000000000000000000..86bbcc097200eea97c69326c0ac58e93a01b0564
Binary files /dev/null and b/tools/u-boot-tools/mkenvimage differ
diff --git a/tools/u-boot-tools/mkenvimage.c b/tools/u-boot-tools/mkenvimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..75967d0c2d59667da31ada29bc3d7abb3e3b5acc
--- /dev/null
+++ b/tools/u-boot-tools/mkenvimage.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2011 Free Electrons
+ * David Wagner <david.wagner@free-electrons.com>
+ *
+ * Inspired from envcrc.c:
+ * (C) Copyright 2001
+ * Paolo Scaffardi, AIRVENT SAM s.p.a - RIMINI(ITALY), arsenio@tin.it
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <libgen.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#include "compiler.h"
+#include <u-boot/crc.h>
+#include <version.h>
+
+#define CRC_SIZE sizeof(uint32_t)
+
+static void usage(const char *exec_name)
+{
+	fprintf(stderr, "%s [-h] [-r] [-b] [-p <byte>] -s <environment partition size> -o <output> <input file>\n"
+	       "\n"
+	       "This tool takes a key=value input file (in the same format as `printenv' output) and generates the corresponding environment image, ready to be flashed.\n"
+	       "\n"
+	       "\tThe input file is in format:\n"
+	       "\t\tkey1=value1\n"
+	       "\t\tkey2=value2\n"
+	       "\t\t...\n"
+	       "\tEmpty lines are skipped, and lines with a # in the first\n"
+	       "\tcolumn are treated as comments (also skipped).\n"
+	       "\t-r : the environment has multiple copies in flash\n"
+	       "\t-b : the target is big endian (default is little endian)\n"
+	       "\t-p <byte> : fill the image with <byte> bytes instead of 0xff bytes\n"
+	       "\t-V : print version information and exit\n"
+	       "\n"
+	       "If the input file is \"-\", data is read from standard input\n",
+	       exec_name);
+}
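+
+/*
+ * Example (illustrative only, file names assumed): given an input file
+ * "uboot-env.txt" containing
+ *
+ *	bootdelay=3
+ *	bootcmd=run distro_bootcmd
+ *
+ * an 8 KiB big-endian environment image could be generated with:
+ *
+ *	mkenvimage -b -s 0x2000 -o uboot-env.bin uboot-env.txt
+ */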
+
+long int xstrtol(const char *s)
+{
+	long int tmp;
+
+	errno = 0;
+	tmp = strtol(s, NULL, 0);
+	if (!errno)
+		return tmp;
+
+	if (errno == ERANGE)
+		fprintf(stderr, "Bad integer format: %s\n",  s);
+	else
+		fprintf(stderr, "Error while parsing %s: %s\n", s,
+				strerror(errno));
+
+	exit(EXIT_FAILURE);
+}
+
+int main(int argc, char **argv)
+{
+	uint32_t crc, targetendian_crc;
+	const char *txt_filename = NULL, *bin_filename = NULL;
+	int txt_fd, bin_fd;
+	unsigned char *dataptr, *envptr;
+	unsigned char *filebuf = NULL;
+	unsigned int filesize = 0, envsize = 0, datasize = 0;
+	int bigendian = 0;
+	int redundant = 0;
+	unsigned char padbyte = 0xff;
+
+	int option;
+	int ret = EXIT_SUCCESS;
+
+	struct stat txt_file_stat;
+
+	int fp, ep;
+	const char *prg;
+
+	prg = basename(argv[0]);
+
+	/* Turn off getopt()'s internal error message */
+	opterr = 0;
+
+	/* Parse the cmdline */
+	while ((option = getopt(argc, argv, ":s:o:rbp:hV")) != -1) {
+		switch (option) {
+		case 's':
+			datasize = xstrtol(optarg);
+			break;
+		case 'o':
+			bin_filename = strdup(optarg);
+			if (!bin_filename) {
+				fprintf(stderr, "Can't strdup() the output filename\n");
+				return EXIT_FAILURE;
+			}
+			break;
+		case 'r':
+			redundant = 1;
+			break;
+		case 'b':
+			bigendian = 1;
+			break;
+		case 'p':
+			padbyte = xstrtol(optarg);
+			break;
+		case 'h':
+			usage(prg);
+			return EXIT_SUCCESS;
+		case 'V':
+			printf("%s version %s\n", prg, PLAIN_VERSION);
+			return EXIT_SUCCESS;
+		case ':':
+			fprintf(stderr, "Missing argument for option -%c\n",
+				optopt);
+			usage(prg);
+			return EXIT_FAILURE;
+		default:
+			fprintf(stderr, "Wrong option -%c\n", optopt);
+			usage(prg);
+			return EXIT_FAILURE;
+		}
+	}
+
+	/* Check datasize and allocate the data */
+	if (datasize == 0) {
+		fprintf(stderr, "Please specify the size of the environment partition.\n");
+		usage(prg);
+		return EXIT_FAILURE;
+	}
+
+	dataptr = malloc(datasize * sizeof(*dataptr));
+	if (!dataptr) {
+		fprintf(stderr, "Can't alloc %d bytes for dataptr.\n",
+				datasize);
+		return EXIT_FAILURE;
+	}
+
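+	/*
+	 * Resulting image layout (shown for illustration):
+	 *
+	 *   [ CRC32 of env data ][ optional 0x01 redundancy marker ][ env data, padded ]
+	 */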
+	/*
+	 * envptr points to the beginning of the actual environment (after the
+	 * CRC and the optional `redundant' flag byte).
+	 */
+	envsize = datasize - (CRC_SIZE + redundant);
+	envptr = dataptr + CRC_SIZE + redundant;
+
+	/* Pad the environment with the padding byte */
+	memset(envptr, padbyte, envsize);
+
+	/* Open the input file ... */
+	if (optind >= argc || strcmp(argv[optind], "-") == 0) {
+		int readbytes = 0;
+		int readlen = sizeof(*envptr) * 4096;
+		txt_fd = STDIN_FILENO;
+
+		do {
+			filebuf = realloc(filebuf, filesize + readlen);
+			if (!filebuf) {
+				fprintf(stderr, "Can't realloc memory for the input file buffer\n");
+				return EXIT_FAILURE;
+			}
+			readbytes = read(txt_fd, filebuf + filesize, readlen);
+			if (readbytes < 0) {
+				fprintf(stderr, "Error while reading stdin: %s\n",
+						strerror(errno));
+				return EXIT_FAILURE;
+			}
+			filesize += readbytes;
+		} while (readbytes == readlen);
+
+	} else {
+		txt_filename = argv[optind];
+		txt_fd = open(txt_filename, O_RDONLY);
+		if (txt_fd == -1) {
+			fprintf(stderr, "Can't open \"%s\": %s\n",
+					txt_filename, strerror(errno));
+			return EXIT_FAILURE;
+		}
+		/* ... and check it */
+		ret = fstat(txt_fd, &txt_file_stat);
+		if (ret == -1) {
+			fprintf(stderr, "Can't stat() on \"%s\": %s\n",
+					txt_filename, strerror(errno));
+			return EXIT_FAILURE;
+		}
+
+		filesize = txt_file_stat.st_size;
+
+		filebuf = mmap(NULL, sizeof(*envptr) * filesize, PROT_READ,
+			       MAP_PRIVATE, txt_fd, 0);
+		if (filebuf == MAP_FAILED) {
+			fprintf(stderr, "mmap (%zu bytes) failed: %s\n",
+					sizeof(*envptr) * filesize,
+					strerror(errno));
+			fprintf(stderr, "Falling back to read()\n");
+
+			filebuf = malloc(sizeof(*envptr) * filesize);
+			ret = read(txt_fd, filebuf, sizeof(*envptr) * filesize);
+			if (ret != sizeof(*envptr) * filesize) {
+				fprintf(stderr, "Can't read the whole input file (%zu bytes): %s\n",
+					sizeof(*envptr) * filesize,
+					strerror(errno));
+
+				return EXIT_FAILURE;
+			}
+		}
+		ret = close(txt_fd);
+	}
+
+	/*
+	 * Parse one byte at a time until reaching the end of the file or until
+	 * the environment fills up.  Check ep against envsize - 1 to allow for
+	 * the extra trailing '\0'.
+	 */
+	for (fp = 0, ep = 0 ; fp < filesize && ep < envsize - 1; fp++) {
+		if (filebuf[fp] == '\n') {
+			if (fp == 0 || filebuf[fp-1] == '\n') {
+				/*
+				 * Skip empty lines.
+				 */
+				continue;
+			} else if (filebuf[fp-1] == '\\') {
+				/*
+				 * Embedded newline in a variable.
+				 *
+				 * The backslash was added to the envptr; rewind
+				 * and replace it with a newline
+				 */
+				ep--;
+				envptr[ep++] = '\n';
+			} else {
+				/* End of a variable */
+				envptr[ep++] = '\0';
+			}
+		} else if ((fp == 0 || filebuf[fp-1] == '\n') && filebuf[fp] == '#') {
+			/* Comment, skip the line. */
+			while (++fp < filesize && filebuf[fp] != '\n')
+				continue;
+		} else {
+			envptr[ep++] = filebuf[fp];
+		}
+	}
+	/*
+	 * If there are still bytes left in the file, the environment filled up
+	 * before the whole file was parsed.  Eat comments and whitespace here
+	 * to see if anything meaningful is left in the file, and if so, report
+	 * an error and exit.
+	 */
+	for (; fp < filesize; fp++) {
+		if (filebuf[fp] == '\n') {
+			if (fp == 0 || filebuf[fp-1] == '\n') {
+				/* Ignore blank lines */
+				continue;
+			}
+		} else if ((fp == 0 || filebuf[fp-1] == '\n') && filebuf[fp] == '#') {
+			while (++fp < filesize && filebuf[fp] != '\n')
+				continue;
+		} else {
+			fprintf(stderr, "The environment file is too large for the target environment storage\n");
+			return EXIT_FAILURE;
+		}
+	}
+	/*
+	 * Make sure there is a final '\0'
+	 * And do it again on the next byte to mark the end of the environment.
+	 */
+	if (envptr[ep-1] != '\0') {
+		envptr[ep++] = '\0';
+		/*
+		 * The text file doesn't have an ending newline.  We need to
+		 * check the env size again to make sure we have room for two \0
+		 */
+		if (ep >= envsize) {
+			fprintf(stderr, "The environment file is too large for the target environment storage\n");
+			return EXIT_FAILURE;
+		}
+		envptr[ep] = '\0';
+	} else {
+		envptr[ep] = '\0';
+	}
+
+	/* Compute the CRC and put it at the beginning of the data */
+	crc = crc32(0, envptr, envsize);
+	targetendian_crc = bigendian ? cpu_to_be32(crc) : cpu_to_le32(crc);
+
+	memcpy(dataptr, &targetendian_crc, sizeof(targetendian_crc));
+	if (redundant)
+		dataptr[sizeof(targetendian_crc)] = 1;
+
+	if (!bin_filename || strcmp(bin_filename, "-") == 0) {
+		bin_fd = STDOUT_FILENO;
+	} else {
+		bin_fd = creat(bin_filename, S_IRUSR | S_IWUSR | S_IRGRP |
+					     S_IWGRP);
+		if (bin_fd == -1) {
+			fprintf(stderr, "Can't open output file \"%s\": %s\n",
+					bin_filename, strerror(errno));
+			return EXIT_FAILURE;
+		}
+	}
+
+	if (write(bin_fd, dataptr, sizeof(*dataptr) * datasize) !=
+			sizeof(*dataptr) * datasize) {
+		fprintf(stderr, "write() failed: %s\n", strerror(errno));
+		return EXIT_FAILURE;
+	}
+
+	ret = close(bin_fd);
+
+	return ret;
+}
diff --git a/tools/u-boot-tools/mkenvimage.o b/tools/u-boot-tools/mkenvimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..762c00f0575b2b809e547e76cc12fed51bf4ecef
Binary files /dev/null and b/tools/u-boot-tools/mkenvimage.o differ
diff --git a/tools/u-boot-tools/mkexynosspl.c b/tools/u-boot-tools/mkexynosspl.c
new file mode 100644
index 0000000000000000000000000000000000000000..53122b8614a08ff39625e926204f43a7a182da08
--- /dev/null
+++ b/tools/u-boot-tools/mkexynosspl.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2012 Samsung Electronics
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <compiler.h>
+
+#define CHECKSUM_OFFSET		(14*1024-4)
+#define FILE_PERM		(S_IRUSR | S_IWUSR | S_IRGRP \
+				| S_IWGRP | S_IROTH | S_IWOTH)
+/*
+ * Requirement for the fixed size SPL header:
+ * IROM code reads the first (CHECKSUM_OFFSET + 4) bytes from the boot device.
+ * It then calculates the checksum of the first CHECKSUM_OFFSET bytes and
+ * compares it with the data stored at the CHECKSUM_OFFSET location.
+ *
+ * Requirement for the variable size SPL header:
+ *
+ * IROM code reads the below header to find out the size of the blob (total
+ * size, header size included) and its checksum. Then it reads the rest of the
+ * blob [i.e. size - sizeof(struct var_size_header) bytes], calculates the
+ * checksum and compares it with the value read from the header.
+ */
+struct var_size_header {
+	uint32_t spl_size;
+	uint32_t spl_checksum;
+	uint32_t reserved[2];
+};
+
+static const char *prog_name;
+
+static void write_to_file(int ofd, void *buffer, int size)
+{
+	if (write(ofd, buffer, size) == size)
+		return;
+
+	fprintf(stderr, "%s: Failed to write to output file: %s\n",
+		prog_name, strerror(errno));
+	exit(EXIT_FAILURE);
+}
+
+/*
+ * The argv is expected to include one optional parameter and two filenames:
+ * [--vs] IN OUT
+ *
+ * --vs - turns on the variable size SPL mode
+ * IN  - the u-boot SPL binary, usually u-boot-spl.bin
+ * OUT - the prepared SPL blob, usually ${BOARD}-spl.bin
+ *
+ * This utility first reads the "u-boot-spl.bin" into a buffer. In case of
+ * fixed size SPL the buffer size is exactly CHECKSUM_OFFSET (such that a
+ * smaller u-boot-spl.bin gets padded with 0xff bytes, while a u-boot-spl.bin
+ * larger than that limit causes an error). For variable size SPL the buffer
+ * size is equal to the size of the IN file.
+ *
+ * Then it calculates the checksum of the buffer by simply summing up all of
+ * its bytes. Then:
+ *
+ * - for fixed size SPL the buffer is written into the output file and the
+ *   checksum is appended to the file in little endian format, which results
+ *   in checksum added exactly at CHECKSUM_OFFSET.
+ *
+ * - for variable size SPL the checksum and file size are stored in the
+ *   var_size_header structure (again, in little endian format) and the
+ *   structure is written into the output file. Then the buffer is written
+ *   into the output file.
+ */
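+/*
+ * Resulting output layouts (shown for illustration):
+ *
+ *   fixed size SPL:    [ SPL code, 0xff-padded to CHECKSUM_OFFSET ][ checksum, LE32 ]
+ *   variable size SPL: [ struct var_size_header ][ SPL code ]
+ */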
+int main(int argc, char **argv)
+{
+	unsigned char *buffer;
+	int i, ifd, ofd;
+	uint32_t checksum = 0;
+	off_t	len;
+	int	var_size_flag, read_size, count;
+	struct stat stat;
+	const int if_index = argc - 2; /* Input file name index in argv. */
+	const int of_index = argc - 1; /* Output file name index in argv. */
+
+	/* Strip path off the program name. */
+	prog_name = strrchr(argv[0], '/');
+	if (prog_name)
+		prog_name++;
+	else
+		prog_name = argv[0];
+
+	if ((argc < 3) ||
+	    (argc > 4) ||
+	    ((argc == 4) && strcmp(argv[1], "--vs"))) {
+		fprintf(stderr, "Usage: %s [--vs] <infile> <outfile>\n",
+			prog_name);
+		exit(EXIT_FAILURE);
+	}
+
+	/* four args mean variable size SPL wrapper is required */
+	var_size_flag = (argc == 4);
+
+	ifd = open(argv[if_index], O_RDONLY);
+	if (ifd < 0) {
+		fprintf(stderr, "%s: Can't open %s: %s\n",
+			prog_name, argv[if_index], strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	ofd = open(argv[of_index], O_WRONLY | O_CREAT | O_TRUNC, FILE_PERM);
+	if (ofd < 0) {
+		fprintf(stderr, "%s: Can't open %s: %s\n",
+			prog_name, argv[of_index], strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	if (fstat(ifd, &stat)) {
+		fprintf(stderr, "%s: Unable to get size of %s: %s\n",
+			prog_name, argv[if_index], strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	len = stat.st_size;
+
+	if (var_size_flag) {
+		read_size = len;
+		count = len;
+	} else {
+		if (len > CHECKSUM_OFFSET) {
+			fprintf(stderr,
+				"%s: %s is too big (exceeds %d bytes)\n",
+				prog_name, argv[if_index], CHECKSUM_OFFSET);
+			exit(EXIT_FAILURE);
+		}
+		count = CHECKSUM_OFFSET;
+		read_size = len;
+	}
+
+	buffer = malloc(count);
+	if (!buffer) {
+		fprintf(stderr,
+			"%s: Failed to allocate %d bytes to store %s\n",
+			prog_name, count, argv[if_index]);
+		exit(EXIT_FAILURE);
+	}
+
+	if (read(ifd, buffer, read_size) != read_size) {
+		fprintf(stderr, "%s: Can't read %s: %s\n",
+			prog_name, argv[if_index], strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+
+	/* Pad if needed with 0xff to make flashing faster. */
+	if (read_size < count)
+		memset((char *)buffer + read_size, 0xff, count - read_size);
+
+	for (i = 0, checksum = 0; i < count; i++)
+		checksum += buffer[i];
+	checksum = cpu_to_le32(checksum);
+
+	if (var_size_flag) {
+		/* Prepare and write out the variable size SPL header. */
+		struct var_size_header vsh;
+		uint32_t spl_size;
+
+		memset(&vsh, 0, sizeof(vsh));
+		memcpy(&vsh.spl_checksum, &checksum, sizeof(checksum));
+
+		spl_size = cpu_to_le32(count + sizeof(struct var_size_header));
+		memcpy(&vsh.spl_size, &spl_size, sizeof(spl_size));
+		write_to_file(ofd, &vsh, sizeof(vsh));
+	}
+
+	write_to_file(ofd, buffer, count);
+
+	/* For fixed size SPL checksum is appended in the end. */
+	if (!var_size_flag)
+		write_to_file(ofd, &checksum, sizeof(checksum));
+
+	close(ifd);
+	close(ofd);
+	free(buffer);
+
+	return EXIT_SUCCESS;
+}
diff --git a/tools/u-boot-tools/mkimage b/tools/u-boot-tools/mkimage
new file mode 100755
index 0000000000000000000000000000000000000000..86dc186443460707274d2f5de515c083f57bfe97
Binary files /dev/null and b/tools/u-boot-tools/mkimage differ
diff --git a/tools/u-boot-tools/mkimage.c b/tools/u-boot-tools/mkimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..ea5ed542ab9c120776be927c8b66601562a55758
--- /dev/null
+++ b/tools/u-boot-tools/mkimage.c
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2008 Semihalf
+ *
+ * (C) Copyright 2000-2009
+ * DENX Software Engineering
+ * Wolfgang Denk, wd@denx.de
+ */
+
+#include "mkimage.h"
+#include "imximage.h"
+#include <image.h>
+#include <version.h>
+
+static void copy_file(int, const char *, int);
+
+/* parameters initialized by core will be used by the image type code */
+static struct image_tool_params params = {
+	.os = IH_OS_LINUX,
+	.arch = IH_ARCH_PPC,
+	.type = IH_TYPE_KERNEL,
+	.comp = IH_COMP_GZIP,
+	.dtc = MKIMAGE_DEFAULT_DTC_OPTIONS,
+	.imagename = "",
+	.imagename2 = "",
+};
+
+static enum ih_category cur_category;
+
+static int h_compare_category_name(const void *vtype1, const void *vtype2)
+{
+	const int *type1 = vtype1;
+	const int *type2 = vtype2;
+	const char *name1 = genimg_get_cat_short_name(cur_category, *type1);
+	const char *name2 = genimg_get_cat_short_name(cur_category, *type2);
+
+	return strcmp(name1, name2);
+}
+
+static int show_valid_options(enum ih_category category)
+{
+	int *order;
+	int count;
+	int item;
+	int i;
+
+	count = genimg_get_cat_count(category);
+	order = calloc(count, sizeof(*order));
+	if (!order)
+		return -ENOMEM;
+
+	/* Sort the names in order of short name for easier reading */
+	for (item = 0; item < count; item++)
+		order[item] = item;
+	cur_category = category;
+	qsort(order, count, sizeof(int), h_compare_category_name);
+
+	fprintf(stderr, "\nInvalid %s, supported are:\n",
+		genimg_get_cat_desc(category));
+	for (i = 0; i < count; i++) {
+		item = order[i];
+		fprintf(stderr, "\t%-15s  %s\n",
+			genimg_get_cat_short_name(category, item),
+			genimg_get_cat_name(category, item));
+	}
+	fprintf(stderr, "\n");
+	free(order);
+
+	return 0;
+}
+
+static void usage(const char *msg)
+{
+	fprintf(stderr, "Error: %s\n", msg);
+	fprintf(stderr, "Usage: %s -l image\n"
+			 "          -l ==> list image header information\n",
+		params.cmdname);
+	fprintf(stderr,
+		"       %s [-x] -A arch -O os -T type -C comp -a addr -e ep -n name -d data_file[:data_file...] image\n"
+		"          -A ==> set architecture to 'arch'\n"
+		"          -O ==> set operating system to 'os'\n"
+		"          -T ==> set image type to 'type'\n"
+		"          -C ==> set compression type 'comp'\n"
+		"          -a ==> set load address to 'addr' (hex)\n"
+		"          -e ==> set entry point to 'ep' (hex)\n"
+		"          -n ==> set image name to 'name'\n"
+		"          -d ==> use image data from 'datafile'\n"
+		"          -x ==> set XIP (execute in place)\n",
+		params.cmdname);
+	fprintf(stderr,
+		"       %s [-D dtc_options] [-f fit-image.its|-f auto|-F] [-b <dtb> [-b <dtb>]] [-i <ramdisk.cpio.gz>] fit-image\n"
+		"           <dtb> file is used with -f auto, it may occur multiple times.\n",
+		params.cmdname);
+	fprintf(stderr,
+		"          -D => set all options for device tree compiler\n"
+		"          -f => input filename for FIT source\n"
+		"          -i => input filename for ramdisk file\n");
+#ifdef CONFIG_FIT_SIGNATURE
+	fprintf(stderr,
+		"Signing / verified boot options: [-E] [-k keydir] [-K dtb] [ -c <comment>] [-p addr] [-r] [-N engine]\n"
+		"          -E => place data outside of the FIT structure\n"
+		"          -k => set directory containing private keys\n"
+		"          -K => write public keys to this .dtb file\n"
+		"          -c => add comment in signature node\n"
+		"          -F => re-sign existing FIT image\n"
+		"          -p => place external data at a static position\n"
+		"          -r => mark keys used as 'required' in dtb\n"
+		"          -N => engine to use for signing (pkcs11)\n");
+#else
+	fprintf(stderr,
+		"Signing / verified boot not supported (CONFIG_FIT_SIGNATURE undefined)\n");
+#endif
+	fprintf(stderr, "       %s -V ==> print version information and exit\n",
+		params.cmdname);
+	fprintf(stderr, "Use '-T list' to see a list of available image types\n");
+
+	exit(EXIT_FAILURE);
+}
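+
+/*
+ * A typical invocation creating a legacy kernel image might look like this
+ * (illustrative only, file names and addresses assumed):
+ *
+ *	mkimage -A arm -O linux -T kernel -C gzip -a 0x80008000 -e 0x80008000 \
+ *		-n "Linux kernel" -d zImage.gz uImage
+ */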
+
+static int add_content(int type, const char *fname)
+{
+	struct content_info *cont;
+
+	cont = calloc(1, sizeof(*cont));
+	if (!cont)
+		return -1;
+	cont->type = type;
+	cont->fname = fname;
+	if (params.content_tail)
+		params.content_tail->next = cont;
+	else
+		params.content_head = cont;
+	params.content_tail = cont;
+
+	return 0;
+}
+
+static void process_args(int argc, char **argv)
+{
+	char *ptr;
+	int type = IH_TYPE_INVALID;
+	char *datafile = NULL;
+	int opt;
+
+	while ((opt = getopt(argc, argv,
+			     "a:A:b:c:C:d:D:e:Ef:Fk:i:K:ln:N:p:O:rR:qsT:vVx")) != -1) {
+		switch (opt) {
+		case 'a':
+			params.addr = strtoull(optarg, &ptr, 16);
+			if (*ptr) {
+				fprintf(stderr, "%s: invalid load address %s\n",
+					params.cmdname, optarg);
+				exit(EXIT_FAILURE);
+			}
+			break;
+		case 'A':
+			params.arch = genimg_get_arch_id(optarg);
+			if (params.arch < 0) {
+				show_valid_options(IH_ARCH);
+				usage("Invalid architecture");
+			}
+			break;
+		case 'b':
+			if (add_content(IH_TYPE_FLATDT, optarg)) {
+				fprintf(stderr,
+					"%s: Out of memory adding content '%s'",
+					params.cmdname, optarg);
+				exit(EXIT_FAILURE);
+			}
+			break;
+		case 'c':
+			params.comment = optarg;
+			break;
+		case 'C':
+			params.comp = genimg_get_comp_id(optarg);
+			if (params.comp < 0) {
+				show_valid_options(IH_COMP);
+				usage("Invalid compression type");
+			}
+			break;
+		case 'd':
+			params.datafile = optarg;
+			params.dflag = 1;
+			break;
+		case 'D':
+			params.dtc = optarg;
+			break;
+		case 'e':
+			params.ep = strtoull(optarg, &ptr, 16);
+			if (*ptr) {
+				fprintf(stderr, "%s: invalid entry point %s\n",
+					params.cmdname, optarg);
+				exit(EXIT_FAILURE);
+			}
+			params.eflag = 1;
+			break;
+		case 'E':
+			params.external_data = true;
+			break;
+		case 'f':
+			datafile = optarg;
+			params.auto_its = !strcmp(datafile, "auto");
+			/* no break */
+		case 'F':
+			/*
+			 * The flattened image tree (FIT) format
+			 * requires a flattened device tree image type
+			 */
+			params.type = IH_TYPE_FLATDT;
+			params.fflag = 1;
+			break;
+		case 'i':
+			params.fit_ramdisk = optarg;
+			break;
+		case 'k':
+			params.keydir = optarg;
+			break;
+		case 'K':
+			params.keydest = optarg;
+			break;
+		case 'l':
+			params.lflag = 1;
+			break;
+		case 'n':
+			params.imagename = optarg;
+			break;
+		case 'N':
+			params.engine_id = optarg;
+			break;
+		case 'O':
+			params.os = genimg_get_os_id(optarg);
+			if (params.os < 0) {
+				show_valid_options(IH_OS);
+				usage("Invalid operating system");
+			}
+			break;
+		case 'p':
+			params.external_offset = strtoull(optarg, &ptr, 16);
+			if (*ptr) {
+				fprintf(stderr, "%s: invalid offset size %s\n",
+					params.cmdname, optarg);
+				exit(EXIT_FAILURE);
+			}
+			break;
+		case 'q':
+			params.quiet = 1;
+			break;
+		case 'r':
+			params.require_keys = 1;
+			break;
+		case 'R':
+			/*
+			 * This entry is for the second configuration
+			 * file, for cases where one is not enough.
+			 */
+			params.imagename2 = optarg;
+			break;
+		case 's':
+			params.skipcpy = 1;
+			break;
+		case 'T':
+			if (strcmp(optarg, "list") == 0) {
+				show_valid_options(IH_TYPE);
+				exit(EXIT_SUCCESS);
+			}
+			type = genimg_get_type_id(optarg);
+			if (type < 0) {
+				show_valid_options(IH_TYPE);
+				usage("Invalid image type");
+			}
+			break;
+		case 'v':
+			params.vflag++;
+			break;
+		case 'V':
+			printf("mkimage version %s\n", PLAIN_VERSION);
+			exit(EXIT_SUCCESS);
+		case 'x':
+			params.xflag++;
+			break;
+		default:
+			usage("Invalid option");
+		}
+	}
+
+	/* The last parameter is expected to be the imagefile */
+	if (optind < argc)
+		params.imagefile = argv[optind];
+
+	/*
+	 * For auto-generated FIT images we need to know the image type to put
+	 * in the FIT, which is separate from the file's image type (which
+	 * will always be IH_TYPE_FLATDT in this case).
+	 */
+	if (params.type == IH_TYPE_FLATDT) {
+		params.fit_image_type = type ? type : IH_TYPE_KERNEL;
+		/* For auto_its, datafile is always 'auto' */
+		if (!params.auto_its)
+			params.datafile = datafile;
+		else if (!params.datafile)
+			usage("Missing data file for auto-FIT (use -d)");
+	} else if (type != IH_TYPE_INVALID) {
+		if (type == IH_TYPE_SCRIPT && !params.datafile)
+			usage("Missing data file for script (use -d)");
+		params.type = type;
+	}
+
+	if (!params.imagefile)
+		usage("Missing output filename");
+}
+
+int main(int argc, char **argv)
+{
+	int ifd = -1;
+	struct stat sbuf;
+	char *ptr;
+	int retval = 0;
+	struct image_type_params *tparams = NULL;
+	int pad_len = 0;
+	int dfd;
+	size_t map_len;
+
+	params.cmdname = *argv;
+	params.addr = 0;
+	params.ep = 0;
+
+	process_args(argc, argv);
+
+	/* set tparams as per input type_id */
+	tparams = imagetool_get_type(params.type);
+	if (tparams == NULL) {
+		fprintf (stderr, "%s: unsupported type %s\n",
+			params.cmdname, genimg_get_type_name(params.type));
+		exit (EXIT_FAILURE);
+	}
+
+	/*
+	 * Check that the passed parameters meet the requirements of the
+	 * image type to be generated/listed.
+	 */
+	if (tparams->check_params)
+		if (tparams->check_params (&params))
+			usage("Bad parameters for image type");
+
+	if (!params.eflag) {
+		params.ep = params.addr;
+		/* If XIP, entry point must be after the U-Boot header */
+		if (params.xflag)
+			params.ep += tparams->header_size;
+	}
+
+	if (params.fflag){
+		if (tparams->fflag_handle)
+			/*
+			 * in some cases, some additional processing needs
+			 * In some cases, additional processing needs to be
+			 * done when fflag is set.
+			 *
+			 * For example, fit_handle_file() for FIT file support.
+			retval = tparams->fflag_handle(&params);
+
+		if (retval != EXIT_SUCCESS)
+			exit (retval);
+	}
+
+	if (params.lflag || params.fflag) {
+		ifd = open (params.imagefile, O_RDONLY|O_BINARY);
+	} else {
+		ifd = open (params.imagefile,
+			O_RDWR|O_CREAT|O_TRUNC|O_BINARY, 0666);
+	}
+
+	if (ifd < 0) {
+		fprintf (stderr, "%s: Can't open %s: %s\n",
+			params.cmdname, params.imagefile,
+			strerror(errno));
+		exit (EXIT_FAILURE);
+	}
+
+	if (params.lflag || params.fflag) {
+		/*
+		 * list header information of existing image
+		 */
+		if (fstat(ifd, &sbuf) < 0) {
+			fprintf (stderr, "%s: Can't stat %s: %s\n",
+				params.cmdname, params.imagefile,
+				strerror(errno));
+			exit (EXIT_FAILURE);
+		}
+
+		if ((unsigned)sbuf.st_size < tparams->header_size) {
+			fprintf (stderr,
+				"%s: Bad size: \"%s\" is not valid image\n",
+				params.cmdname, params.imagefile);
+			exit (EXIT_FAILURE);
+		}
+
+		ptr = mmap(0, sbuf.st_size, PROT_READ, MAP_SHARED, ifd, 0);
+		if (ptr == MAP_FAILED) {
+			fprintf (stderr, "%s: Can't read %s: %s\n",
+				params.cmdname, params.imagefile,
+				strerror(errno));
+			exit (EXIT_FAILURE);
+		}
+
+		/*
+		 * Scan through the mkimage registry of all supported image
+		 * types and check the input image file header for a match.
+		 * Print the image information for the matched image type;
+		 * an error code is returned if no type matches.
+		 */
+		retval = imagetool_verify_print_header(ptr, &sbuf,
+				tparams, &params);
+
+		(void) munmap((void *)ptr, sbuf.st_size);
+		(void) close (ifd);
+
+		exit (retval);
+	}
+
+	if ((params.type != IH_TYPE_MULTI) && (params.type != IH_TYPE_SCRIPT)) {
+		dfd = open(params.datafile, O_RDONLY | O_BINARY);
+		if (dfd < 0) {
+			fprintf(stderr, "%s: Can't open %s: %s\n",
+				params.cmdname, params.datafile,
+				strerror(errno));
+			exit(EXIT_FAILURE);
+		}
+
+		if (fstat(dfd, &sbuf) < 0) {
+			fprintf(stderr, "%s: Can't stat %s: %s\n",
+				params.cmdname, params.datafile,
+				strerror(errno));
+			exit(EXIT_FAILURE);
+		}
+
+		params.file_size = sbuf.st_size + tparams->header_size;
+		close(dfd);
+	}
+
+	/*
+	 * In case a header with a variable length will be added, the
+	 * corresponding function is called here.  That function is
+	 * responsible for allocating memory for the header itself.
+	 */
+	if (tparams->vrec_header)
+		pad_len = tparams->vrec_header(&params, tparams);
+	else
+		memset(tparams->hdr, 0, tparams->header_size);
+
+	if (write(ifd, tparams->hdr, tparams->header_size)
+					!= tparams->header_size) {
+		fprintf (stderr, "%s: Write error on %s: %s\n",
+			params.cmdname, params.imagefile, strerror(errno));
+		exit (EXIT_FAILURE);
+	}
+
+	if (!params.skipcpy) {
+		if (params.type == IH_TYPE_MULTI ||
+		    params.type == IH_TYPE_SCRIPT) {
+			char *file = params.datafile;
+			uint32_t size;
+
+			for (;;) {
+				char *sep = NULL;
+
+				if (file) {
+					if ((sep = strchr(file, ':')) != NULL) {
+						*sep = '\0';
+					}
+
+					if (stat (file, &sbuf) < 0) {
+						fprintf (stderr, "%s: Can't stat %s: %s\n",
+							 params.cmdname, file, strerror(errno));
+						exit (EXIT_FAILURE);
+					}
+					size = cpu_to_uimage (sbuf.st_size);
+				} else {
+					size = 0;
+				}
+
+				if (write(ifd, (char *)&size, sizeof(size)) != sizeof(size)) {
+					fprintf (stderr, "%s: Write error on %s: %s\n",
+						 params.cmdname, params.imagefile,
+						 strerror(errno));
+					exit (EXIT_FAILURE);
+				}
+
+				if (!file) {
+					break;
+				}
+
+				if (sep) {
+					*sep = ':';
+					file = sep + 1;
+				} else {
+					file = NULL;
+				}
+			}
+
+			file = params.datafile;
+
+			for (;;) {
+				char *sep = strchr(file, ':');
+				if (sep) {
+					*sep = '\0';
+					copy_file (ifd, file, 1);
+					*sep++ = ':';
+					file = sep;
+				} else {
+					copy_file (ifd, file, 0);
+					break;
+				}
+			}
+		} else if (params.type == IH_TYPE_PBLIMAGE) {
+			/* PBL has a special image format and implements its own copying */
+			pbl_load_uboot(ifd, &params);
+		} else if (params.type == IH_TYPE_ZYNQMPBIF) {
+			/* Image file is meta, walk through actual targets */
+			int ret;
+
+			ret = zynqmpbif_copy_image(ifd, &params);
+			if (ret)
+				return ret;
+		} else if (params.type == IH_TYPE_IMX8IMAGE) {
+			/* i.MX8/8X has special Image format */
+			int ret;
+
+			ret = imx8image_copy_image(ifd, &params);
+			if (ret)
+				return ret;
+		} else if (params.type == IH_TYPE_IMX8MIMAGE) {
+			/* i.MX8M has special Image format */
+			int ret;
+
+			ret = imx8mimage_copy_image(ifd, &params);
+			if (ret)
+				return ret;
+		} else {
+			copy_file(ifd, params.datafile, pad_len);
+		}
+		if (params.type == IH_TYPE_FIRMWARE_IVT) {
+			/* Add alignment and IVT */
+			uint32_t aligned_filesize = (params.file_size + 0x1000
+					- 1) & ~(0x1000 - 1);
+			flash_header_v2_t ivt_header = { { 0xd1, 0x2000, 0x40 },
+					params.addr, 0, 0, 0, params.addr
+							+ aligned_filesize
+							- tparams->header_size,
+					params.addr + aligned_filesize
+							- tparams->header_size
+							+ 0x20, 0 };
+			int i = params.file_size;
+			for (; i < aligned_filesize; i++) {
+				if (write(ifd, (char *) &i, 1) != 1) {
+					fprintf(stderr,
+							"%s: Write error on %s: %s\n",
+							params.cmdname,
+							params.imagefile,
+							strerror(errno));
+					exit(EXIT_FAILURE);
+				}
+			}
+			if (write(ifd, &ivt_header, sizeof(flash_header_v2_t))
+					!= sizeof(flash_header_v2_t)) {
+				fprintf(stderr, "%s: Write error on %s: %s\n",
+						params.cmdname,
+						params.imagefile,
+						strerror(errno));
+				exit(EXIT_FAILURE);
+			}
+		}
+	}
+
+	/* We're a bit paranoid */
+#if defined(_POSIX_SYNCHRONIZED_IO) && \
+   !defined(__sun__) && \
+   !defined(__FreeBSD__) && \
+   !defined(__OpenBSD__) && \
+   !defined(__APPLE__)
+	(void) fdatasync (ifd);
+#else
+	(void) fsync (ifd);
+#endif
+
+	if (fstat(ifd, &sbuf) < 0) {
+		fprintf (stderr, "%s: Can't stat %s: %s\n",
+			params.cmdname, params.imagefile, strerror(errno));
+		exit (EXIT_FAILURE);
+	}
+	params.file_size = sbuf.st_size;
+
+	map_len = sbuf.st_size;
+	ptr = mmap(0, map_len, PROT_READ | PROT_WRITE, MAP_SHARED, ifd, 0);
+	if (ptr == MAP_FAILED) {
+		fprintf (stderr, "%s: Can't map %s: %s\n",
+			params.cmdname, params.imagefile, strerror(errno));
+		exit (EXIT_FAILURE);
+	}
+
+	/* Set up the image header as per the input image type */
+	if (tparams->set_header)
+		tparams->set_header (ptr, &sbuf, ifd, &params);
+	else {
+		fprintf (stderr, "%s: Can't set header for %s: %s\n",
+			params.cmdname, tparams->name, strerror(errno));
+		exit (EXIT_FAILURE);
+	}
+
+	/* Print the image information by processing image header */
+	if (tparams->print_header)
+		tparams->print_header (ptr);
+	else {
+		fprintf (stderr, "%s: Can't print header for %s\n",
+			params.cmdname, tparams->name);
+	}
+
+	(void)munmap((void *)ptr, map_len);
+
+	/* We're a bit paranoid */
+#if defined(_POSIX_SYNCHRONIZED_IO) && \
+   !defined(__sun__) && \
+   !defined(__FreeBSD__) && \
+   !defined(__OpenBSD__) && \
+   !defined(__APPLE__)
+	(void) fdatasync (ifd);
+#else
+	(void) fsync (ifd);
+#endif
+
+	if (close(ifd)) {
+		fprintf (stderr, "%s: Write error on %s: %s\n",
+			params.cmdname, params.imagefile, strerror(errno));
+		exit (EXIT_FAILURE);
+	}
+
+	exit (EXIT_SUCCESS);
+}
+
+static void
+copy_file (int ifd, const char *datafile, int pad)
+{
+	int dfd;
+	struct stat sbuf;
+	unsigned char *ptr;
+	int tail;
+	int zero = 0;
+	uint8_t zeros[4096];
+	int offset = 0;
+	int size;
+	struct image_type_params *tparams = imagetool_get_type(params.type);
+
+	memset(zeros, 0, sizeof(zeros));
+
+	if (params.vflag) {
+		fprintf (stderr, "Adding Image %s\n", datafile);
+	}
+
+	if ((dfd = open(datafile, O_RDONLY|O_BINARY)) < 0) {
+		fprintf (stderr, "%s: Can't open %s: %s\n",
+			params.cmdname, datafile, strerror(errno));
+		exit (EXIT_FAILURE);
+	}
+
+	if (fstat(dfd, &sbuf) < 0) {
+		fprintf (stderr, "%s: Can't stat %s: %s\n",
+			params.cmdname, datafile, strerror(errno));
+		exit (EXIT_FAILURE);
+	}
+
+	ptr = mmap(0, sbuf.st_size, PROT_READ, MAP_SHARED, dfd, 0);
+	if (ptr == MAP_FAILED) {
+		fprintf (stderr, "%s: Can't read %s: %s\n",
+			params.cmdname, datafile, strerror(errno));
+		exit (EXIT_FAILURE);
+	}
+
+	if (params.xflag) {
+		unsigned char *p = NULL;
+		/*
+		 * XIP: do not add the image_header_t in front of the image
+		 * data, but let the header consume the space already
+		 * reserved for it at the start of the data file.
+		 */
+
+		if ((unsigned)sbuf.st_size < tparams->header_size) {
+			fprintf (stderr,
+				"%s: Bad size: \"%s\" is too small for XIP\n",
+				params.cmdname, datafile);
+			exit (EXIT_FAILURE);
+		}
+
+		for (p = ptr; p < ptr + tparams->header_size; p++) {
+			if ( *p != 0xff ) {
+				fprintf (stderr,
+					"%s: Bad file: \"%s\" has invalid buffer for XIP\n",
+					params.cmdname, datafile);
+				exit (EXIT_FAILURE);
+			}
+		}
+
+		offset = tparams->header_size;
+	}
+
+	size = sbuf.st_size - offset;
+	if (write(ifd, ptr + offset, size) != size) {
+		fprintf (stderr, "%s: Write error on %s: %s\n",
+			params.cmdname, params.imagefile, strerror(errno));
+		exit (EXIT_FAILURE);
+	}
+
+	tail = size % 4;
+	if ((pad == 1) && (tail != 0)) {
+
+		if (write(ifd, (char *)&zero, 4-tail) != 4-tail) {
+			fprintf (stderr, "%s: Write error on %s: %s\n",
+				params.cmdname, params.imagefile,
+				strerror(errno));
+			exit (EXIT_FAILURE);
+		}
+	} else if (pad > 1) {
+		while (pad > 0) {
+			int todo = sizeof(zeros);
+
+			if (todo > pad)
+				todo = pad;
+			if (write(ifd, (char *)&zeros, todo) != todo) {
+				fprintf(stderr, "%s: Write error on %s: %s\n",
+					params.cmdname, params.imagefile,
+					strerror(errno));
+				exit(EXIT_FAILURE);
+			}
+			pad -= todo;
+		}
+	}
+
+	(void) munmap((void *)ptr, sbuf.st_size);
+	(void) close (dfd);
+}
diff --git a/tools/u-boot-tools/mkimage.h b/tools/u-boot-tools/mkimage.h
new file mode 100644
index 0000000000000000000000000000000000000000..0254af59fbed9531d994907d91b4d80a7c2df93b
--- /dev/null
+++ b/tools/u-boot-tools/mkimage.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2000-2004
+ * DENX Software Engineering
+ * Wolfgang Denk, wd@denx.de
+ */
+
+#ifndef _MKIIMAGE_H_
+#define _MKIIMAGE_H_
+
+#include "os_support.h"
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <unistd.h>
+#include <u-boot/sha1.h>
+#include "fdt_host.h"
+#include "imagetool.h"
+
+#undef MKIMAGE_DEBUG
+
+#ifdef MKIMAGE_DEBUG
+#define debug(fmt,args...)	printf (fmt ,##args)
+#else
+#define debug(fmt,args...)
+#endif /* MKIMAGE_DEBUG */
+
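+/*
+ * Host-side stubs: when building the tools there is no separate "system
+ * memory" address space, so mapping between addresses and pointers is simply
+ * the identity (the len argument is unused).
+ */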
+static inline void *map_sysmem(ulong paddr, unsigned long len)
+{
+	return (void *)(uintptr_t)paddr;
+}
+
+static inline ulong map_to_sysmem(void *ptr)
+{
+	return (ulong)(uintptr_t)ptr;
+}
+
+#define MKIMAGE_TMPFILE_SUFFIX		".tmp"
+#define MKIMAGE_MAX_TMPFILE_LEN		256
+#define MKIMAGE_DEFAULT_DTC_OPTIONS	"-I dts -O dtb -p 500"
+#define MKIMAGE_MAX_DTC_CMDLINE_LEN	512
+
+#endif /* _MKIIMAGE_H_ */
diff --git a/tools/u-boot-tools/mkimage.o b/tools/u-boot-tools/mkimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..65df7a86254df5be833db9acf07345e54ab0b7f8
Binary files /dev/null and b/tools/u-boot-tools/mkimage.o differ
diff --git a/tools/u-boot-tools/mksunxiboot.c b/tools/u-boot-tools/mksunxiboot.c
new file mode 100644
index 0000000000000000000000000000000000000000..1c8701e75edf8c126b512d0027a2f12f1339b4a7
--- /dev/null
+++ b/tools/u-boot-tools/mksunxiboot.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2007-2011
+ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
+ * Tom Cubie <tangliang@allwinnertech.com>
+ *
+ * a simple tool to generate bootable image for sunxi platform.
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include "../arch/arm/include/asm/arch-sunxi/spl.h"
+
+#define STAMP_VALUE                     0x5F0A6C39
+
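+/*
+ * Checksum scheme (as implemented below): the header's check_sum field is
+ * first set to STAMP_VALUE, all 32-bit little-endian words of the image are
+ * summed, and the result is written back into check_sum.  A verifier can
+ * repeat the same computation and compare against the stored value.
+ */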
+/* checksum function from sun4i boot code */
+int gen_check_sum(struct boot_file_head *head_p)
+{
+	uint32_t length;
+	uint32_t *buf;
+	uint32_t loop;
+	uint32_t i;
+	uint32_t sum;
+
+	length = le32_to_cpu(head_p->length);
+	if ((length & 0x3) != 0)	/* must be 4-byte aligned */
+		return -1;
+	buf = (uint32_t *)head_p;
+	head_p->check_sum = cpu_to_le32(STAMP_VALUE);	/* fill stamp */
+	loop = length >> 2;
+
+	/* calculate the sum */
+	for (i = 0, sum = 0; i < loop; i++)
+		sum += le32_to_cpu(buf[i]);
+
+	/* write back check sum */
+	head_p->check_sum = cpu_to_le32(sum);
+
+	return 0;
+}
+
+#define ALIGN(x, a) __ALIGN_MASK((x), (typeof(x))(a)-1)
+#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
+
+#define SUNXI_SRAM_SIZE 0x8000	/* SoCs with smaller SRAM are limited before this */
+#define SRAM_LOAD_MAX_SIZE (SUNXI_SRAM_SIZE - sizeof(struct boot_file_head))
+
+/*
+ * BROM (at least on A10 and A20) requires NAND-images to be explicitly aligned
+ * to a multiple of 8K, and rejects the image otherwise. MMC-images are fine
+ * with 512B blocks. To cater for both, align to the largest of the two.
+ */
+#define BLOCK_SIZE 0x2000
+
+struct boot_img {
+	struct boot_file_head header;
+	char code[SRAM_LOAD_MAX_SIZE];
+	char pad[BLOCK_SIZE];
+};
+
+int main(int argc, char *argv[])
+{
+	int fd_in, fd_out;
+	struct boot_img img;
+	unsigned file_size;
+	int count;
+	char *tool_name = argv[0];
+	char *default_dt = NULL;
+
+	/* a sanity check */
+	if ((sizeof(img.header) % 32) != 0) {
+		fprintf(stderr, "ERROR: the SPL header must be a multiple ");
+		fprintf(stderr, "of 32 bytes.\n");
+		return EXIT_FAILURE;
+	}
+
+	/* process optional command line switches */
+	while (argc >= 2 && argv[1][0] == '-') {
+		if (strcmp(argv[1], "--default-dt") == 0) {
+			if (argc >= 3) {
+				default_dt = argv[2];
+				argv += 2;
+				argc -= 2;
+				continue;
+			}
+			fprintf(stderr, "ERROR: no --default-dt arg\n");
+			return EXIT_FAILURE;
+		} else {
+			fprintf(stderr, "ERROR: bad option '%s'\n", argv[1]);
+			return EXIT_FAILURE;
+		}
+	}
+
+	if (argc < 3) {
+		printf("This program converts an input binary file to a sunxi bootable image.\n");
+		printf("\nUsage: %s [options] input_file output_file\n",
+		       tool_name);
+		printf("Where [options] may be:\n");
+		printf("  --default-dt arg         - 'arg' is the default device tree name\n");
+		printf("                             (CONFIG_DEFAULT_DEVICE_TREE).\n");
+		return EXIT_FAILURE;
+	}
+
+	fd_in = open(argv[1], O_RDONLY);
+	if (fd_in < 0) {
+		perror("Open input file");
+		return EXIT_FAILURE;
+	}
+
+	memset(&img, 0, sizeof(img));
+
+	/* get input file size */
+	file_size = lseek(fd_in, 0, SEEK_END);
+
+	if (file_size > SRAM_LOAD_MAX_SIZE) {
+		fprintf(stderr, "ERROR: File too large!\n");
+		return EXIT_FAILURE;
+	}
+
+	fd_out = open(argv[2], O_WRONLY | O_CREAT, 0666);
+	if (fd_out < 0) {
+		perror("Open output file");
+		return EXIT_FAILURE;
+	}
+
+	/* read file to buffer to calculate checksum */
+	lseek(fd_in, 0, SEEK_SET);
+	count = read(fd_in, img.code, file_size);
+	if (count != file_size) {
+		perror("Reading input image");
+		return EXIT_FAILURE;
+	}
+
+	/* fill the header */
+	img.header.b_instruction =	/* b instruction */
+		0xEA000000 |	/* jump to the first instr after the header */
+		((sizeof(struct boot_file_head) / sizeof(int) - 2)
+		 & 0x00FFFFFF);
+	memcpy(img.header.magic, BOOT0_MAGIC, 8);	/* no '0' termination */
+	img.header.length =
+		ALIGN(file_size + sizeof(struct boot_file_head), BLOCK_SIZE);
+	img.header.b_instruction = cpu_to_le32(img.header.b_instruction);
+	img.header.length = cpu_to_le32(img.header.length);
+
+	memcpy(img.header.spl_signature, SPL_SIGNATURE, 3); /* "sunxi" marker */
+	img.header.spl_signature[3] = SPL_HEADER_VERSION;
+
+	if (default_dt) {
+		if (strlen(default_dt) + 1 <= sizeof(img.header.string_pool)) {
+			strcpy((char *)img.header.string_pool, default_dt);
+			img.header.dt_name_offset =
+				cpu_to_le32(offsetof(struct boot_file_head,
+						     string_pool));
+		} else {
+			printf("WARNING: The SPL header is too small\n");
+			printf("         and has no space to store the dt name.\n");
+		}
+	}
+
+	gen_check_sum(&img.header);
+
+	count = write(fd_out, &img, le32_to_cpu(img.header.length));
+	if (count != le32_to_cpu(img.header.length)) {
+		perror("Writing output");
+		return EXIT_FAILURE;
+	}
+
+	close(fd_in);
+	close(fd_out);
+
+	return EXIT_SUCCESS;
+}
diff --git a/tools/u-boot-tools/moveconfig.py b/tools/u-boot-tools/moveconfig.py
new file mode 100755
index 0000000000000000000000000000000000000000..caa81ac2ed77af9e635c2b577e3838b314812cdc
--- /dev/null
+++ b/tools/u-boot-tools/moveconfig.py
@@ -0,0 +1,1885 @@
+#!/usr/bin/env python2
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+#
+
+"""
+Move config options from headers to defconfig files.
+
+Since Kconfig was introduced to U-Boot, we have worked on moving
+config options from headers to Kconfig (defconfig).
+
+This tool intends to help this tremendous work.
+
+
+Usage
+-----
+
+First, you must edit the Kconfig to add the menu entries for the configs
+you are moving.
+
+Then run this tool, giving the CONFIG names you want to move.
+For example, if you want to move CONFIG_CMD_USB and CONFIG_SYS_TEXT_BASE,
+simply run:
+
+  $ tools/moveconfig.py CONFIG_CMD_USB CONFIG_SYS_TEXT_BASE
+
+The tool walks through all the defconfig files and moves the given CONFIGs.
+
+The log is also displayed on the terminal.
+
+The log is printed for each defconfig as follows:
+
+<defconfig_name>
+    <action1>
+    <action2>
+    <action3>
+    ...
+
+<defconfig_name> is the name of the defconfig.
+
+<action*> shows what the tool did for that defconfig.
+It looks like one of the following:
+
+ - Move 'CONFIG_... '
+   This config option was moved to the defconfig
+
+ - CONFIG_... is not defined in Kconfig.  Do nothing.
+   The entry for this CONFIG was not found in Kconfig.  The option is not
+   defined in the config header, either.  So, this case can be just skipped.
+
+ - CONFIG_... is not defined in Kconfig (suspicious).  Do nothing.
+   This option is defined in the config header, but its entry was not found
+   in Kconfig.
+   There are two common cases:
+     - You forgot to create an entry for the CONFIG before running
+       this tool, or made a typo in a CONFIG passed to this tool.
+     - The entry was hidden due to unmet 'depends on'.
+   The tool does not know if the result is reasonable, so please check it
+   manually.
+
+ - 'CONFIG_...' is the same as the define in Kconfig.  Do nothing.
+   The define in the config header matched the one in Kconfig.
+   We do not need to touch it.
+
+ - Compiler is missing.  Do nothing.
+   The compiler specified for this architecture was not found
+   in your PATH environment.
+   (If -e option is passed, the tool exits immediately.)
+
+ - Failed to process.
+   An error occurred during processing this defconfig.  Skipped.
+   (If -e option is passed, the tool exits immediately on error.)
+
+Finally, you will be asked, Clean up headers? [y/n]:
+
+If you say 'y' here, the unnecessary config defines are removed
+from the config headers (include/configs/*.h).
+It just uses a simple regex-based method, so you should not rely on it blindly.
+Just in case, please run 'git diff' to check what happened.
+
+
+How does it work?
+-----------------
+
+This tool runs configuration and builds include/autoconf.mk for every
+defconfig.  The config options defined in Kconfig appear in the .config
+file (unless they are hidden because of unmet dependency.)
+On the other hand, the config options defined by board headers are seen
+in include/autoconf.mk.  The tool looks for the specified options in both
+of them to decide the appropriate action for the options.  If the given
+config option is found in the .config, but its value does not match the
+one from the board header, the config option in the .config is replaced
+with the define in the board header.  Then, the .config is synced by
+"make savedefconfig" and the defconfig is updated with it.
+
+For faster processing, this tool handles multi-threading.  It creates
+separate build directories where the out-of-tree build is run.  The
+temporary build directories are automatically created and deleted as
+needed.  The number of threads is chosen based on the number of CPU cores
+of your system, although you can change it via the -j (--jobs) option.
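+
+For example (illustrative), moving a single CONFIG with eight build threads:
+
+  $ tools/moveconfig.py -j 8 CONFIG_CMD_USB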
+
+
+Toolchains
+----------
+
+Appropriate toolchains are necessary to generate include/autoconf.mk
+for all the architectures supported by U-Boot.  Most of them are available
+at the kernel.org site, though some are not provided there. This tool uses
+the same tools as buildman, so see that tool for setup (e.g. --fetch-arch).
+
+
+Tips and tricks
+---------------
+
+To sync only X86 defconfigs:
+
+   ./tools/moveconfig.py -s -d <(grep -l X86 configs/*)
+
+or:
+
+   grep -l X86 configs/* | ./tools/moveconfig.py -s -d -
+
+To process CONFIG_CMD_FPGAD only for a subset of configs based on path match:
+
+   ls configs/{hrcon*,iocon*,strider*} | \
+       ./tools/moveconfig.py -Cy CONFIG_CMD_FPGAD -d -
+
+
+Finding implied CONFIGs
+-----------------------
+
+Some CONFIG options can be implied by others and this can help to reduce
+the size of the defconfig files. For example, CONFIG_X86 implies
+CONFIG_CMD_IRQ, so we can put 'imply CMD_IRQ' under 'config X86' and
+all x86 boards will have that option, avoiding adding CONFIG_CMD_IRQ to
+each of the x86 defconfig files.
+
+This tool can help find such configs. To use it, first build a database:
+
+    ./tools/moveconfig.py -b
+
+Then try to query it:
+
+    ./tools/moveconfig.py -i CONFIG_CMD_IRQ
+    CONFIG_CMD_IRQ found in 311/2384 defconfigs
+    44 : CONFIG_SYS_FSL_ERRATUM_IFC_A002769
+    41 : CONFIG_SYS_FSL_ERRATUM_A007075
+    31 : CONFIG_SYS_FSL_DDR_VER_44
+    28 : CONFIG_ARCH_P1010
+    28 : CONFIG_SYS_FSL_ERRATUM_P1010_A003549
+    28 : CONFIG_SYS_FSL_ERRATUM_SEC_A003571
+    28 : CONFIG_SYS_FSL_ERRATUM_IFC_A003399
+    25 : CONFIG_SYS_FSL_ERRATUM_A008044
+    22 : CONFIG_ARCH_P1020
+    21 : CONFIG_SYS_FSL_DDR_VER_46
+    20 : CONFIG_MAX_PIRQ_LINKS
+    20 : CONFIG_HPET_ADDRESS
+    20 : CONFIG_X86
+    20 : CONFIG_PCIE_ECAM_SIZE
+    20 : CONFIG_IRQ_SLOT_COUNT
+    20 : CONFIG_I8259_PIC
+    20 : CONFIG_CPU_ADDR_BITS
+    20 : CONFIG_RAMBASE
+    20 : CONFIG_SYS_FSL_ERRATUM_A005871
+    20 : CONFIG_PCIE_ECAM_BASE
+    20 : CONFIG_X86_TSC_TIMER
+    20 : CONFIG_I8254_TIMER
+    20 : CONFIG_CMD_GETTIME
+    19 : CONFIG_SYS_FSL_ERRATUM_A005812
+    18 : CONFIG_X86_RUN_32BIT
+    17 : CONFIG_CMD_CHIP_CONFIG
+    ...
+
+This shows a list of config options which might imply CONFIG_CMD_IRQ along
+with how many defconfigs they cover. From this you can see that CONFIG_X86
+implies CONFIG_CMD_IRQ. Therefore, instead of adding CONFIG_CMD_IRQ to
+the defconfig of every x86 board, you could add a single imply line to the
+Kconfig file:
+
+    config X86
+        bool "x86 architecture"
+        ...
+        imply CMD_IRQ
+
+That will cover 20 defconfigs. Many of the options listed are not suitable as
+they are not related. E.g. it would be odd for CONFIG_CMD_GETTIME to imply
+CMD_IRQ.
+
+Using this search you can reduce the size of moveconfig patches.
+
+You can automatically add 'imply' statements in the Kconfig with the -a
+option:
+
+    ./tools/moveconfig.py -s -i CONFIG_SCSI \
+            -a CONFIG_ARCH_LS1021A,CONFIG_ARCH_LS1043A
+
+This will add 'imply SCSI' to the two CONFIG options mentioned, assuming that
+the database indicates that they do actually imply CONFIG_SCSI and do not
+already have an 'imply SCSI'.
+
+The output shows where the imply is added:
+
+   18 : CONFIG_ARCH_LS1021A       arch/arm/cpu/armv7/ls102xa/Kconfig:1
+   13 : CONFIG_ARCH_LS1043A       arch/arm/cpu/armv8/fsl-layerscape/Kconfig:11
+   12 : CONFIG_ARCH_LS1046A       arch/arm/cpu/armv8/fsl-layerscape/Kconfig:31
+
+The first number is the number of boards which can avoid having a special
+CONFIG_SCSI option in their defconfig file if this 'imply' is added.
+The location at the right is the Kconfig file and line number where the config
+appears. For example, adding 'imply CONFIG_SCSI' to the 'config ARCH_LS1021A'
+in arch/arm/cpu/armv7/ls102xa/Kconfig at line 1 will help 18 boards to reduce
+the size of their defconfig files.
+
+If you want to add an 'imply' to every imply config in the list, you can use
+
+    ./tools/moveconfig.py -s -i CONFIG_SCSI -a all
+
+To control which ones are displayed, use -I <list> where list is a list of
+options (use '-I help' to see possible options and their meaning).
+
+To skip showing you options that already have an 'imply' attached, use -A.
+
+When you have finished adding 'imply' options you can regenerate the
+defconfig files for affected boards with something like:
+
+    git show --stat | ./tools/moveconfig.py -s -d -
+
+This will regenerate only those defconfigs changed in the current commit.
+If you start with (say) 100 defconfigs being changed in the commit, and add
+a few 'imply' options as above, then regenerate, hopefully you can reduce the
+number of defconfigs changed in the commit.
+
+
+Available options
+-----------------
+
+ -c, --color
+   Surround each portion of the log with escape sequences to display it
+   in color on the terminal.
+
+ -C, --commit
+   Create a git commit with the changes when the operation is complete. A
+   standard commit message is used which may need to be edited.
+
+ -d, --defconfigs
+   Specify a file containing a list of defconfigs to move.  The defconfig
+   files can be given with shell-style wildcards. Use '-' to read from stdin.
+
+ -n, --dry-run
+   Perform a trial run that does not make any changes.  It is useful to
+   see what is going to happen before one actually runs it.
+
+ -e, --exit-on-error
+   Exit immediately if Make exits with a non-zero status while processing
+   a defconfig file.
+
+ -s, --force-sync
+   Do "make savedefconfig" forcibly for all the defconfig files.
+   If not specified, "make savedefconfig" only occurs for cases
+   where at least one CONFIG was moved.
+
+ -S, --spl
+   Look for moved config options in spl/include/autoconf.mk instead of
+   include/autoconf.mk.  This is useful for moving options for SPL build
+   because SPL related options (mostly prefixed with CONFIG_SPL_) are
+   sometimes blocked by CONFIG_SPL_BUILD ifdef conditionals.
+
+ -H, --headers-only
+   Only clean up the headers; skip the defconfig processing.
+
+ -j, --jobs
+   Specify the number of threads to run simultaneously.  If not specified,
+   the number of threads is the same as the number of CPU cores.
+
+ -r, --git-ref
+   Specify the git ref to clone for building the autoconf.mk. If unspecified
+   use the CWD. This is useful for when changes to the Kconfig affect the
+   default values and you want to capture the state of the defconfig from
+   before that change was in effect. If in doubt, specify a ref pre-Kconfig
+   changes (use HEAD if Kconfig changes are not committed). Worst case it will
+   take a bit longer to run, but will always do the right thing.
+
+ -v, --verbose
+   Show any build errors as boards are built
+
+ -y, --yes
+   Instead of prompting, automatically go ahead with all operations. This
+   includes cleaning up headers, CONFIG_SYS_EXTRA_OPTIONS, the config whitelist
+   and the README.
+
+To see the complete list of supported options, run
+
+  $ tools/moveconfig.py -h
+
+"""
+
+import collections
+import copy
+import difflib
+import filecmp
+import fnmatch
+import glob
+import multiprocessing
+import optparse
+import os
+import Queue
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+import threading
+import time
+
+sys.path.append(os.path.join(os.path.dirname(__file__), 'buildman'))
+sys.path.append(os.path.join(os.path.dirname(__file__), 'patman'))
+import bsettings
+import kconfiglib
+import toolchain
+
+SHOW_GNU_MAKE = 'scripts/show-gnu-make'
+SLEEP_TIME = 0.03
+
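+# States of the Slot state machine (see Slot.poll() for the transitions)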
+STATE_IDLE = 0
+STATE_DEFCONFIG = 1
+STATE_AUTOCONF = 2
+STATE_SAVEDEFCONFIG = 3
+
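+# Results of KconfigParser.parse_one_config(), used to decide how to handle
+# each CONFIG option for a defconfig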
+ACTION_MOVE = 0
+ACTION_NO_ENTRY = 1
+ACTION_NO_ENTRY_WARN = 2
+ACTION_NO_CHANGE = 3
+
+COLOR_BLACK        = '0;30'
+COLOR_RED          = '0;31'
+COLOR_GREEN        = '0;32'
+COLOR_BROWN        = '0;33'
+COLOR_BLUE         = '0;34'
+COLOR_PURPLE       = '0;35'
+COLOR_CYAN         = '0;36'
+COLOR_LIGHT_GRAY   = '0;37'
+COLOR_DARK_GRAY    = '1;30'
+COLOR_LIGHT_RED    = '1;31'
+COLOR_LIGHT_GREEN  = '1;32'
+COLOR_YELLOW       = '1;33'
+COLOR_LIGHT_BLUE   = '1;34'
+COLOR_LIGHT_PURPLE = '1;35'
+COLOR_LIGHT_CYAN   = '1;36'
+COLOR_WHITE        = '1;37'
+
+AUTO_CONF_PATH = 'include/config/auto.conf'
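+# Database file written by --build-db: each defconfig name appears on its own
+# line, followed by indented 'CONFIG_xxx=value' lines and a blank separator
+# line (written in main(), read back by do_imply_config())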
+CONFIG_DATABASE = 'moveconfig.db'
+
+CONFIG_LEN = len('CONFIG_')
+
+### helper functions ###
+def get_devnull():
+    """Get the file object of '/dev/null' device."""
+    try:
+        devnull = subprocess.DEVNULL # py3k
+    except AttributeError:
+        devnull = open(os.devnull, 'wb')
+    return devnull
+
+def check_top_directory():
+    """Exit if we are not at the top of source directory."""
+    for f in ('README', 'Licenses'):
+        if not os.path.exists(f):
+            sys.exit('Please run at the top of source directory.')
+
+def check_clean_directory():
+    """Exit if the source tree is not clean."""
+    for f in ('.config', 'include/config'):
+        if os.path.exists(f):
+            sys.exit("source tree is not clean, please run 'make mrproper'")
+
+def get_make_cmd():
+    """Get the command name of GNU Make.
+
+    U-Boot needs GNU Make for building, but the command name is not
+    necessarily "make". (for example, "gmake" on FreeBSD).
+    Returns the most appropriate command name on your system.
+    """
+    process = subprocess.Popen([SHOW_GNU_MAKE], stdout=subprocess.PIPE)
+    ret = process.communicate()
+    if process.returncode:
+        sys.exit('GNU Make not found')
+    return ret[0].rstrip()
+
+def get_matched_defconfig(line):
+    """Get the defconfig files that match a pattern
+
+    Args:
+        line: Path or filename to match, e.g. 'configs/snow_defconfig' or
+            'k2*_defconfig'. If no directory is provided, 'configs/' is
+            prepended
+
+    Returns:
+        a list of matching defconfig files
+    """
+    dirname = os.path.dirname(line)
+    if dirname:
+        pattern = line
+    else:
+        pattern = os.path.join('configs', line)
+    return glob.glob(pattern) + glob.glob(pattern + '_defconfig')
+
+def get_matched_defconfigs(defconfigs_file):
+    """Get all the defconfig files that match the patterns in a file.
+
+    Args:
+        defconfigs_file: File containing a list of defconfigs to process, or
+            '-' to read the list from stdin
+
+    Returns:
+        A list of paths to defconfig files, with no duplicates
+    """
+    defconfigs = []
+    if defconfigs_file == '-':
+        fd = sys.stdin
+        defconfigs_file = 'stdin'
+    else:
+        fd = open(defconfigs_file)
+    for i, line in enumerate(fd):
+        line = line.strip()
+        if not line:
+            continue # skip blank lines silently
+        if ' ' in line:
+            line = line.split(' ')[0]  # handle 'git log' input
+        matched = get_matched_defconfig(line)
+        if not matched:
+            print >> sys.stderr, "warning: %s:%d: no defconfig matched '%s'" % \
+                                                 (defconfigs_file, i + 1, line)
+
+        defconfigs += matched
+
+    # use set() to drop multiple matching
+    return [ defconfig[len('configs') + 1:]  for defconfig in set(defconfigs) ]
+
+def get_all_defconfigs():
+    """Get all the defconfig files under the configs/ directory."""
+    defconfigs = []
+    for (dirpath, dirnames, filenames) in os.walk('configs'):
+        dirpath = dirpath[len('configs') + 1:]
+        for filename in fnmatch.filter(filenames, '*_defconfig'):
+            defconfigs.append(os.path.join(dirpath, filename))
+
+    return defconfigs
+
+def color_text(color_enabled, color, string):
+    """Return colored string."""
+    if color_enabled:
+        # LF should not be surrounded by the escape sequence.
+        # Otherwise, additional whitespace or line-feed might be printed.
+        return '\n'.join([ '\033[' + color + 'm' + s + '\033[0m' if s else ''
+                           for s in string.split('\n') ])
+    else:
+        return string
+
+def show_diff(a, b, file_path, color_enabled):
+    """Show a unified diff.
+
+    Arguments:
+      a: A list of lines (before)
+      b: A list of lines (after)
+      file_path: Path to the file
+      color_enabled: Display the diff in color
+    """
+
+    diff = difflib.unified_diff(a, b,
+                                fromfile=os.path.join('a', file_path),
+                                tofile=os.path.join('b', file_path))
+
+    for line in diff:
+        if line[0] == '-' and line[1] != '-':
+            print color_text(color_enabled, COLOR_RED, line),
+        elif line[0] == '+' and line[1] != '+':
+            print color_text(color_enabled, COLOR_GREEN, line),
+        else:
+            print line,
+
+def extend_matched_lines(lines, matched, pre_patterns, post_patterns, extend_pre,
+                         extend_post):
+    """Extend matched lines if desired patterns are found before/after already
+    matched lines.
+
+    Arguments:
+      lines: A list of lines handled.
+      matched: A list of line numbers that have been already matched.
+               (will be updated by this function)
+      pre_patterns: A list of regular expression that should be matched as
+                    preamble.
+      post_patterns: A list of regular expression that should be matched as
+                     postamble.
+      extend_pre: Add the line number of matched preamble to the matched list.
+      extend_post: Add the line number of matched postamble to the matched list.
+    """
+    extended_matched = []
+
+    j = matched[0]
+
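+    # For each run of consecutive matched lines, check whether the line just
+    # before the run matches one of pre_patterns and the line just after it
+    # matches one of post_patterns.  If both do, optionally add those
+    # surrounding lines to the matched list as well.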
+    for i in matched:
+        if i == 0 or i < j:
+            continue
+        j = i
+        while j in matched:
+            j += 1
+        if j >= len(lines):
+            break
+
+        for p in pre_patterns:
+            if p.search(lines[i - 1]):
+                break
+        else:
+            # not matched
+            continue
+
+        for p in post_patterns:
+            if p.search(lines[j]):
+                break
+        else:
+            # not matched
+            continue
+
+        if extend_pre:
+            extended_matched.append(i - 1)
+        if extend_post:
+            extended_matched.append(j)
+
+    matched += extended_matched
+    matched.sort()
+
+def confirm(options, prompt):
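+    """Ask the user to confirm an operation.
+
+    Arguments:
+      options: option flags.  If options.yes is set, no prompt is shown.
+      prompt: Question to ask the user.
+
+    Returns:
+      True if the user answers 'y' or options.yes is set, False otherwise.
+    """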
+    if not options.yes:
+        while True:
+            choice = raw_input('{} [y/n]: '.format(prompt))
+            choice = choice.lower()
+            print choice
+            if choice == 'y' or choice == 'n':
+                break
+
+        if choice == 'n':
+            return False
+
+    return True
+
+def cleanup_one_header(header_path, patterns, options):
+    """Clean regex-matched lines away from a file.
+
+    Arguments:
+      header_path: path to the cleaned file.
+      patterns: list of regex patterns.  Any lines matching to these
+                patterns are deleted.
+      options: option flags.
+    """
+    with open(header_path) as f:
+        lines = f.readlines()
+
+    matched = []
+    for i, line in enumerate(lines):
+        if i - 1 in matched and lines[i - 1][-2:] == '\\\n':
+            matched.append(i)
+            continue
+        for pattern in patterns:
+            if pattern.search(line):
+                matched.append(i)
+                break
+
+    if not matched:
+        return
+
+    # remove empty #ifdef ... #endif, successive blank lines
+    pattern_if = re.compile(r'#\s*if(def|ndef)?\W') #  #if, #ifdef, #ifndef
+    pattern_elif = re.compile(r'#\s*el(if|se)\W')   #  #elif, #else
+    pattern_endif = re.compile(r'#\s*endif\W')      #  #endif
+    pattern_blank = re.compile(r'^\s*$')            #  empty line
+
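+    # Keep extending the matched lines until no more lines are added, so that
+    # nested empty blocks are removed as well.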
+    while True:
+        old_matched = copy.copy(matched)
+        extend_matched_lines(lines, matched, [pattern_if],
+                             [pattern_endif], True, True)
+        extend_matched_lines(lines, matched, [pattern_elif],
+                             [pattern_elif, pattern_endif], True, False)
+        extend_matched_lines(lines, matched, [pattern_if, pattern_elif],
+                             [pattern_blank], False, True)
+        extend_matched_lines(lines, matched, [pattern_blank],
+                             [pattern_elif, pattern_endif], True, False)
+        extend_matched_lines(lines, matched, [pattern_blank],
+                             [pattern_blank], True, False)
+        if matched == old_matched:
+            break
+
+    tolines = copy.copy(lines)
+
+    for i in reversed(matched):
+        tolines.pop(i)
+
+    show_diff(lines, tolines, header_path, options.color)
+
+    if options.dry_run:
+        return
+
+    with open(header_path, 'w') as f:
+        for line in tolines:
+            f.write(line)
+
+def cleanup_headers(configs, options):
+    """Delete config defines from board headers.
+
+    Arguments:
+      configs: A list of CONFIGs to remove.
+      options: option flags.
+    """
+    if not confirm(options, 'Clean up headers?'):
+        return
+
+    patterns = []
+    for config in configs:
+        patterns.append(re.compile(r'#\s*define\s+%s\W' % config))
+        patterns.append(re.compile(r'#\s*undef\s+%s\W' % config))
+
+    for dir in 'include', 'arch', 'board':
+        for (dirpath, dirnames, filenames) in os.walk(dir):
+            if dirpath == os.path.join('include', 'generated'):
+                continue
+            for filename in filenames:
+                if not fnmatch.fnmatch(filename, '*~'):
+                    cleanup_one_header(os.path.join(dirpath, filename),
+                                       patterns, options)
+
+def cleanup_one_extra_option(defconfig_path, configs, options):
+    """Delete config defines in CONFIG_SYS_EXTRA_OPTIONS in one defconfig file.
+
+    Arguments:
+      defconfig_path: path to the cleaned defconfig file.
+      configs: A list of CONFIGs to remove.
+      options: option flags.
+    """
+
+    start = 'CONFIG_SYS_EXTRA_OPTIONS="'
+    end = '"\n'
+
+    with open(defconfig_path) as f:
+        lines = f.readlines()
+
+    for i, line in enumerate(lines):
+        if line.startswith(start) and line.endswith(end):
+            break
+    else:
+        # CONFIG_SYS_EXTRA_OPTIONS was not found in this defconfig
+        return
+
+    old_tokens = line[len(start):-len(end)].split(',')
+    new_tokens = []
+
+    for token in old_tokens:
+        pos = token.find('=')
+        if not (token[:pos] if pos >= 0 else token) in configs:
+            new_tokens.append(token)
+
+    if new_tokens == old_tokens:
+        return
+
+    tolines = copy.copy(lines)
+
+    if new_tokens:
+        tolines[i] = start + ','.join(new_tokens) + end
+    else:
+        tolines.pop(i)
+
+    show_diff(lines, tolines, defconfig_path, options.color)
+
+    if options.dry_run:
+        return
+
+    with open(defconfig_path, 'w') as f:
+        for line in tolines:
+            f.write(line)
+
+def cleanup_extra_options(configs, options):
+    """Delete config defines in CONFIG_SYS_EXTRA_OPTIONS in defconfig files.
+
+    Arguments:
+      configs: A list of CONFIGs to remove.
+      options: option flags.
+    """
+    if not confirm(options, 'Clean up CONFIG_SYS_EXTRA_OPTIONS?'):
+        return
+
+    configs = [ config[len('CONFIG_'):] for config in configs ]
+
+    defconfigs = get_all_defconfigs()
+
+    for defconfig in defconfigs:
+        cleanup_one_extra_option(os.path.join('configs', defconfig), configs,
+                                 options)
+
+def cleanup_whitelist(configs, options):
+    """Delete config whitelist entries
+
+    Arguments:
+      configs: A list of CONFIGs to remove.
+      options: option flags.
+    """
+    if not confirm(options, 'Clean up whitelist entries?'):
+        return
+
+    with open(os.path.join('scripts', 'config_whitelist.txt')) as f:
+        lines = f.readlines()
+
+    lines = [x for x in lines if x.strip() not in configs]
+
+    with open(os.path.join('scripts', 'config_whitelist.txt'), 'w') as f:
+        f.write(''.join(lines))
+
+def find_matching(patterns, line):
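+    """Check if any of the regex patterns matches the line.
+
+    Arguments:
+      patterns: A list of compiled regex patterns.
+      line: Line of text to check.
+
+    Returns:
+      True if any pattern matches the line, False otherwise.
+    """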
+    for pat in patterns:
+        if pat.search(line):
+            return True
+    return False
+
+def cleanup_readme(configs, options):
+    """Delete config description in README
+
+    Arguments:
+      configs: A list of CONFIGs to remove.
+      options: option flags.
+    """
+    if not confirm(options, 'Clean up README?'):
+        return
+
+    patterns = []
+    for config in configs:
+        patterns.append(re.compile(r'^\s+%s' % config))
+
+    with open('README') as f:
+        lines = f.readlines()
+
+    found = False
+    newlines = []
+    for line in lines:
+        if not found:
+            found = find_matching(patterns, line)
+            if found:
+                continue
+
+        if found and re.search(r'^\s+CONFIG', line):
+            found = False
+
+        if not found:
+            newlines.append(line)
+
+    with open('README', 'w') as f:
+        f.write(''.join(newlines))
+
+
+### classes ###
+class Progress:
+
+    """Progress Indicator"""
+
+    def __init__(self, total):
+        """Create a new progress indicator.
+
+        Arguments:
+          total: A number of defconfig files to process.
+        """
+        self.current = 0
+        self.total = total
+
+    def inc(self):
+        """Increment the number of processed defconfig files."""
+
+        self.current += 1
+
+    def show(self):
+        """Display the progress."""
+        print ' %d defconfigs out of %d\r' % (self.current, self.total),
+        sys.stdout.flush()
+
+
+class KconfigScanner:
+    """Kconfig scanner."""
+
+    def __init__(self):
+        """Scan all the Kconfig files and create a Config object."""
+        # Define environment variables referenced from Kconfig
+        os.environ['srctree'] = os.getcwd()
+        os.environ['UBOOTVERSION'] = 'dummy'
+        os.environ['KCONFIG_OBJDIR'] = ''
+        self.conf = kconfiglib.Config()
+
+
+class KconfigParser:
+
+    """A parser of .config and include/autoconf.mk."""
+
+    re_arch = re.compile(r'CONFIG_SYS_ARCH="(.*)"')
+    re_cpu = re.compile(r'CONFIG_SYS_CPU="(.*)"')
+
+    def __init__(self, configs, options, build_dir):
+        """Create a new parser.
+
+        Arguments:
+          configs: A list of CONFIGs to move.
+          options: option flags.
+          build_dir: Build directory.
+        """
+        self.configs = configs
+        self.options = options
+        self.dotconfig = os.path.join(build_dir, '.config')
+        self.autoconf = os.path.join(build_dir, 'include', 'autoconf.mk')
+        self.spl_autoconf = os.path.join(build_dir, 'spl', 'include',
+                                         'autoconf.mk')
+        self.config_autoconf = os.path.join(build_dir, AUTO_CONF_PATH)
+        self.defconfig = os.path.join(build_dir, 'defconfig')
+
+    def get_arch(self):
+        """Parse .config file and return the architecture.
+
+        Returns:
+          Architecture name (e.g. 'arm').
+        """
+        arch = ''
+        cpu = ''
+        for line in open(self.dotconfig):
+            m = self.re_arch.match(line)
+            if m:
+                arch = m.group(1)
+                continue
+            m = self.re_cpu.match(line)
+            if m:
+                cpu = m.group(1)
+
+        if not arch:
+            return None
+
+        # fix-up for aarch64
+        if arch == 'arm' and cpu == 'armv8':
+            arch = 'aarch64'
+
+        return arch
+
+    def parse_one_config(self, config, dotconfig_lines, autoconf_lines):
+        """Parse .config, defconfig, include/autoconf.mk for one config.
+
+        This function looks for the config options in the lines from
+        defconfig, .config, and include/autoconf.mk in order to decide
+        which action should be taken for this defconfig.
+
+        Arguments:
+          config: CONFIG name to parse.
+          dotconfig_lines: lines from the .config file.
+          autoconf_lines: lines from the include/autoconf.mk file.
+
+        Returns:
+          A tuple of the action for this defconfig and the line
+          matched for the config.
+        """
+        not_set = '# %s is not set' % config
+
+        for line in autoconf_lines:
+            line = line.rstrip()
+            if line.startswith(config + '='):
+                new_val = line
+                break
+        else:
+            new_val = not_set
+
+        for line in dotconfig_lines:
+            line = line.rstrip()
+            if line.startswith(config + '=') or line == not_set:
+                old_val = line
+                break
+        else:
+            if new_val == not_set:
+                return (ACTION_NO_ENTRY, config)
+            else:
+                return (ACTION_NO_ENTRY_WARN, config)
+
+        # If this CONFIG is neither bool nor tristate
+        if old_val[-2:] != '=y' and old_val[-2:] != '=m' and old_val != not_set:
+            # tools/scripts/define2mk.sed changes '1' to 'y'.
+            # This is a problem if the CONFIG is int type.
+            # Check the type in Kconfig and handle it correctly.
+            if new_val[-2:] == '=y':
+                new_val = new_val[:-1] + '1'
+
+        return (ACTION_NO_CHANGE if old_val == new_val else ACTION_MOVE,
+                new_val)
+
+    def update_dotconfig(self):
+        """Parse files for the config options and update the .config.
+
+        This function parses the generated .config and include/autoconf.mk
+        searching the target options.
+        Move the config option(s) to the .config as needed.
+
+        Returns:
+          A tuple of (updated flag, suspicious flag, log string).
+          The "updated flag" is True if the .config was updated, False
+          otherwise.  The "suspicious flag" is True if any CONFIG looked
+          suspicious (i.e. it is not defined in Kconfig).  The "log string"
+          shows what happened to the .config.
+        """
+
+        results = []
+        updated = False
+        suspicious = False
+        rm_files = [self.config_autoconf, self.autoconf]
+
+        if self.options.spl:
+            if os.path.exists(self.spl_autoconf):
+                autoconf_path = self.spl_autoconf
+                rm_files.append(self.spl_autoconf)
+            else:
+                for f in rm_files:
+                    os.remove(f)
+                return (updated, suspicious,
+                        color_text(self.options.color, COLOR_BROWN,
+                                   "SPL is not enabled.  Skipped.") + '\n')
+        else:
+            autoconf_path = self.autoconf
+
+        with open(self.dotconfig) as f:
+            dotconfig_lines = f.readlines()
+
+        with open(autoconf_path) as f:
+            autoconf_lines = f.readlines()
+
+        for config in self.configs:
+            result = self.parse_one_config(config, dotconfig_lines,
+                                           autoconf_lines)
+            results.append(result)
+
+        log = ''
+
+        for (action, value) in results:
+            if action == ACTION_MOVE:
+                actlog = "Move '%s'" % value
+                log_color = COLOR_LIGHT_GREEN
+            elif action == ACTION_NO_ENTRY:
+                actlog = "%s is not defined in Kconfig.  Do nothing." % value
+                log_color = COLOR_LIGHT_BLUE
+            elif action == ACTION_NO_ENTRY_WARN:
+                actlog = "%s is not defined in Kconfig (suspicious).  Do nothing." % value
+                log_color = COLOR_YELLOW
+                suspicious = True
+            elif action == ACTION_NO_CHANGE:
+                actlog = "'%s' is the same as the define in Kconfig.  Do nothing." \
+                         % value
+                log_color = COLOR_LIGHT_PURPLE
+            elif action == ACTION_SPL_NOT_EXIST:
+                actlog = "SPL is not enabled for this defconfig.  Skip."
+                log_color = COLOR_PURPLE
+            else:
+                sys.exit("Internal Error. This should not happen.")
+
+            log += color_text(self.options.color, log_color, actlog) + '\n'
+
+        with open(self.dotconfig, 'a') as f:
+            for (action, value) in results:
+                if action == ACTION_MOVE:
+                    f.write(value + '\n')
+                    updated = True
+
+        self.results = results
+        for f in rm_files:
+            os.remove(f)
+
+        return (updated, suspicious, log)
+
+    def check_defconfig(self):
+        """Check the defconfig after savedefconfig
+
+        Returns:
+          Return additional log if moved CONFIGs were removed again by
+          'make savedefconfig'.
+        """
+
+        log = ''
+
+        with open(self.defconfig) as f:
+            defconfig_lines = f.readlines()
+
+        for (action, value) in self.results:
+            if action != ACTION_MOVE:
+                continue
+            if not value + '\n' in defconfig_lines:
+                log += color_text(self.options.color, COLOR_YELLOW,
+                                  "'%s' was removed by savedefconfig.\n" %
+                                  value)
+
+        return log
+
+
+class DatabaseThread(threading.Thread):
+    """This thread processes results from Slot threads.
+
+    It collects the data in the master config dictionary. There is only one
+    result thread, and this helps to serialise the build output.
+    """
+    def __init__(self, config_db, db_queue):
+        """Set up a new result thread
+
+        Args:
+            config_db: Dictionary to add each defconfig's results to
+            db_queue: Queue from which results are read
+        """
+        threading.Thread.__init__(self)
+        self.config_db = config_db
+        self.db_queue = db_queue
+
+    def run(self):
+        """Called to start up the result thread.
+
+        We collect each result from the queue and add it to the config
+        database.
+        """
+        while True:
+            defconfig, configs = self.db_queue.get()
+            self.config_db[defconfig] = configs
+            self.db_queue.task_done()
+
+
+class Slot:
+
+    """A slot to store a subprocess.
+
+    Each instance of this class handles one subprocess.
+    This class is useful to control multiple threads
+    for faster processing.
+    """
+
+    def __init__(self, toolchains, configs, options, progress, devnull,
+		 make_cmd, reference_src_dir, db_queue):
+        """Create a new process slot.
+
+        Arguments:
+          toolchains: Toolchains object containing toolchains.
+          configs: A list of CONFIGs to move.
+          options: option flags.
+          progress: A progress indicator.
+          devnull: A file object of '/dev/null'.
+          make_cmd: command name of GNU Make.
+          reference_src_dir: Determine the true starting config state from this
+                             source tree.
+          db_queue: output queue to write config info for the database
+        """
+        self.toolchains = toolchains
+        self.options = options
+        self.progress = progress
+        self.build_dir = tempfile.mkdtemp()
+        self.devnull = devnull
+        self.make_cmd = (make_cmd, 'O=' + self.build_dir)
+        self.reference_src_dir = reference_src_dir
+        self.db_queue = db_queue
+        self.parser = KconfigParser(configs, options, self.build_dir)
+        self.state = STATE_IDLE
+        self.failed_boards = set()
+        self.suspicious_boards = set()
+
+    def __del__(self):
+        """Delete the working directory
+
+        This function makes sure the temporary directory is cleaned away
+        even if Python suddenly dies due to error.  It should be done in here
+        because it is guaranteed the destructor is always invoked when the
+        instance of the class gets unreferenced.
+
+        If the subprocess is still running, wait until it finishes.
+        """
+        if self.state != STATE_IDLE:
+            while self.ps.poll() == None:
+                pass
+        shutil.rmtree(self.build_dir)
+
+    def add(self, defconfig):
+        """Assign a new subprocess for defconfig and add it to the slot.
+
+        If the slot is vacant, create a new subprocess for processing the
+        given defconfig and add it to the slot.  Just returns False if
+        the slot is occupied (i.e. the current subprocess is still running).
+
+        Arguments:
+          defconfig: defconfig name.
+
+        Returns:
+          Return True on success or False on failure
+        """
+        if self.state != STATE_IDLE:
+            return False
+
+        self.defconfig = defconfig
+        self.log = ''
+        self.current_src_dir = self.reference_src_dir
+        self.do_defconfig()
+        return True
+
+    def poll(self):
+        """Check the status of the subprocess and handle it as needed.
+
+        Returns True if the slot is vacant (i.e. in idle state).
+        If the configuration is successfully finished, assign a new
+        subprocess to build include/autoconf.mk.
+        If include/autoconf.mk is generated, invoke the parser to
+        parse the .config and the include/autoconf.mk, moving
+        config options to the .config as needed.
+        If the .config was updated, run "make savedefconfig" to sync
+        it, update the original defconfig, and then set the slot back
+        to the idle state.
+
+        Returns:
+          Return True if the subprocess is terminated, False otherwise
+        """
+        if self.state == STATE_IDLE:
+            return True
+
+        if self.ps.poll() == None:
+            return False
+
+        if self.ps.poll() != 0:
+            self.handle_error()
+        elif self.state == STATE_DEFCONFIG:
+            if self.reference_src_dir and not self.current_src_dir:
+                self.do_savedefconfig()
+            else:
+                self.do_autoconf()
+        elif self.state == STATE_AUTOCONF:
+            if self.current_src_dir:
+                self.current_src_dir = None
+                self.do_defconfig()
+            elif self.options.build_db:
+                self.do_build_db()
+            else:
+                self.do_savedefconfig()
+        elif self.state == STATE_SAVEDEFCONFIG:
+            self.update_defconfig()
+        else:
+            sys.exit("Internal Error. This should not happen.")
+
+        return True if self.state == STATE_IDLE else False
+
+    def handle_error(self):
+        """Handle error cases."""
+
+        self.log += color_text(self.options.color, COLOR_LIGHT_RED,
+                               "Failed to process.\n")
+        if self.options.verbose:
+            self.log += color_text(self.options.color, COLOR_LIGHT_CYAN,
+                                   self.ps.stderr.read())
+        self.finish(False)
+
+    def do_defconfig(self):
+        """Run 'make <board>_defconfig' to create the .config file."""
+
+        cmd = list(self.make_cmd)
+        cmd.append(self.defconfig)
+        self.ps = subprocess.Popen(cmd, stdout=self.devnull,
+                                   stderr=subprocess.PIPE,
+                                   cwd=self.current_src_dir)
+        self.state = STATE_DEFCONFIG
+
+    def do_autoconf(self):
+        """Run 'make AUTO_CONF_PATH'."""
+
+        arch = self.parser.get_arch()
+        try:
+            toolchain = self.toolchains.Select(arch)
+        except ValueError:
+            self.log += color_text(self.options.color, COLOR_YELLOW,
+                    "Tool chain for '%s' is missing.  Do nothing.\n" % arch)
+            self.finish(False)
+            return
+        env = toolchain.MakeEnvironment(False)
+
+        cmd = list(self.make_cmd)
+        cmd.append('KCONFIG_IGNORE_DUPLICATES=1')
+        cmd.append(AUTO_CONF_PATH)
+        self.ps = subprocess.Popen(cmd, stdout=self.devnull, env=env,
+                                   stderr=subprocess.PIPE,
+                                   cwd=self.current_src_dir)
+        self.state = STATE_AUTOCONF
+
+    def do_build_db(self):
+        """Add the board to the database"""
+        configs = {}
+        with open(os.path.join(self.build_dir, AUTO_CONF_PATH)) as fd:
+            for line in fd.readlines():
+                if line.startswith('CONFIG'):
+                    config, value = line.split('=', 1)
+                    configs[config] = value.rstrip()
+        self.db_queue.put([self.defconfig, configs])
+        self.finish(True)
+
+    def do_savedefconfig(self):
+        """Update the .config and run 'make savedefconfig'."""
+
+        (updated, suspicious, log) = self.parser.update_dotconfig()
+        if suspicious:
+            self.suspicious_boards.add(self.defconfig)
+        self.log += log
+
+        if not self.options.force_sync and not updated:
+            self.finish(True)
+            return
+        if updated:
+            self.log += color_text(self.options.color, COLOR_LIGHT_GREEN,
+                                   "Syncing by savedefconfig...\n")
+        else:
+            self.log += "Syncing by savedefconfig (forced by option)...\n"
+
+        cmd = list(self.make_cmd)
+        cmd.append('savedefconfig')
+        self.ps = subprocess.Popen(cmd, stdout=self.devnull,
+                                   stderr=subprocess.PIPE)
+        self.state = STATE_SAVEDEFCONFIG
+
+    def update_defconfig(self):
+        """Update the input defconfig and go back to the idle state."""
+
+        log = self.parser.check_defconfig()
+        if log:
+            self.suspicious_boards.add(self.defconfig)
+            self.log += log
+        orig_defconfig = os.path.join('configs', self.defconfig)
+        new_defconfig = os.path.join(self.build_dir, 'defconfig')
+        updated = not filecmp.cmp(orig_defconfig, new_defconfig)
+
+        if updated:
+            self.log += color_text(self.options.color, COLOR_LIGHT_BLUE,
+                                   "defconfig was updated.\n")
+
+        if not self.options.dry_run and updated:
+            shutil.move(new_defconfig, orig_defconfig)
+        self.finish(True)
+
+    def finish(self, success):
+        """Display log along with progress and go to the idle state.
+
+        Arguments:
+          success: Should be True when the defconfig was processed
+                   successfully, or False when it fails.
+        """
+        # output at least 30 characters to hide the "* defconfigs out of *".
+        log = self.defconfig.ljust(30) + '\n'
+
+        log += '\n'.join([ '    ' + s for s in self.log.split('\n') ])
+        # Some threads are running in parallel.
+        # Print log atomically to not mix up logs from different threads.
+        print >> (sys.stdout if success else sys.stderr), log
+
+        if not success:
+            if self.options.exit_on_error:
+                sys.exit("Exit on error.")
+            # If --exit-on-error flag is not set, skip this board and continue.
+            # Record the failed board.
+            self.failed_boards.add(self.defconfig)
+
+        self.progress.inc()
+        self.progress.show()
+        self.state = STATE_IDLE
+
+    def get_failed_boards(self):
+        """Returns a set of failed boards (defconfigs) in this slot.
+        """
+        return self.failed_boards
+
+    def get_suspicious_boards(self):
+        """Returns a set of boards (defconfigs) with possible misconversion.
+        """
+        return self.suspicious_boards - self.failed_boards
+
+class Slots:
+
+    """Controller of the array of subprocess slots."""
+
+    def __init__(self, toolchains, configs, options, progress,
+		 reference_src_dir, db_queue):
+        """Create a new slots controller.
+
+        Arguments:
+          toolchains: Toolchains object containing toolchains.
+          configs: A list of CONFIGs to move.
+          options: option flags.
+          progress: A progress indicator.
+          reference_src_dir: Determine the true starting config state from this
+                             source tree.
+          db_queue: output queue to write config info for the database
+        """
+        self.options = options
+        self.slots = []
+        devnull = get_devnull()
+        make_cmd = get_make_cmd()
+        for i in range(options.jobs):
+            self.slots.append(Slot(toolchains, configs, options, progress,
+				   devnull, make_cmd, reference_src_dir,
+				   db_queue))
+
+    def add(self, defconfig):
+        """Add a new subprocess if a vacant slot is found.
+
+        Arguments:
+          defconfig: defconfig name to be put into.
+
+        Returns:
+          Return True on success or False on failure
+        """
+        for slot in self.slots:
+            if slot.add(defconfig):
+                return True
+        return False
+
+    def available(self):
+        """Check if there is a vacant slot.
+
+        Returns:
+          Return True if at least one vacant slot is found, False otherwise.
+        """
+        for slot in self.slots:
+            if slot.poll():
+                return True
+        return False
+
+    def empty(self):
+        """Check if all slots are vacant.
+
+        Returns:
+          Return True if all the slots are vacant, False otherwise.
+        """
+        ret = True
+        for slot in self.slots:
+            if not slot.poll():
+                ret = False
+        return ret
+
+    def show_failed_boards(self):
+        """Display all of the failed boards (defconfigs)."""
+        boards = set()
+        output_file = 'moveconfig.failed'
+
+        for slot in self.slots:
+            boards |= slot.get_failed_boards()
+
+        if boards:
+            boards = '\n'.join(boards) + '\n'
+            msg = "The following boards were not processed due to error:\n"
+            msg += boards
+            msg += "(the list has been saved in %s)\n" % output_file
+            print >> sys.stderr, color_text(self.options.color, COLOR_LIGHT_RED,
+                                            msg)
+
+            with open(output_file, 'w') as f:
+                f.write(boards)
+
+    def show_suspicious_boards(self):
+        """Display all boards (defconfigs) with possible misconversion."""
+        boards = set()
+        output_file = 'moveconfig.suspicious'
+
+        for slot in self.slots:
+            boards |= slot.get_suspicious_boards()
+
+        if boards:
+            boards = '\n'.join(boards) + '\n'
+            msg = "The following boards might have been converted incorrectly.\n"
+            msg += "It is highly recommended to check them manually:\n"
+            msg += boards
+            msg += "(the list has been saved in %s)\n" % output_file
+            print >> sys.stderr, color_text(self.options.color, COLOR_YELLOW,
+                                            msg)
+
+            with open(output_file, 'w') as f:
+                f.write(boards)
+
+class ReferenceSource:
+
+    """Reference source against which original configs should be parsed."""
+
+    def __init__(self, commit):
+        """Create a reference source directory based on a specified commit.
+
+        Arguments:
+          commit: commit to git-clone
+        """
+        self.src_dir = tempfile.mkdtemp()
+        print "Cloning git repo to a separate work directory..."
+        subprocess.check_output(['git', 'clone', os.getcwd(), '.'],
+                                cwd=self.src_dir)
+        print "Checkout '%s' to build the original autoconf.mk." % \
+            subprocess.check_output(['git', 'rev-parse', '--short', commit]).strip()
+        subprocess.check_output(['git', 'checkout', commit],
+                                stderr=subprocess.STDOUT, cwd=self.src_dir)
+
+    def __del__(self):
+        """Delete the reference source directory
+
+        This function makes sure the temporary directory is cleaned away
+        even if Python suddenly dies due to error.  It should be done in here
+        because it is guaranteed the destructor is always invoked when the
+        instance of the class gets unreferenced.
+        """
+        shutil.rmtree(self.src_dir)
+
+    def get_dir(self):
+        """Return the absolute path to the reference source directory."""
+
+        return self.src_dir
+
+def move_config(toolchains, configs, options, db_queue):
+    """Move config options to defconfig files.
+
+    Arguments:
+      configs: A list of CONFIGs to move.
+      options: option flags
+    """
+    if len(configs) == 0:
+        if options.force_sync:
+            print 'No CONFIG is specified. You are probably syncing defconfigs.',
+        elif options.build_db:
+            print 'Building %s database' % CONFIG_DATABASE
+        else:
+            print 'Neither CONFIG nor --force-sync is specified. Nothing will happen.',
+    else:
+        print 'Move ' + ', '.join(configs),
+    print '(jobs: %d)\n' % options.jobs
+
+    if options.git_ref:
+        reference_src = ReferenceSource(options.git_ref)
+        reference_src_dir = reference_src.get_dir()
+    else:
+        reference_src_dir = None
+
+    if options.defconfigs:
+        defconfigs = get_matched_defconfigs(options.defconfigs)
+    else:
+        defconfigs = get_all_defconfigs()
+
+    progress = Progress(len(defconfigs))
+    slots = Slots(toolchains, configs, options, progress, reference_src_dir,
+		  db_queue)
+
+    # Main loop to process defconfig files:
+    #  Add a new subprocess into a vacant slot.
+    #  Sleep if there is no available slot.
+    for defconfig in defconfigs:
+        while not slots.add(defconfig):
+            while not slots.available():
+                # No available slot: sleep for a while
+                time.sleep(SLEEP_TIME)
+
+    # wait until all the subprocesses finish
+    while not slots.empty():
+        time.sleep(SLEEP_TIME)
+
+    print ''
+    slots.show_failed_boards()
+    slots.show_suspicious_boards()
+
+def find_kconfig_rules(kconf, config, imply_config):
+    """Check whether a config has a 'select' or 'imply' keyword
+
+    Args:
+        kconf: Kconfig.Config object
+        config: Name of config to check (without CONFIG_ prefix)
+        imply_config: Implying config (without CONFIG_ prefix) which may or
+            may not have an 'imply' for 'config'
+
+    Returns:
+        Symbol object for 'config' if found, else None
+    """
+    sym = kconf.get_symbol(imply_config)
+    if sym:
+        for sel in sym.get_selected_symbols() | sym.get_implied_symbols():
+            if sel.get_name() == config:
+                return sym
+    return None
+
+def check_imply_rule(kconf, config, imply_config):
+    """Check if we can add an 'imply' option
+
+    This finds imply_config in the Kconfig and looks to see if it is possible
+    to add an 'imply' for 'config' to that part of the Kconfig.
+
+    Args:
+        kconf: Kconfig.Config object
+        config: Name of config to check (without CONFIG_ prefix)
+        imply_config: Implying config (without CONFIG_ prefix) which may or
+            may not have an 'imply' for 'config'
+
+    Returns:
+        tuple:
+            filename of Kconfig file containing imply_config, or None if none
+            line number within the Kconfig file, or 0 if none
+            message indicating the result
+    """
+    sym = kconf.get_symbol(imply_config)
+    if not sym:
+        return None, 0, 'cannot find sym'
+    locs = sym.get_def_locations()
+    if len(locs) != 1:
+        return None, 0, '%d locations' % len(locs)
+    fname, linenum = locs[0]
+    cwd = os.getcwd()
+    if cwd and fname.startswith(cwd):
+        fname = fname[len(cwd) + 1:]
+    file_line = ' at %s:%d' % (fname, linenum)
+    with open(fname) as fd:
+        data = fd.read().splitlines()
+    if data[linenum - 1] != 'config %s' % imply_config:
+        return None, 0, 'bad sym format %s%s' % (data[linenum - 1], file_line)
+    return fname, linenum, 'adding%s' % file_line
+
+def add_imply_rule(config, fname, linenum):
+    """Add a new 'imply' option to a Kconfig
+
+    Args:
+        config: config option to add an imply for (without CONFIG_ prefix)
+        fname: Kconfig filename to update
+        linenum: Line number to place the 'imply' before
+
+    Returns:
+        Message indicating the result
+    """
+    file_line = ' at %s:%d' % (fname, linenum)
+    data = open(fname).read().splitlines()
+    linenum -= 1
+
+    for offset, line in enumerate(data[linenum:]):
+        if line.strip().startswith('help') or not line:
+            data.insert(linenum + offset, '\timply %s' % config)
+            with open(fname, 'w') as fd:
+                fd.write('\n'.join(data) + '\n')
+            return 'added%s' % file_line
+
+    return 'could not insert%s' % file_line
+
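+# Bit flags controlling which implying configs are considered (see the -I
+# option and IMPLY_FLAGS below)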
+(IMPLY_MIN_2, IMPLY_TARGET, IMPLY_CMD, IMPLY_NON_ARCH_BOARD) = (
+    1, 2, 4, 8)
+
+IMPLY_FLAGS = {
+    'min2': [IMPLY_MIN_2, 'Show options which imply >2 boards (normally >5)'],
+    'target': [IMPLY_TARGET, 'Allow CONFIG_TARGET_... options to imply'],
+    'cmd': [IMPLY_CMD, 'Allow CONFIG_CMD_... to imply'],
+    'non-arch-board': [
+        IMPLY_NON_ARCH_BOARD,
+        'Allow Kconfig options outside arch/ and /board/ to imply'],
+}
+
+def do_imply_config(config_list, add_imply, imply_flags, skip_added,
+                    check_kconfig=True, find_superset=False):
+    """Find CONFIG options which imply those in the list
+
+    Some CONFIG options can be implied by others and this can help to reduce
+    the size of the defconfig files. For example, CONFIG_X86 implies
+    CONFIG_CMD_IRQ, so we can put 'imply CMD_IRQ' under 'config X86' and
+    all x86 boards will have that option, avoiding adding CONFIG_CMD_IRQ to
+    each of the x86 defconfig files.
+
+    This function uses the moveconfig database to find such options. It
+    displays a list of things that could possibly imply those in the list.
+    By default the algorithm ignores any that start with CONFIG_TARGET since
+    these typically refer to only a few defconfigs (often one). It also does
+    not display a config which covers fewer than 5 defconfigs.
+
+    The algorithm works using sets. For each target config in config_list:
+        - Get the set 'defconfigs' which use that target config
+        - For each config (from a list of all configs):
+            - Get the set 'imply_defconfig' of defconfigs which use that config
+            - If 'imply_defconfig' contains any defconfig not in 'defconfigs'
+              then this config does not imply the target config
+
+    Params:
+        config_list: List of CONFIG options to check (each a string)
+        add_imply: Automatically add an 'imply' for each config.
+        imply_flags: Flags which control which implying configs are allowed
+           (IMPLY_...)
+        skip_added: Don't show options which already have an imply added.
+        check_kconfig: Check if implied symbols already have an 'imply' or
+            'select' for the target config, and show this information if so.
+        find_superset: True to look for configs which are a superset of those
+            already found. So for example if CONFIG_EXYNOS5 implies an option,
+            but CONFIG_EXYNOS covers a larger set of defconfigs and also
+            implies that option, this will drop the former in favour of the
+            latter. In practice this option has not proved very useful.
+
+    Note the terminology:
+        config - a CONFIG_XXX option (a string, e.g. 'CONFIG_CMD_EEPROM')
+        defconfig - a defconfig file (a string, e.g. 'configs/snow_defconfig')
+    """
+    kconf = KconfigScanner().conf if check_kconfig else None
+    if add_imply and add_imply != 'all':
+        add_imply = add_imply.split()
+
+    # key is defconfig name, value is dict of (CONFIG_xxx, value)
+    config_db = {}
+
+    # Holds a dict containing the set of defconfigs that contain each config
+    # key is config, value is set of defconfigs using that config
+    defconfig_db = collections.defaultdict(set)
+
+    # Set of all config options we have seen
+    all_configs = set()
+
+    # Set of all defconfigs we have seen
+    all_defconfigs = set()
+
+    # Read in the database
+    configs = {}
+    with open(CONFIG_DATABASE) as fd:
+        for line in fd.readlines():
+            line = line.rstrip()
+            if not line:  # Separator between defconfigs
+                config_db[defconfig] = configs
+                all_defconfigs.add(defconfig)
+                configs = {}
+            elif line[0] == ' ':  # CONFIG line
+                config, value = line.strip().split('=', 1)
+                configs[config] = value
+                defconfig_db[config].add(defconfig)
+                all_configs.add(config)
+            else:  # New defconfig
+                defconfig = line
+
+    # Work through each target config option in turn, independently
+    for config in config_list:
+        defconfigs = defconfig_db.get(config)
+        if not defconfigs:
+            print '%s not found in any defconfig' % config
+            continue
+
+        # Get the set of defconfigs without this one (since a config cannot
+        # imply itself)
+        non_defconfigs = all_defconfigs - defconfigs
+        num_defconfigs = len(defconfigs)
+        print '%s found in %d/%d defconfigs' % (config, num_defconfigs,
+                                                len(all_defconfigs))
+
+        # This will hold the results: key=config, value=defconfigs containing it
+        imply_configs = {}
+        rest_configs = all_configs - set([config])
+
+        # Look at every possible config, except the target one
+        for imply_config in rest_configs:
+            if 'ERRATUM' in imply_config:
+                continue
+            if not (imply_flags & IMPLY_CMD):
+                if 'CONFIG_CMD' in imply_config:
+                    continue
+            if not (imply_flags & IMPLY_TARGET):
+                if 'CONFIG_TARGET' in imply_config:
+                    continue
+
+            # Find set of defconfigs that have this config
+            imply_defconfig = defconfig_db[imply_config]
+
+            # Get the intersection of this with defconfigs containing the
+            # target config
+            common_defconfigs = imply_defconfig & defconfigs
+
+            # Get the set of defconfigs containing this config which DO NOT
+            # also contain the target config. If this set is non-empty it means
+            # that this config affects other defconfigs as well as (possibly)
+            # the ones affected by the target config. This means it implies
+            # things we don't want to imply.
+            not_common_defconfigs = imply_defconfig & non_defconfigs
+            if not_common_defconfigs:
+                continue
+
+            # If there are common defconfigs, imply_config may be useful
+            if common_defconfigs:
+                skip = False
+                if find_superset:
+                    for prev in imply_configs.keys():
+                        prev_count = len(imply_configs[prev])
+                        count = len(common_defconfigs)
+                        if (prev_count > count and
+                            (imply_configs[prev] & common_defconfigs ==
+                            common_defconfigs)):
+                            # skip imply_config because prev is a superset
+                            skip = True
+                            break
+                        elif count > prev_count:
+                            # delete prev because imply_config is a superset
+                            del imply_configs[prev]
+                if not skip:
+                    imply_configs[imply_config] = common_defconfigs
+
+        # Now we have a dict imply_configs of configs which imply each config
+        # The value of each dict item is the set of defconfigs containing that
+        # config. Rank them so that we print the configs that imply the largest
+        # number of defconfigs first.
+        ranked_iconfigs = sorted(imply_configs,
+                            key=lambda k: len(imply_configs[k]), reverse=True)
+        kconfig_info = ''
+        cwd = os.getcwd()
+        add_list = collections.defaultdict(list)
+        for iconfig in ranked_iconfigs:
+            num_common = len(imply_configs[iconfig])
+
+            # Don't bother if there are less than 5 defconfigs affected.
+            if num_common < (2 if imply_flags & IMPLY_MIN_2 else 5):
+                continue
+            missing = defconfigs - imply_configs[iconfig]
+            missing_str = ', '.join(missing) if missing else 'all'
+            missing_str = ''
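+            # (missing_str is cleared just above, so the list of defconfigs
+            # not covered by this option is not shown in the output)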
+            show = True
+            if kconf:
+                sym = find_kconfig_rules(kconf, config[CONFIG_LEN:],
+                                         iconfig[CONFIG_LEN:])
+                kconfig_info = ''
+                if sym:
+                    locs = sym.get_def_locations()
+                    if len(locs) == 1:
+                        fname, linenum = locs[0]
+                        if cwd and fname.startswith(cwd):
+                            fname = fname[len(cwd) + 1:]
+                        kconfig_info = '%s:%d' % (fname, linenum)
+                        if skip_added:
+                            show = False
+                else:
+                    sym = kconf.get_symbol(iconfig[CONFIG_LEN:])
+                    fname = ''
+                    if sym:
+                        locs = sym.get_def_locations()
+                        if len(locs) == 1:
+                            fname, linenum = locs[0]
+                            if cwd and fname.startswith(cwd):
+                                fname = fname[len(cwd) + 1:]
+                    in_arch_board = not sym or (fname.startswith('arch') or
+                                                fname.startswith('board'))
+                    if (not in_arch_board and
+                        not (imply_flags & IMPLY_NON_ARCH_BOARD)):
+                        continue
+
+                    if add_imply and (add_imply == 'all' or
+                                      iconfig in add_imply):
+                        fname, linenum, kconfig_info = (check_imply_rule(kconf,
+                                config[CONFIG_LEN:], iconfig[CONFIG_LEN:]))
+                        if fname:
+                            add_list[fname].append(linenum)
+
+            if show and kconfig_info != 'skip':
+                print '%5d : %-30s%-25s %s' % (num_common, iconfig.ljust(30),
+                                              kconfig_info, missing_str)
+
+        # Having collected a list of things to add, now we add them. We process
+        # each file from the largest line number to the smallest so that
+        # earlier additions do not affect our line numbers. E.g. if we added an
+        # imply at line 20 it would change the position of each line after
+        # that.
+        for fname, linenums in add_list.iteritems():
+            for linenum in sorted(linenums, reverse=True):
+                add_imply_rule(config[CONFIG_LEN:], fname, linenum)
+
+
+def main():
+    try:
+        cpu_count = multiprocessing.cpu_count()
+    except NotImplementedError:
+        cpu_count = 1
+
+    parser = optparse.OptionParser()
+    # Add options here
+    parser.add_option('-a', '--add-imply', type='string', default='',
+                      help='comma-separated list of CONFIG options to add '
+                      "an 'imply' statement to for the CONFIG in -i")
+    parser.add_option('-A', '--skip-added', action='store_true', default=False,
+                      help="don't show options which are already marked as "
+                      'implying others')
+    parser.add_option('-b', '--build-db', action='store_true', default=False,
+                      help='build a CONFIG database')
+    parser.add_option('-c', '--color', action='store_true', default=False,
+                      help='display the log in color')
+    parser.add_option('-C', '--commit', action='store_true', default=False,
+                      help='Create a git commit for the operation')
+    parser.add_option('-d', '--defconfigs', type='string',
+                      help='a file containing a list of defconfigs to move, '
+                      "one per line (for example 'snow_defconfig') "
+                      "or '-' to read from stdin")
+    parser.add_option('-i', '--imply', action='store_true', default=False,
+                      help='find options which imply others')
+    parser.add_option('-I', '--imply-flags', type='string', default='',
+                      help="control the -i option ('help' for help)")
+    parser.add_option('-n', '--dry-run', action='store_true', default=False,
+                      help='perform a trial run (show log with no changes)')
+    parser.add_option('-e', '--exit-on-error', action='store_true',
+                      default=False,
+                      help='exit immediately on any error')
+    parser.add_option('-s', '--force-sync', action='store_true', default=False,
+                      help='force sync by savedefconfig')
+    parser.add_option('-S', '--spl', action='store_true', default=False,
+                      help='parse config options defined for SPL build')
+    parser.add_option('-H', '--headers-only', dest='cleanup_headers_only',
+                      action='store_true', default=False,
+                      help='only cleanup the headers')
+    parser.add_option('-j', '--jobs', type='int', default=cpu_count,
+                      help='the number of jobs to run simultaneously')
+    parser.add_option('-r', '--git-ref', type='string',
+                      help='the git ref to clone for building the autoconf.mk')
+    parser.add_option('-y', '--yes', action='store_true', default=False,
+                      help="respond 'yes' to any prompts")
+    parser.add_option('-v', '--verbose', action='store_true', default=False,
+                      help='show any build errors as boards are built')
+    parser.usage += ' CONFIG ...'
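+    # Illustrative invocations: '-b' on its own builds the CONFIG database,
+    # while '-i CONFIG_FOO' searches for other options that could imply
+    # CONFIG_FOO (CONFIG_FOO is a placeholder here, not a real option).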
+
+    (options, configs) = parser.parse_args()
+
+    if len(configs) == 0 and not any((options.force_sync, options.build_db,
+                                      options.imply)):
+        parser.print_usage()
+        sys.exit(1)
+
+    # prefix the option name with CONFIG_ if missing
+    configs = [ config if config.startswith('CONFIG_') else 'CONFIG_' + config
+                for config in configs ]
+
+    check_top_directory()
+
+    if options.imply:
+        imply_flags = 0
+        if options.imply_flags == 'all':
+            imply_flags = -1
+
+        elif options.imply_flags:
+            for flag in options.imply_flags.split(','):
+                bad = flag not in IMPLY_FLAGS
+                if bad:
+                    print "Invalid flag '%s'" % flag
+                if flag == 'help' or bad:
+                    print "Imply flags: (separate with ',')"
+                    for name, info in IMPLY_FLAGS.iteritems():
+                        print ' %-15s: %s' % (name, info[1])
+                    parser.print_usage()
+                    sys.exit(1)
+                imply_flags |= IMPLY_FLAGS[flag][0]
+
+        do_imply_config(configs, options.add_imply, imply_flags,
+                        options.skip_added)
+        return
+
+    config_db = {}
+    db_queue = Queue.Queue()
+    t = DatabaseThread(config_db, db_queue)
+    t.setDaemon(True)
+    t.start()
+
+    if not options.cleanup_headers_only:
+        check_clean_directory()
+        bsettings.Setup('')
+        toolchains = toolchain.Toolchains()
+        toolchains.GetSettings()
+        toolchains.Scan(verbose=False)
+        move_config(toolchains, configs, options, db_queue)
+        db_queue.join()
+
+    if configs:
+        cleanup_headers(configs, options)
+        cleanup_extra_options(configs, options)
+        cleanup_whitelist(configs, options)
+        cleanup_readme(configs, options)
+
+    if options.commit:
+        subprocess.call(['git', 'add', '-u'])
+        if configs:
+            msg = 'Convert %s %sto Kconfig' % (configs[0],
+                    'et al ' if len(configs) > 1 else '')
+            msg += ('\n\nThis converts the following to Kconfig:\n   %s\n' %
+                    '\n   '.join(configs))
+        else:
+            msg = 'configs: Resync with savedefconfig'
+            msg += '\n\nResync all defconfig files using moveconfig.py'
+        subprocess.call(['git', 'commit', '-s', '-m', msg])
+
+    if options.build_db:
+        with open(CONFIG_DATABASE, 'w') as fd:
+            for defconfig, configs in config_db.iteritems():
+                fd.write('%s\n' % defconfig)
+                for config in sorted(configs.keys()):
+                    fd.write('   %s=%s\n' % (config, configs[config]))
+                fd.write('\n')
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/u-boot-tools/mrvl_uart.sh b/tools/u-boot-tools/mrvl_uart.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a46411fc99fb5e63dbd1173a7c865a30739ff55d
--- /dev/null
+++ b/tools/u-boot-tools/mrvl_uart.sh
@@ -0,0 +1,119 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+######################################################
+# Copyright (C) 2016 Marvell International Ltd.
+#
+# https://spdx.org/licenses
+#
+# Author: Konstantin Porotchkin kostap@marvell.com
+#
+# Version 0.3
+#
+# UART recovery downloader for Armada SoCs
+#
+######################################################
+
+port=$1
+file=$2
+speed=$3
+
+pattern_repeat=1500
+default_baudrate=115200
+tmpfile=/tmp/xmodem.pattern
+tools=( dd stty sx minicom )
+
+case "$3" in
+    2)
+        fast_baudrate=230400
+        prefix="\xF2"
+        ;;
+    4)
+        fast_baudrate=460800
+        prefix="\xF4"
+        ;;
+    8)
+        fast_baudrate=921600
+        prefix="\xF8"
+        ;;
+    *)
+        fast_baudrate=$default_baudrate
+        prefix="\xBB"
+esac
+
+if [[ -z "$port" || -z "$file" ]]
+then
+    echo -e "\nMarvell recovery image downloader for Armada SoC family."
+    echo -e "Command syntax:"
+    echo -e "\t$(basename $0) <port> <file> [2|4|8]"
+    echo -e "\tport  - serial port the target board is connected to"
+    echo -e "\tfile  - recovery boot image for target download"
+    echo -e "\t2|4|8 - times to increase the default serial port speed by"
+    echo -e "For example - load the image over ttyUSB0 @ 460800 baud:"
+    echo -e "$(basename $0) /dev/ttyUSB0 /tmp/flash-image.bin 4\n"
+    echo -e "=====WARNING====="
+    echo -e "- The speed-up option is not available in SoC families prior to A8K+"
+    echo -e "- This utility is not compatible with Armada 37xx SoC family\n"
+fi
+
+# Sanity checks
+if [ -c "$port" ]
+then
+   echo -e "Using device connected on serial port \"$port\""
+else
+   echo "Wrong serial port name!"
+   exit 1
+fi
+
+if [ -f "$file" ]
+then
+   echo -e "Loading flash image file \"$file\""
+else
+   echo "File $file does not exist!"
+   exit 1
+fi
+
+# Verify required tools installation
+for tool in ${tools[@]}
+do
+    toolname=`which $tool`
+    if [ -z "$toolname" ]
+    then
+        echo -e "Missing installation of \"$tool\" --> Exiting"
+        exit 1
+    fi
+done
+
+
+echo -e "Recovery will run at $fast_baudrate baud"
+echo -e "========================================"
+
+if [ -f "$tmpfile" ]
+then
+    rm -f $tmpfile
+fi
+
+# Send the escape sequence to the target board at the default debug port speed
+stty -F $port raw ignbrk time 5 $default_baudrate
+counter=0
+while [ $counter -lt $pattern_repeat ]; do
+    echo -n -e "$prefix\x11\x22\x33\x44\x55\x66\x77" >> $tmpfile
+    let counter=counter+1
+done
+
+echo -en "Press the \"Reset\" button on the target board and "
+echo -en "the \"Enter\" key on the host keyboard simultaneously"
+read
+dd if=$tmpfile of=$port &>/dev/null
+
+# Speed up the binary image transfer
+stty -F $port raw ignbrk time 5 $fast_baudrate
+sx -vv $file > $port < $port
+#sx-at91 $port $file
+
+# Return the port to the default speed
+stty -F $port raw ignbrk time 5 $default_baudrate
+
+# Optional - fire up Minicom
+minicom -D $port -b $default_baudrate
+
diff --git a/tools/u-boot-tools/mtk_image.c b/tools/u-boot-tools/mtk_image.c
new file mode 100644
index 0000000000000000000000000000000000000000..2ca519483d33dd7ae7ed8d264afd4d92684e064c
--- /dev/null
+++ b/tools/u-boot-tools/mtk_image.c
@@ -0,0 +1,749 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Generate MediaTek BootROM header for SPL/U-Boot images
+ *
+ * Copyright (C) 2018 MediaTek Inc.
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#include <image.h>
+#include <u-boot/sha256.h>
+#include "imagetool.h"
+#include "mtk_image.h"
+
+/* NAND header for SPI-NAND with 2KB page + 64B spare */
+static const union nand_boot_header snand_hdr_2k_64_data = {
+	.data = {
+		0x42, 0x4F, 0x4F, 0x54, 0x4C, 0x4F, 0x41, 0x44,
+		0x45, 0x52, 0x21, 0x00, 0x56, 0x30, 0x30, 0x36,
+		0x4E, 0x46, 0x49, 0x49, 0x4E, 0x46, 0x4F, 0x00,
+		0x00, 0x00, 0x00, 0x08, 0x03, 0x00, 0x40, 0x00,
+		0x40, 0x00, 0x00, 0x08, 0x10, 0x00, 0x16, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x7B, 0xC4, 0x17, 0x9D,
+		0xCA, 0x42, 0x90, 0xD0, 0x98, 0xD0, 0xE0, 0xF7,
+		0xDB, 0xCD, 0x16, 0xF6, 0x03, 0x73, 0xD2, 0xB8,
+		0x93, 0xB2, 0x56, 0x5A, 0x84, 0x6E, 0x00, 0x00
+	}
+};
+
+/* NAND header for SPI-NAND with 2KB page + 120B/128B spare */
+static const union nand_boot_header snand_hdr_2k_128_data = {
+	.data = {
+		0x42, 0x4F, 0x4F, 0x54, 0x4C, 0x4F, 0x41, 0x44,
+		0x45, 0x52, 0x21, 0x00, 0x56, 0x30, 0x30, 0x36,
+		0x4E, 0x46, 0x49, 0x49, 0x4E, 0x46, 0x4F, 0x00,
+		0x00, 0x00, 0x00, 0x08, 0x05, 0x00, 0x70, 0x00,
+		0x40, 0x00, 0x00, 0x08, 0x10, 0x00, 0x16, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x90, 0x28, 0xED, 0x13,
+		0x7F, 0x12, 0x22, 0xCD, 0x3D, 0x06, 0xF1, 0xB3,
+		0x6F, 0x2E, 0xD9, 0xA0, 0x9D, 0x7A, 0xBD, 0xD7,
+		0xB3, 0x28, 0x3C, 0x13, 0xDB, 0x4E, 0x00, 0x00
+	}
+};
+
+/* NAND header for SPI-NAND with 4KB page + 256B spare */
+static const union nand_boot_header snand_hdr_4k_256_data = {
+	.data = {
+		0x42, 0x4F, 0x4F, 0x54, 0x4C, 0x4F, 0x41, 0x44,
+		0x45, 0x52, 0x21, 0x00, 0x56, 0x30, 0x30, 0x36,
+		0x4E, 0x46, 0x49, 0x49, 0x4E, 0x46, 0x4F, 0x00,
+		0x00, 0x00, 0x00, 0x10, 0x05, 0x00, 0xE0, 0x00,
+		0x40, 0x00, 0x00, 0x08, 0x10, 0x00, 0x16, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x47, 0xED, 0x0E, 0xC3,
+		0x83, 0xBF, 0x41, 0xD2, 0x85, 0x21, 0x97, 0x57,
+		0xC4, 0x2E, 0x6B, 0x7A, 0x40, 0xE0, 0xCF, 0x8F,
+		0x37, 0xBD, 0x17, 0xB6, 0xC7, 0xFE, 0x00, 0x00
+	}
+};
+
+/* NAND header for Parallel NAND 1Gb with 2KB page + 64B spare */
+static const union nand_boot_header nand_hdr_1gb_2k_64_data = {
+	.data = {
+		0x42, 0x4F, 0x4F, 0x54, 0x4C, 0x4F, 0x41, 0x44,
+		0x45, 0x52, 0x21, 0x00, 0x56, 0x30, 0x30, 0x36,
+		0x4E, 0x46, 0x49, 0x49, 0x4E, 0x46, 0x4F, 0x00,
+		0x00, 0x00, 0x00, 0x08, 0x05, 0x00, 0x40, 0x00,
+		0x40, 0x00, 0x00, 0x04, 0x0B, 0x00, 0x11, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x12, 0x28, 0x1C, 0x12,
+		0x8F, 0xFD, 0xF8, 0x32, 0x6F, 0x6D, 0xCF, 0x6C,
+		0xDA, 0x21, 0x70, 0x8C, 0xDA, 0x0A, 0x22, 0x82,
+		0xAA, 0x59, 0xFA, 0x7C, 0x42, 0x2D, 0x00, 0x00
+	}
+};
+
+/* NAND header for Parallel NAND 2Gb with 2KB page + 64B spare */
+static const union nand_boot_header nand_hdr_2gb_2k_64_data = {
+	.data = {
+		0x42, 0x4F, 0x4F, 0x54, 0x4C, 0x4F, 0x41, 0x44,
+		0x45, 0x52, 0x21, 0x00, 0x56, 0x30, 0x30, 0x36,
+		0x4E, 0x46, 0x49, 0x49, 0x4E, 0x46, 0x4F, 0x00,
+		0x00, 0x00, 0x00, 0x08, 0x05, 0x00, 0x40, 0x00,
+		0x40, 0x00, 0x00, 0x08, 0x0B, 0x00, 0x11, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x20, 0x9C, 0x3D, 0x2D,
+		0x7B, 0x68, 0x63, 0x52, 0x2E, 0x04, 0x63, 0xF1,
+		0x35, 0x4E, 0x44, 0x3E, 0xF8, 0xAC, 0x9B, 0x95,
+		0xAB, 0xFE, 0xE4, 0xE1, 0xD5, 0xF9, 0x00, 0x00
+	}
+};
+
+/* NAND header for Parallel NAND 4Gb with 2KB page + 64B spare */
+static const union nand_boot_header nand_hdr_4gb_2k_64_data = {
+	.data = {
+		0x42, 0x4F, 0x4F, 0x54, 0x4C, 0x4F, 0x41, 0x44,
+		0x45, 0x52, 0x21, 0x00, 0x56, 0x30, 0x30, 0x36,
+		0x4E, 0x46, 0x49, 0x49, 0x4E, 0x46, 0x4F, 0x00,
+		0x00, 0x00, 0x00, 0x08, 0x05, 0x00, 0x40, 0x00,
+		0x40, 0x00, 0x00, 0x10, 0x0B, 0x00, 0x11, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0xE3, 0x0F, 0x86, 0x32,
+		0x68, 0x05, 0xD9, 0xC8, 0x13, 0xDF, 0xC5, 0x0B,
+		0x35, 0x3A, 0x68, 0xA5, 0x3C, 0x0C, 0x73, 0x87,
+		0x63, 0xB0, 0xBE, 0xCC, 0x84, 0x47, 0x00, 0x00
+	}
+};
+
+/* NAND header for Parallel NAND 2Gb with 2KB page + 128B spare */
+static const union nand_boot_header nand_hdr_2gb_2k_128_data = {
+	.data = {
+		0x42, 0x4F, 0x4F, 0x54, 0x4C, 0x4F, 0x41, 0x44,
+		0x45, 0x52, 0x21, 0x00, 0x56, 0x30, 0x30, 0x36,
+		0x4E, 0x46, 0x49, 0x49, 0x4E, 0x46, 0x4F, 0x00,
+		0x00, 0x00, 0x00, 0x08, 0x05, 0x00, 0x70, 0x00,
+		0x40, 0x00, 0x00, 0x08, 0x0B, 0x00, 0x11, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x01, 0xA5, 0xE9, 0x5A,
+		0xDF, 0x58, 0x62, 0x41, 0xD6, 0x26, 0x77, 0xBC,
+		0x76, 0x1F, 0x27, 0x4E, 0x4F, 0x6C, 0xC3, 0xF0,
+		0x36, 0xDE, 0xD9, 0xB3, 0xFF, 0x93, 0x00, 0x00
+	}
+};
+
+/* NAND header for Parallel NAND 4Gb with 2KB page + 128B spare */
+static const union nand_boot_header nand_hdr_4gb_2k_128_data = {
+	.data = {
+		0x42, 0x4F, 0x4F, 0x54, 0x4C, 0x4F, 0x41, 0x44,
+		0x45, 0x52, 0x21, 0x00, 0x56, 0x30, 0x30, 0x36,
+		0x4E, 0x46, 0x49, 0x49, 0x4E, 0x46, 0x4F, 0x00,
+		0x00, 0x00, 0x00, 0x08, 0x05, 0x00, 0x70, 0x00,
+		0x40, 0x00, 0x00, 0x10, 0x0B, 0x00, 0x11, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0xC2, 0x36, 0x52, 0x45,
+		0xCC, 0x35, 0xD8, 0xDB, 0xEB, 0xFD, 0xD1, 0x46,
+		0x76, 0x6B, 0x0B, 0xD5, 0x8B, 0xCC, 0x2B, 0xE2,
+		0xFE, 0x90, 0x83, 0x9E, 0xAE, 0x2D, 0x00, 0x00
+	}
+};
+
+static const struct nand_header_type {
+	const char *name;
+	const union nand_boot_header *data;
+} nand_headers[] = {
+	{
+		.name = "2k+64",
+		.data = &snand_hdr_2k_64_data
+	}, {
+		.name = "2k+120",
+		.data = &snand_hdr_2k_128_data
+	}, {
+		.name = "2k+128",
+		.data = &snand_hdr_2k_128_data
+	}, {
+		.name = "4k+256",
+		.data = &snand_hdr_4k_256_data
+	}, {
+		.name = "1g:2k+64",
+		.data = &nand_hdr_1gb_2k_64_data
+	}, {
+		.name = "2g:2k+64",
+		.data = &nand_hdr_2gb_2k_64_data
+	}, {
+		.name = "4g:2k+64",
+		.data = &nand_hdr_4gb_2k_64_data
+	}, {
+		.name = "2g:2k+128",
+		.data = &nand_hdr_2gb_2k_128_data
+	}, {
+		.name = "4g:2k+128",
+		.data = &nand_hdr_4gb_2k_128_data
+	}
+};
+
+static const struct brom_img_type {
+	const char *name;
+	enum brlyt_img_type type;
+} brom_images[] = {
+	{
+		.name = "nand",
+		.type = BRLYT_TYPE_NAND
+	}, {
+		.name = "emmc",
+		.type = BRLYT_TYPE_EMMC
+	}, {
+		.name = "nor",
+		.type = BRLYT_TYPE_NOR
+	}, {
+		.name = "sdmmc",
+		.type = BRLYT_TYPE_SDMMC
+	}, {
+		.name = "snand",
+		.type = BRLYT_TYPE_SNAND
+	}
+};
+
+/* Image type selected by user */
+static enum brlyt_img_type hdr_media;
+static int use_lk_hdr;
+
+/* LK image name */
+static char lk_name[32] = "U-Boot";
+
+/* NAND header selected by user */
+static const union nand_boot_header *hdr_nand;
+
+/* GFH header + 2 * 4KB pages of NAND */
+static char hdr_tmp[sizeof(struct gfh_header) + 0x2000];
+
+static int mtk_image_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_MTKIMAGE)
+		return EXIT_SUCCESS;
+	else
+		return EXIT_FAILURE;
+}
+
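+/*
+ * Illustrative "-n" strings accepted by the parser below (values are
+ * examples only; the right media and nandinfo depend on the target board):
+ *   "media=snand;nandinfo=2k+64"  - SPI-NAND boot, 2KB page + 64B spare
+ *   "media=emmc"                  - eMMC boot
+ *   "lk=1;lkname=lk"              - emit an LK image header instead
+ */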
+static int mtk_brom_parse_imagename(const char *imagename)
+{
+#define is_blank_char(c) \
+	((c) == '\t' || (c) == '\n' || (c) == '\r' || (c) == ' ')
+
+	char *buf = strdup(imagename), *key, *val, *end, *next;
+	int i;
+
+	/* User passed arguments from image name */
+	static const char *media = "";
+	static const char *nandinfo = "";
+	static const char *lk = "";
+
+	key = buf;
+	while (key) {
+		next = strchr(key, ';');
+		if (next)
+			*next = 0;
+
+		val = strchr(key, '=');
+		if (val) {
+			*val++ = 0;
+
+			/* Trim key */
+			while (is_blank_char(*key))
+				key++;
+
+			end = key + strlen(key) - 1;
+			while ((end >= key) && is_blank_char(*end))
+				end--;
+			end++;
+
+			if (is_blank_char(*end))
+				*end = 0;
+
+			/* Trim value */
+			while (is_blank_char(*val))
+				val++;
+
+			end = val + strlen(val) - 1;
+			while ((end >= val) && is_blank_char(*end))
+				end--;
+			end++;
+
+			if (is_blank_char(*end))
+				*end = 0;
+
+			/* record user passed arguments */
+			if (!strcmp(key, "media"))
+				media = val;
+
+			if (!strcmp(key, "nandinfo"))
+				nandinfo = val;
+
+			if (!strcmp(key, "lk"))
+				lk = val;
+
+			if (!strcmp(key, "lkname"))
+				snprintf(lk_name, sizeof(lk_name), "%s", val);
+		}
+
+		if (next)
+			key = next + 1;
+		else
+			break;
+	}
+
+	/* if user specified LK image header, skip following checks */
+	if (lk && lk[0] == '1') {
+		use_lk_hdr = 1;
+		free(buf);
+		return 0;
+	}
+
+	/* parse media type */
+	for (i = 0; i < ARRAY_SIZE(brom_images); i++) {
+		if (!strcmp(brom_images[i].name, media)) {
+			hdr_media = brom_images[i].type;
+			break;
+		}
+	}
+
+	/* parse nand header type */
+	for (i = 0; i < ARRAY_SIZE(nand_headers); i++) {
+		if (!strcmp(nand_headers[i].name, nandinfo)) {
+			hdr_nand = nand_headers[i].data;
+			break;
+		}
+	}
+
+	free(buf);
+
+	if (hdr_media == BRLYT_TYPE_INVALID) {
+		fprintf(stderr, "Error: media type is invalid or missing.\n");
+		fprintf(stderr, "       Please specify -n \"media=<type>\"\n");
+		return -EINVAL;
+	}
+
+	if ((hdr_media == BRLYT_TYPE_NAND || hdr_media == BRLYT_TYPE_SNAND) &&
+	    !hdr_nand) {
+		fprintf(stderr, "Error: nand info is invalid or missing.\n");
+		fprintf(stderr, "       Please specify -n \"media=%s;"
+				"nandinfo=<info>\"\n", media);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mtk_image_check_params(struct image_tool_params *params)
+{
+	if (!params->addr) {
+		fprintf(stderr, "Error: Load Address must be set.\n");
+		return -EINVAL;
+	}
+
+	if (!params->imagename) {
+		fprintf(stderr, "Error: Image Name must be set.\n");
+		return -EINVAL;
+	}
+
+	return mtk_brom_parse_imagename(params->imagename);
+}
+
+static int mtk_image_vrec_header(struct image_tool_params *params,
+				 struct image_type_params *tparams)
+{
+	if (use_lk_hdr) {
+		tparams->header_size = sizeof(union lk_hdr);
+		tparams->hdr = &hdr_tmp;
+		memset(&hdr_tmp, 0xff, tparams->header_size);
+		return 0;
+	}
+
+	if (hdr_media == BRLYT_TYPE_NAND || hdr_media == BRLYT_TYPE_SNAND)
+		tparams->header_size = 2 * le16_to_cpu(hdr_nand->pagesize);
+	else
+		tparams->header_size = sizeof(struct gen_device_header);
+
+	tparams->header_size += sizeof(struct gfh_header);
+	tparams->hdr = &hdr_tmp;
+
+	memset(&hdr_tmp, 0xff, tparams->header_size);
+
+	return SHA256_SUM_LEN;
+}
+
+static int mtk_image_verify_gen_header(const uint8_t *ptr, int print)
+{
+	union gen_boot_header *gbh = (union gen_boot_header *)ptr;
+	struct brom_layout_header *bh;
+	struct gfh_header *gfh;
+	const char *bootmedia;
+
+	if (!strcmp(gbh->name, SF_BOOT_NAME))
+		bootmedia = "Serial NOR";
+	else if (!strcmp(gbh->name, EMMC_BOOT_NAME))
+		bootmedia = "eMMC";
+	else if (!strcmp(gbh->name, SDMMC_BOOT_NAME))
+		bootmedia = "SD/MMC";
+	else
+		return -1;
+
+	if (print)
+		printf("Boot Media:   %s\n", bootmedia);
+
+	if (le32_to_cpu(gbh->version) != 1 ||
+	    le32_to_cpu(gbh->size) != sizeof(union gen_boot_header))
+		return -1;
+
+	bh = (struct brom_layout_header *)(ptr + le32_to_cpu(gbh->size));
+
+	if (strcmp(bh->name, BRLYT_NAME))
+		return -1;
+
+	if (le32_to_cpu(bh->magic) != BRLYT_MAGIC ||
+	    (le32_to_cpu(bh->type) != BRLYT_TYPE_NOR &&
+	    le32_to_cpu(bh->type) != BRLYT_TYPE_EMMC &&
+	    le32_to_cpu(bh->type) != BRLYT_TYPE_SDMMC))
+		return -1;
+
+	gfh = (struct gfh_header *)(ptr + le32_to_cpu(bh->header_size));
+
+	if (strcmp(gfh->file_info.name, GFH_FILE_INFO_NAME))
+		return -1;
+
+	if (le32_to_cpu(gfh->file_info.flash_type) != GFH_FLASH_TYPE_GEN)
+		return -1;
+
+	if (print)
+		printf("Load Address: %08x\n",
+		       le32_to_cpu(gfh->file_info.load_addr) +
+		       le32_to_cpu(gfh->file_info.jump_offset));
+
+	return 0;
+}
+
+static int mtk_image_verify_nand_header(const uint8_t *ptr, int print)
+{
+	union nand_boot_header *nh = (union nand_boot_header *)ptr;
+	struct brom_layout_header *bh;
+	struct gfh_header *gfh;
+	const char *bootmedia;
+
+	if (strncmp(nh->version, NAND_BOOT_VERSION, sizeof(nh->version)) ||
+	    strcmp(nh->id, NAND_BOOT_ID))
+		return -1;
+
+	bh = (struct brom_layout_header *)(ptr + le16_to_cpu(nh->pagesize));
+
+	if (strcmp(bh->name, BRLYT_NAME))
+		return -1;
+
+	if (le32_to_cpu(bh->magic) != BRLYT_MAGIC) {
+		return -1;
+	} else {
+		if (le32_to_cpu(bh->type) == BRLYT_TYPE_NAND)
+			bootmedia = "Parallel NAND";
+		else if (le32_to_cpu(bh->type) == BRLYT_TYPE_SNAND)
+			bootmedia = "Serial NAND";
+		else
+			return -1;
+	}
+
+	if (print) {
+		printf("Boot Media: %s\n", bootmedia);
+
+		if (le32_to_cpu(bh->type) == BRLYT_TYPE_NAND) {
+			uint64_t capacity =
+				(uint64_t)le16_to_cpu(nh->numblocks) *
+				(uint64_t)le16_to_cpu(nh->pages_of_block) *
+				(uint64_t)le16_to_cpu(nh->pagesize) * 8;
+			printf("Capacity:     %dGb\n",
+			       (uint32_t)(capacity >> 30));
+		}
+
+		if (le16_to_cpu(nh->pagesize) >= 1024)
+			printf("Page Size:    %dKB\n",
+			       le16_to_cpu(nh->pagesize) >> 10);
+		else
+			printf("Page Size:    %dB\n",
+			       le16_to_cpu(nh->pagesize));
+
+		printf("Spare Size:   %dB\n", le16_to_cpu(nh->oobsize));
+	}
+
+	gfh = (struct gfh_header *)(ptr + 2 * le16_to_cpu(nh->pagesize));
+
+	if (strcmp(gfh->file_info.name, GFH_FILE_INFO_NAME))
+		return -1;
+
+	if (le32_to_cpu(gfh->file_info.flash_type) != GFH_FLASH_TYPE_NAND)
+		return -1;
+
+	if (print)
+		printf("Load Address: %08x\n",
+		       le32_to_cpu(gfh->file_info.load_addr) +
+		       le32_to_cpu(gfh->file_info.jump_offset));
+
+	return 0;
+}
+
+static int mtk_image_verify_header(unsigned char *ptr, int image_size,
+				   struct image_tool_params *params)
+{
+	union lk_hdr *lk = (union lk_hdr *)ptr;
+
+	/* nothing to verify for LK image header */
+	if (le32_to_cpu(lk->magic) == LK_PART_MAGIC)
+		return 0;
+
+	if (!strcmp((char *)ptr, NAND_BOOT_NAME))
+		return mtk_image_verify_nand_header(ptr, 0);
+	else
+		return mtk_image_verify_gen_header(ptr, 0);
+
+	return -1;
+}
+
+static void mtk_image_print_header(const void *ptr)
+{
+	union lk_hdr *lk = (union lk_hdr *)ptr;
+
+	if (le32_to_cpu(lk->magic) == LK_PART_MAGIC) {
+		printf("Image Type:   MediaTek LK Image\n");
+		printf("Load Address: %08x\n", le32_to_cpu(lk->loadaddr));
+		return;
+	}
+
+	printf("Image Type:   MediaTek BootROM Loadable Image\n");
+
+	if (!strcmp((char *)ptr, NAND_BOOT_NAME))
+		mtk_image_verify_nand_header(ptr, 1);
+	else
+		mtk_image_verify_gen_header(ptr, 1);
+}
+
+static void put_brom_layout_header(struct brom_layout_header *hdr, int type)
+{
+	strncpy(hdr->name, BRLYT_NAME, sizeof(hdr->name));
+	hdr->version = cpu_to_le32(1);
+	hdr->magic = cpu_to_le32(BRLYT_MAGIC);
+	hdr->type = cpu_to_le32(type);
+}
+
+static void put_ghf_common_header(struct gfh_common_header *gfh, int size,
+				  int type, int ver)
+{
+	memcpy(gfh->magic, GFH_HEADER_MAGIC, sizeof(gfh->magic));
+	gfh->version = ver;
+	gfh->size = cpu_to_le16(size);
+	gfh->type = cpu_to_le16(type);
+}
+
+static void put_ghf_header(struct gfh_header *gfh, int file_size,
+			   int dev_hdr_size, int load_addr, int flash_type)
+{
+	memset(gfh, 0, sizeof(struct gfh_header));
+
+	/* GFH_FILE_INFO header */
+	put_ghf_common_header(&gfh->file_info.gfh, sizeof(gfh->file_info),
+			      GFH_TYPE_FILE_INFO, 1);
+	strncpy(gfh->file_info.name, GFH_FILE_INFO_NAME,
+		sizeof(gfh->file_info.name));
+	gfh->file_info.unused = cpu_to_le32(1);
+	gfh->file_info.file_type = cpu_to_le16(1);
+	gfh->file_info.flash_type = flash_type;
+	gfh->file_info.sig_type = GFH_SIG_TYPE_SHA256;
+	gfh->file_info.load_addr = cpu_to_le32(load_addr - sizeof(*gfh));
+	gfh->file_info.total_size = cpu_to_le32(file_size - dev_hdr_size);
+	gfh->file_info.max_size = cpu_to_le32(file_size);
+	gfh->file_info.hdr_size = sizeof(*gfh);
+	gfh->file_info.sig_size = SHA256_SUM_LEN;
+	gfh->file_info.jump_offset = sizeof(*gfh);
+	gfh->file_info.processed = cpu_to_le32(1);
+
+	/* GFH_BL_INFO header */
+	put_ghf_common_header(&gfh->bl_info.gfh, sizeof(gfh->bl_info),
+			      GFH_TYPE_BL_INFO, 1);
+	gfh->bl_info.attr = cpu_to_le32(1);
+
+	/* GFH_BROM_CFG header */
+	put_ghf_common_header(&gfh->brom_cfg.gfh, sizeof(gfh->brom_cfg),
+			      GFH_TYPE_BROM_CFG, 3);
+	gfh->brom_cfg.cfg_bits = cpu_to_le32(
+		GFH_BROM_CFG_USBDL_AUTO_DETECT_DIS |
+		GFH_BROM_CFG_USBDL_BY_KCOL0_TIMEOUT_EN |
+		GFH_BROM_CFG_USBDL_BY_FLAG_TIMEOUT_EN);
+	gfh->brom_cfg.usbdl_by_kcol0_timeout_ms = cpu_to_le32(5000);
+
+	/* GFH_BL_SEC_KEY header */
+	put_ghf_common_header(&gfh->bl_sec_key.gfh, sizeof(gfh->bl_sec_key),
+			      GFH_TYPE_BL_SEC_KEY, 1);
+
+	/* GFH_ANTI_CLONE header */
+	put_ghf_common_header(&gfh->anti_clone.gfh, sizeof(gfh->anti_clone),
+			      GFH_TYPE_ANTI_CLONE, 1);
+	gfh->anti_clone.ac_offset = cpu_to_le32(0x10);
+	gfh->anti_clone.ac_len = cpu_to_le32(0x80);
+
+	/* GFH_BROM_SEC_CFG header */
+	put_ghf_common_header(&gfh->brom_sec_cfg.gfh,
+			      sizeof(gfh->brom_sec_cfg),
+			      GFH_TYPE_BROM_SEC_CFG, 1);
+	gfh->brom_sec_cfg.cfg_bits =
+		cpu_to_le32(BROM_SEC_CFG_JTAG_EN | BROM_SEC_CFG_UART_EN);
+}
+
+static void put_hash(uint8_t *buff, int size)
+{
+	sha256_context ctx;
+
+	sha256_starts(&ctx);
+	sha256_update(&ctx, buff, size);
+	sha256_finish(&ctx, buff + size);
+}
+
+static void mtk_image_set_gen_header(void *ptr, off_t filesize,
+				     uint32_t loadaddr)
+{
+	struct gen_device_header *hdr = (struct gen_device_header *)ptr;
+	struct gfh_header *gfh;
+	const char *bootname = NULL;
+
+	if (hdr_media == BRLYT_TYPE_NOR)
+		bootname = SF_BOOT_NAME;
+	else if (hdr_media == BRLYT_TYPE_EMMC)
+		bootname = EMMC_BOOT_NAME;
+	else if (hdr_media == BRLYT_TYPE_SDMMC)
+		bootname = SDMMC_BOOT_NAME;
+
+	/* Generic device header */
+	snprintf(hdr->boot.name, sizeof(hdr->boot.name), "%s", bootname);
+	hdr->boot.version = cpu_to_le32(1);
+	hdr->boot.size = cpu_to_le32(sizeof(hdr->boot));
+
+	/* BRLYT header */
+	put_brom_layout_header(&hdr->brlyt, hdr_media);
+	hdr->brlyt.header_size = cpu_to_le32(sizeof(struct gen_device_header));
+	hdr->brlyt.total_size = cpu_to_le32(filesize);
+	hdr->brlyt.header_size_2 = hdr->brlyt.header_size;
+	hdr->brlyt.total_size_2 = hdr->brlyt.total_size;
+
+	/* GFH header */
+	gfh = (struct gfh_header *)(ptr + sizeof(struct gen_device_header));
+	put_ghf_header(gfh, filesize, sizeof(struct gen_device_header),
+		       loadaddr, GFH_FLASH_TYPE_GEN);
+
+	/* Generate SHA256 hash */
+	put_hash((uint8_t *)gfh,
+		 filesize - sizeof(struct gen_device_header) - SHA256_SUM_LEN);
+}
+
+static void mtk_image_set_nand_header(void *ptr, off_t filesize,
+				      uint32_t loadaddr)
+{
+	union nand_boot_header *nh = (union nand_boot_header *)ptr;
+	struct brom_layout_header *brlyt;
+	struct gfh_header *gfh;
+	uint32_t payload_pages;
+	int i;
+
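+	/*
+	 * Resulting layout, in units of NAND pages (illustrative):
+	 *   page 0: NAND device header (four copies of the 0x80-byte pattern)
+	 *   page 1: BRLYT header
+	 *   page 2: GFH header followed by the payload, with the SHA256
+	 *           digest appended at the very end of the image
+	 */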
+	/* NAND device header, repeat 4 times */
+	for (i = 0; i < 4; i++)
+		memcpy(nh + i, hdr_nand, sizeof(union nand_boot_header));
+
+	/* BRLYT header */
+	payload_pages = (filesize + le16_to_cpu(hdr_nand->pagesize) - 1) /
+			le16_to_cpu(hdr_nand->pagesize);
+	brlyt = (struct brom_layout_header *)
+		(ptr + le16_to_cpu(hdr_nand->pagesize));
+	put_brom_layout_header(brlyt, hdr_media);
+	brlyt->header_size = cpu_to_le32(2);
+	brlyt->total_size = cpu_to_le32(payload_pages);
+	brlyt->header_size_2 = brlyt->header_size;
+	brlyt->total_size_2 = brlyt->total_size;
+	brlyt->unused = cpu_to_le32(1);
+
+	/* GFH header */
+	gfh = (struct gfh_header *)(ptr + 2 * le16_to_cpu(hdr_nand->pagesize));
+	put_ghf_header(gfh, filesize, 2 * le16_to_cpu(hdr_nand->pagesize),
+		       loadaddr, GFH_FLASH_TYPE_NAND);
+
+	/* Generate SHA256 hash */
+	put_hash((uint8_t *)gfh,
+		 filesize - 2 * le16_to_cpu(hdr_nand->pagesize) - SHA256_SUM_LEN);
+}
+
+static void mtk_image_set_header(void *ptr, struct stat *sbuf, int ifd,
+				 struct image_tool_params *params)
+{
+	union lk_hdr *lk = (union lk_hdr *)ptr;
+
+	if (use_lk_hdr) {
+		lk->magic = cpu_to_le32(LK_PART_MAGIC);
+		lk->size = cpu_to_le32(sbuf->st_size - sizeof(union lk_hdr));
+		lk->loadaddr = cpu_to_le32(params->addr);
+		lk->mode = 0xffffffff; /* must be non-zero */
+		memset(lk->name, 0, sizeof(lk->name));
+		strncpy(lk->name, lk_name, sizeof(lk->name));
+		return;
+	}
+
+	if (hdr_media == BRLYT_TYPE_NAND || hdr_media == BRLYT_TYPE_SNAND)
+		mtk_image_set_nand_header(ptr, sbuf->st_size, params->addr);
+	else
+		mtk_image_set_gen_header(ptr, sbuf->st_size, params->addr);
+}
+
+U_BOOT_IMAGE_TYPE(
+	mtk_image,
+	"MediaTek BootROM Loadable Image support",
+	0,
+	NULL,
+	mtk_image_check_params,
+	mtk_image_verify_header,
+	mtk_image_print_header,
+	mtk_image_set_header,
+	NULL,
+	mtk_image_check_image_types,
+	NULL,
+	mtk_image_vrec_header
+);
diff --git a/tools/u-boot-tools/mtk_image.h b/tools/u-boot-tools/mtk_image.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a9eab372b96707fcef9722881b2e535c85379fe
--- /dev/null
+++ b/tools/u-boot-tools/mtk_image.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * MediaTek BootROM header definitions
+ *
+ * Copyright (C) 2018 MediaTek Inc.
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#ifndef _MTK_IMAGE_H
+#define _MTK_IMAGE_H
+
+/* Device header definitions */
+
+/* Header for NOR/SD/eMMC */
+union gen_boot_header {
+	struct {
+		char name[12];
+		__le32 version;
+		__le32 size;
+	};
+
+	uint8_t pad[0x200];
+};
+
+#define EMMC_BOOT_NAME		"EMMC_BOOT"
+#define SF_BOOT_NAME		"SF_BOOT"
+#define SDMMC_BOOT_NAME		"SDMMC_BOOT"
+
+/* Header for NAND */
+union nand_boot_header {
+	struct {
+		char name[12];
+		char version[4];
+		char id[8];
+		__le16 ioif;
+		__le16 pagesize;
+		__le16 addrcycles;
+		__le16 oobsize;
+		__le16 pages_of_block;
+		__le16 numblocks;
+		__le16 writesize_shift;
+		__le16 erasesize_shift;
+		uint8_t dummy[60];
+		uint8_t ecc_parity[28];
+	};
+
+	uint8_t data[0x80];
+};
+
+#define NAND_BOOT_NAME		"BOOTLOADER!"
+#define NAND_BOOT_VERSION	"V006"
+#define NAND_BOOT_ID		"NFIINFO"
+
+/* BootROM layout header */
+struct brom_layout_header {
+	char name[8];
+	__le32 version;
+	__le32 header_size;
+	__le32 total_size;
+	__le32 magic;
+	__le32 type;
+	__le32 header_size_2;
+	__le32 total_size_2;
+	__le32 unused;
+};
+
+#define BRLYT_NAME		"BRLYT"
+#define BRLYT_MAGIC		0x42424242
+
+enum brlyt_img_type {
+	BRLYT_TYPE_INVALID = 0,
+	BRLYT_TYPE_NAND = 0x10002,
+	BRLYT_TYPE_EMMC = 0x10005,
+	BRLYT_TYPE_NOR = 0x10007,
+	BRLYT_TYPE_SDMMC = 0x10008,
+	BRLYT_TYPE_SNAND = 0x10009
+};
+
+/* Combined device header for NOR/SD/eMMC */
+struct gen_device_header {
+	union gen_boot_header boot;
+
+	union {
+		struct brom_layout_header brlyt;
+		uint8_t brlyt_pad[0x400];
+	};
+};
+
+/* BootROM header definitions */
+struct gfh_common_header {
+	uint8_t magic[3];
+	uint8_t version;
+	__le16 size;
+	__le16 type;
+};
+
+#define GFH_HEADER_MAGIC	"MMM"
+
+#define GFH_TYPE_FILE_INFO	0
+#define GFH_TYPE_BL_INFO	1
+#define GFH_TYPE_BROM_CFG	7
+#define GFH_TYPE_BL_SEC_KEY	3
+#define GFH_TYPE_ANTI_CLONE	2
+#define GFH_TYPE_BROM_SEC_CFG	8
+
+struct gfh_file_info {
+	struct gfh_common_header gfh;
+	char name[12];
+	__le32 unused;
+	__le16 file_type;
+	uint8_t flash_type;
+	uint8_t sig_type;
+	__le32 load_addr;
+	__le32 total_size;
+	__le32 max_size;
+	__le32 hdr_size;
+	__le32 sig_size;
+	__le32 jump_offset;
+	__le32 processed;
+};
+
+#define GFH_FILE_INFO_NAME	"FILE_INFO"
+
+#define GFH_FLASH_TYPE_GEN	5
+#define GFH_FLASH_TYPE_NAND	2
+
+#define GFH_SIG_TYPE_NONE	0
+#define GFH_SIG_TYPE_SHA256	1
+
+struct gfh_bl_info {
+	struct gfh_common_header gfh;
+	__le32 attr;
+};
+
+struct gfh_brom_cfg {
+	struct gfh_common_header gfh;
+	__le32 cfg_bits;
+	__le32 usbdl_by_auto_detect_timeout_ms;
+	uint8_t unused[0x48];
+	__le32 usbdl_by_kcol0_timeout_ms;
+	__le32 usbdl_by_flag_timeout_ms;
+	uint32_t pad;
+};
+
+#define GFH_BROM_CFG_USBDL_BY_AUTO_DETECT_TIMEOUT_EN	0x02
+#define GFH_BROM_CFG_USBDL_AUTO_DETECT_DIS		0x10
+#define GFH_BROM_CFG_USBDL_BY_KCOL0_TIMEOUT_EN		0x80
+#define GFH_BROM_CFG_USBDL_BY_FLAG_TIMEOUT_EN		0x100
+
+struct gfh_bl_sec_key {
+	struct gfh_common_header gfh;
+	uint8_t pad[0x20c];
+};
+
+struct gfh_anti_clone {
+	struct gfh_common_header gfh;
+	uint8_t ac_b2k;
+	uint8_t ac_b2c;
+	uint16_t pad;
+	__le32 ac_offset;
+	__le32 ac_len;
+};
+
+struct gfh_brom_sec_cfg {
+	struct gfh_common_header gfh;
+	__le32 cfg_bits;
+	char customer_name[0x20];
+	__le32 pad;
+};
+
+#define BROM_SEC_CFG_JTAG_EN	1
+#define BROM_SEC_CFG_UART_EN	2
+
+struct gfh_header {
+	struct gfh_file_info file_info;
+	struct gfh_bl_info bl_info;
+	struct gfh_brom_cfg brom_cfg;
+	struct gfh_bl_sec_key bl_sec_key;
+	struct gfh_anti_clone anti_clone;
+	struct gfh_brom_sec_cfg brom_sec_cfg;
+};
+
+/* LK image header */
+
+union lk_hdr {
+	struct {
+		__le32 magic;
+		__le32 size;
+		char name[32];
+		__le32 loadaddr;
+		__le32 mode;
+	};
+
+	uint8_t data[512];
+};
+
+#define LK_PART_MAGIC		0x58881688
+
+#endif /* _MTK_IMAGE_H */
diff --git a/tools/u-boot-tools/mxsboot.c b/tools/u-boot-tools/mxsboot.c
new file mode 100644
index 0000000000000000000000000000000000000000..04d86f87a86e5050d2e66de92dedd49af1f1ee02
--- /dev/null
+++ b/tools/u-boot-tools/mxsboot.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale i.MX28 image generator
+ *
+ * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
+ * on behalf of DENX Software Engineering GmbH
+ */
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "compiler.h"
+
+/* Taken from <linux/kernel.h> */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+/*
+ * Default BCB layout.
+ *
+ * TWEAK this if you have blown any OCOTP fuses.
+ */
+#define	STRIDE_PAGES		64
+#define	STRIDE_COUNT		4
+
+/*
+ * Layout for a 256MB NAND with 2048B page size, 64B OOB size and
+ * 128KiB erase size.
+ *
+ * TWEAK this if you have different kind of NAND chip.
+ */
+static uint32_t nand_writesize = 2048;
+static uint32_t nand_oobsize = 64;
+static uint32_t nand_erasesize = 128 * 1024;
+
+/*
+ * Sector on which the SigmaTel boot partition (0x53) starts.
+ */
+static uint32_t sd_sector = 2048;
+
+/*
+ * Each of the U-Boot bootstreams is at most 1MB in size.
+ *
+ * TWEAK this if, for some wild reason, you need to boot a bigger image.
+ */
+#define	MAX_BOOTSTREAM_SIZE	(1 * 1024 * 1024)
+
+/* i.MX28 NAND controller-specific constants. DO NOT TWEAK! */
+#define	MXS_NAND_DMA_DESCRIPTOR_COUNT		4
+#define	MXS_NAND_CHUNK_DATA_CHUNK_SIZE		512
+#define	MXS_NAND_METADATA_SIZE			10
+#define	MXS_NAND_BITS_PER_ECC_LEVEL		13
+#define	MXS_NAND_COMMAND_BUFFER_SIZE		32
+
+struct mx28_nand_fcb {
+	uint32_t		checksum;
+	uint32_t		fingerprint;
+	uint32_t		version;
+	struct {
+		uint8_t			data_setup;
+		uint8_t			data_hold;
+		uint8_t			address_setup;
+		uint8_t			dsample_time;
+		uint8_t			nand_timing_state;
+		uint8_t			rea;
+		uint8_t			rloh;
+		uint8_t			rhoh;
+	}			timing;
+	uint32_t		page_data_size;
+	uint32_t		total_page_size;
+	uint32_t		sectors_per_block;
+	uint32_t		number_of_nands;		/* Ignored */
+	uint32_t		total_internal_die;		/* Ignored */
+	uint32_t		cell_type;			/* Ignored */
+	uint32_t		ecc_block_n_ecc_type;
+	uint32_t		ecc_block_0_size;
+	uint32_t		ecc_block_n_size;
+	uint32_t		ecc_block_0_ecc_type;
+	uint32_t		metadata_bytes;
+	uint32_t		num_ecc_blocks_per_page;
+	uint32_t		ecc_block_n_ecc_level_sdk;	/* Ignored */
+	uint32_t		ecc_block_0_size_sdk;		/* Ignored */
+	uint32_t		ecc_block_n_size_sdk;		/* Ignored */
+	uint32_t		ecc_block_0_ecc_level_sdk;	/* Ignored */
+	uint32_t		num_ecc_blocks_per_page_sdk;	/* Ignored */
+	uint32_t		metadata_bytes_sdk;		/* Ignored */
+	uint32_t		erase_threshold;
+	uint32_t		boot_patch;
+	uint32_t		patch_sectors;
+	uint32_t		firmware1_starting_sector;
+	uint32_t		firmware2_starting_sector;
+	uint32_t		sectors_in_firmware1;
+	uint32_t		sectors_in_firmware2;
+	uint32_t		dbbt_search_area_start_address;
+	uint32_t		badblock_marker_byte;
+	uint32_t		badblock_marker_start_bit;
+	uint32_t		bb_marker_physical_offset;
+};
+
+struct mx28_nand_dbbt {
+	uint32_t		checksum;
+	uint32_t		fingerprint;
+	uint32_t		version;
+	uint32_t		number_bb;
+	uint32_t		number_2k_pages_bb;
+};
+
+struct mx28_nand_bbt {
+	uint32_t		nand;
+	uint32_t		number_bb;
+	uint32_t		badblock[510];
+};
+
+struct mx28_sd_drive_info {
+	uint32_t		chip_num;
+	uint32_t		drive_type;
+	uint32_t		tag;
+	uint32_t		first_sector_number;
+	uint32_t		sector_count;
+};
+
+struct mx28_sd_config_block {
+	uint32_t			signature;
+	uint32_t			primary_boot_tag;
+	uint32_t			secondary_boot_tag;
+	uint32_t			num_copies;
+	struct mx28_sd_drive_info	drv_info[1];
+};
+
+static inline uint32_t mx28_nand_ecc_chunk_cnt(uint32_t page_data_size)
+{
+	return page_data_size / MXS_NAND_CHUNK_DATA_CHUNK_SIZE;
+}
+
+static inline uint32_t mx28_nand_ecc_size_in_bits(uint32_t ecc_strength)
+{
+	return ecc_strength * MXS_NAND_BITS_PER_ECC_LEVEL;
+}
+
+static inline uint32_t mx28_nand_get_ecc_strength(uint32_t page_data_size,
+						uint32_t page_oob_size)
+{
+	int ecc_strength;
+
+	/*
+	 * Determine the ECC layout with the formula:
+	 *	ECC bits per chunk = (total page spare data bits) /
+	 *		(bits per ECC level) / (chunks per page)
+	 * where:
+	 *	total page spare data bits =
+	 *		(page oob size - meta data size) * (bits per byte)
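+	 *
+	 * Illustrative numbers for the default 2048B page / 64B OOB layout:
+	 * spare bits = (64 - 10) * 8 = 432, chunks = 2048 / 512 = 4, so
+	 * 432 / 13 / 4 = 8, which is already even and is kept as-is by the
+	 * round_down() below.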
+	 */
+	ecc_strength = ((page_oob_size - MXS_NAND_METADATA_SIZE) * 8)
+			/ (MXS_NAND_BITS_PER_ECC_LEVEL *
+				mx28_nand_ecc_chunk_cnt(page_data_size));
+
+	return round_down(ecc_strength, 2);
+}
+
+static inline uint32_t mx28_nand_get_mark_offset(uint32_t page_data_size,
+						uint32_t ecc_strength)
+{
+	uint32_t chunk_data_size_in_bits;
+	uint32_t chunk_ecc_size_in_bits;
+	uint32_t chunk_total_size_in_bits;
+	uint32_t block_mark_chunk_number;
+	uint32_t block_mark_chunk_bit_offset;
+	uint32_t block_mark_bit_offset;
+
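+	/*
+	 * Worked example (illustrative, for the default 2048B page / 64B OOB
+	 * geometry, which yields an ECC strength of 8): each chunk carries
+	 * 4096 data bits plus 8 * 13 = 104 ECC bits, 4200 bits in total.
+	 * The mark starts at bit 2048 * 8 - 10 * 8 = 16304, i.e. in chunk 3;
+	 * dropping the 3 * 104 preceding ECC bits leaves 15992, which is
+	 * byte 1999, bit 0.
+	 */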
+	chunk_data_size_in_bits = MXS_NAND_CHUNK_DATA_CHUNK_SIZE * 8;
+	chunk_ecc_size_in_bits  = mx28_nand_ecc_size_in_bits(ecc_strength);
+
+	chunk_total_size_in_bits =
+			chunk_data_size_in_bits + chunk_ecc_size_in_bits;
+
+	/* Compute the bit offset of the block mark within the physical page. */
+	block_mark_bit_offset = page_data_size * 8;
+
+	/* Subtract the metadata bits. */
+	block_mark_bit_offset -= MXS_NAND_METADATA_SIZE * 8;
+
+	/*
+	 * Compute the chunk number (starting at zero) in which the block mark
+	 * appears.
+	 */
+	block_mark_chunk_number =
+			block_mark_bit_offset / chunk_total_size_in_bits;
+
+	/*
+	 * Compute the bit offset of the block mark within its chunk, and
+	 * validate it.
+	 */
+	block_mark_chunk_bit_offset = block_mark_bit_offset -
+			(block_mark_chunk_number * chunk_total_size_in_bits);
+
+	if (block_mark_chunk_bit_offset > chunk_data_size_in_bits)
+		return 1;
+
+	/*
+	 * Now that we know the chunk number in which the block mark appears,
+	 * we can subtract all the ECC bits that appear before it.
+	 */
+	block_mark_bit_offset -=
+		block_mark_chunk_number * chunk_ecc_size_in_bits;
+
+	return block_mark_bit_offset;
+}
+
+static inline uint32_t mx28_nand_mark_byte_offset(void)
+{
+	uint32_t ecc_strength;
+	ecc_strength = mx28_nand_get_ecc_strength(nand_writesize, nand_oobsize);
+	return mx28_nand_get_mark_offset(nand_writesize, ecc_strength) >> 3;
+}
+
+static inline uint32_t mx28_nand_mark_bit_offset(void)
+{
+	uint32_t ecc_strength;
+	ecc_strength = mx28_nand_get_ecc_strength(nand_writesize, nand_oobsize);
+	return mx28_nand_get_mark_offset(nand_writesize, ecc_strength) & 0x7;
+}
+
+static uint32_t mx28_nand_block_csum(uint8_t *block, uint32_t size)
+{
+	uint32_t csum = 0;
+	int i;
+
+	for (i = 0; i < size; i++)
+		csum += block[i];
+
+	return csum ^ 0xffffffff;
+}
+
+static struct mx28_nand_fcb *mx28_nand_get_fcb(uint32_t size)
+{
+	struct mx28_nand_fcb *fcb;
+	uint32_t bcb_size_bytes;
+	uint32_t stride_size_bytes;
+	uint32_t bootstream_size_pages;
+	uint32_t fw1_start_page;
+	uint32_t fw2_start_page;
+
+	fcb = malloc(nand_writesize);
+	if (!fcb) {
+		printf("MX28 NAND: Unable to allocate FCB\n");
+		return NULL;
+	}
+
+	memset(fcb, 0, nand_writesize);
+
+	fcb->fingerprint =			0x20424346;
+	fcb->version =				0x01000000;
+
+	/*
+	 * FIXME: These are default values as found in kobs-ng. We should
+	 * probably retrieve the data from the NAND chip instead.
+	 */
+	fcb->timing.data_setup =		80;
+	fcb->timing.data_hold =			60;
+	fcb->timing.address_setup =		25;
+	fcb->timing.dsample_time =		6;
+
+	fcb->page_data_size =		nand_writesize;
+	fcb->total_page_size =		nand_writesize + nand_oobsize;
+	fcb->sectors_per_block =	nand_erasesize / nand_writesize;
+
+	fcb->num_ecc_blocks_per_page =	(nand_writesize / 512) - 1;
+	fcb->ecc_block_0_size =		512;
+	fcb->ecc_block_n_size =		512;
+	fcb->metadata_bytes =		10;
+	fcb->ecc_block_n_ecc_type = mx28_nand_get_ecc_strength(
+					nand_writesize, nand_oobsize) >> 1;
+	fcb->ecc_block_0_ecc_type = mx28_nand_get_ecc_strength(
+					nand_writesize, nand_oobsize) >> 1;
+	if (fcb->ecc_block_n_ecc_type == 0) {
+		printf("MX28 NAND: Unsupported NAND geometry\n");
+		goto err;
+	}
+
+	fcb->boot_patch =			0;
+	fcb->patch_sectors =			0;
+
+	fcb->badblock_marker_byte =	mx28_nand_mark_byte_offset();
+	fcb->badblock_marker_start_bit = mx28_nand_mark_bit_offset();
+	fcb->bb_marker_physical_offset = nand_writesize;
+
+	stride_size_bytes = STRIDE_PAGES * nand_writesize;
+	bcb_size_bytes = stride_size_bytes * STRIDE_COUNT;
+
+	bootstream_size_pages = (size + (nand_writesize - 1)) /
+					nand_writesize;
+
+	fw1_start_page = 2 * bcb_size_bytes / nand_writesize;
+	fw2_start_page = (2 * bcb_size_bytes + MAX_BOOTSTREAM_SIZE) /
+				nand_writesize;
+
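+	/*
+	 * Illustrative numbers for the default 2048B page size: one search
+	 * stride is 64 pages (128 KiB), so the FCB and DBBT search areas
+	 * together occupy 2 * 4 strides (1 MiB); firmware1 then starts at
+	 * page 512, and firmware2 follows one MAX_BOOTSTREAM_SIZE (512 pages)
+	 * later at page 1024.
+	 */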
+	fcb->firmware1_starting_sector =	fw1_start_page;
+	fcb->firmware2_starting_sector =	fw2_start_page;
+	fcb->sectors_in_firmware1 =		bootstream_size_pages;
+	fcb->sectors_in_firmware2 =		bootstream_size_pages;
+
+	fcb->dbbt_search_area_start_address =	STRIDE_PAGES * STRIDE_COUNT;
+
+	return fcb;
+
+err:
+	free(fcb);
+	return NULL;
+}
+
+static struct mx28_nand_dbbt *mx28_nand_get_dbbt(void)
+{
+	struct mx28_nand_dbbt *dbbt;
+
+	dbbt = malloc(nand_writesize);
+	if (!dbbt) {
+		printf("MX28 NAND: Unable to allocate DBBT\n");
+		return NULL;
+	}
+
+	memset(dbbt, 0, nand_writesize);
+
+	dbbt->fingerprint	= 0x54424244;
+	dbbt->version		= 0x1;
+
+	return dbbt;
+}
+
+static inline uint8_t mx28_nand_parity_13_8(const uint8_t b)
+{
+	uint32_t parity = 0, tmp;
+
+	tmp = ((b >> 6) ^ (b >> 5) ^ (b >> 3) ^ (b >> 2)) & 1;
+	parity |= tmp << 0;
+
+	tmp = ((b >> 7) ^ (b >> 5) ^ (b >> 4) ^ (b >> 2) ^ (b >> 1)) & 1;
+	parity |= tmp << 1;
+
+	tmp = ((b >> 7) ^ (b >> 6) ^ (b >> 5) ^ (b >> 1) ^ (b >> 0)) & 1;
+	parity |= tmp << 2;
+
+	tmp = ((b >> 7) ^ (b >> 4) ^ (b >> 3) ^ (b >> 0)) & 1;
+	parity |= tmp << 3;
+
+	tmp = ((b >> 6) ^ (b >> 4) ^ (b >> 3) ^
+		(b >> 2) ^ (b >> 1) ^ (b >> 0)) & 1;
+	parity |= tmp << 4;
+
+	return parity;
+}
+
+static uint8_t *mx28_nand_fcb_block(struct mx28_nand_fcb *fcb)
+{
+	uint8_t *block;
+	uint8_t *ecc;
+	int i;
+
+	block = malloc(nand_writesize + nand_oobsize);
+	if (!block) {
+		printf("MX28 NAND: Unable to allocate FCB block\n");
+		return NULL;
+	}
+
+	memset(block, 0, nand_writesize + nand_oobsize);
+
+	/* Update the FCB checksum */
+	fcb->checksum = mx28_nand_block_csum(((uint8_t *)fcb) + 4, 508);
+
+	/* Figure 12-11. in iMX28RM, rev. 1, says FCB is at offset 12 */
+	memcpy(block + 12, fcb, sizeof(struct mx28_nand_fcb));
+
+	/* ECC is at offset 12 + 512 */
+	ecc = block + 12 + 512;
+
+	/* Compute the ECC parity */
+	for (i = 0; i < sizeof(struct mx28_nand_fcb); i++)
+		ecc[i] = mx28_nand_parity_13_8(block[i + 12]);
+
+	return block;
+}
+
+static int mx28_nand_write_fcb(struct mx28_nand_fcb *fcb, uint8_t *buf)
+{
+	uint32_t offset;
+	uint8_t *fcbblock;
+	int ret = 0;
+	int i;
+
+	fcbblock = mx28_nand_fcb_block(fcb);
+	if (!fcbblock)
+		return -1;
+
+	for (i = 0; i < STRIDE_PAGES * STRIDE_COUNT; i += STRIDE_PAGES) {
+		offset = i * nand_writesize;
+		memcpy(buf + offset, fcbblock, nand_writesize + nand_oobsize);
+		/* Mark the NAND page as good. */
+		buf[offset + nand_writesize] = 0xff;
+	}
+
+	free(fcbblock);
+	return ret;
+}
+
+static int mx28_nand_write_dbbt(struct mx28_nand_dbbt *dbbt, uint8_t *buf)
+{
+	uint32_t offset;
+	int i = STRIDE_PAGES * STRIDE_COUNT;
+
+	for (; i < 2 * STRIDE_PAGES * STRIDE_COUNT; i += STRIDE_PAGES) {
+		offset = i * nand_writesize;
+		memcpy(buf + offset, dbbt, sizeof(struct mx28_nand_dbbt));
+	}
+
+	return 0;
+}
+
+static int mx28_nand_write_firmware(struct mx28_nand_fcb *fcb, int infd,
+				    uint8_t *buf)
+{
+	int ret;
+	off_t size;
+	uint32_t offset1, offset2;
+
+	size = lseek(infd, 0, SEEK_END);
+	lseek(infd, 0, SEEK_SET);
+
+	offset1 = fcb->firmware1_starting_sector * nand_writesize;
+	offset2 = fcb->firmware2_starting_sector * nand_writesize;
+
+	ret = read(infd, buf + offset1, size);
+	if (ret != size)
+		return -1;
+
+	memcpy(buf + offset2, buf + offset1, size);
+
+	return 0;
+}
+
+static void usage(void)
+{
+	printf(
+		"Usage: mxsboot [ops] <type> <infile> <outfile>\n"
+		"Augment BootStream file with a proper header for i.MX28 boot\n"
+		"\n"
+		"  <type>	type of image:\n"
+		"                 \"nand\" for NAND image\n"
+		"                 \"sd\" for SD image\n"
+		"  <infile>     input file, the u-boot.sb bootstream\n"
+		"  <outfile>    output file, the bootable image\n"
+		"\n");
+	printf(
+		"For NAND boot, these options are accepted:\n"
+		"  -w <size>    NAND page size\n"
+		"  -o <size>    NAND OOB size\n"
+		"  -e <size>    NAND erase size\n"
+		"\n"
+		"For SD boot, these options are accepted:\n"
+		"  -p <sector>  Sector where the SGTL partition starts\n"
+	);
+}
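+
+/*
+ * Illustrative invocations (file names are examples; the NAND options shown
+ * match the built-in defaults):
+ *   mxsboot sd u-boot.sb u-boot.sd
+ *   mxsboot -w 2048 -o 64 -e 131072 nand u-boot.sb u-boot.nand
+ */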
+
+static int mx28_create_nand_image(int infd, int outfd)
+{
+	struct mx28_nand_fcb *fcb;
+	struct mx28_nand_dbbt *dbbt;
+	int ret = -1;
+	uint8_t *buf;
+	int size;
+	ssize_t wr_size;
+
+	size = nand_writesize * 512 + 2 * MAX_BOOTSTREAM_SIZE;
+
+	buf = malloc(size);
+	if (!buf) {
+		printf("Can not allocate output buffer of %d bytes\n", size);
+		goto err0;
+	}
+
+	memset(buf, 0, size);
+
+	fcb = mx28_nand_get_fcb(MAX_BOOTSTREAM_SIZE);
+	if (!fcb) {
+		printf("Unable to compile FCB\n");
+		goto err1;
+	}
+
+	dbbt = mx28_nand_get_dbbt();
+	if (!dbbt) {
+		printf("Unable to compile DBBT\n");
+		goto err2;
+	}
+
+	ret = mx28_nand_write_fcb(fcb, buf);
+	if (ret) {
+		printf("Unable to write FCB to buffer\n");
+		goto err3;
+	}
+
+	ret = mx28_nand_write_dbbt(dbbt, buf);
+	if (ret) {
+		printf("Unable to write DBBT to buffer\n");
+		goto err3;
+	}
+
+	ret = mx28_nand_write_firmware(fcb, infd, buf);
+	if (ret) {
+		printf("Unable to write firmware to buffer\n");
+		goto err3;
+	}
+
+	wr_size = write(outfd, buf, size);
+	if (wr_size != size) {
+		ret = -1;
+		goto err3;
+	}
+
+	ret = 0;
+
+err3:
+	free(dbbt);
+err2:
+	free(fcb);
+err1:
+	free(buf);
+err0:
+	return ret;
+}
+
+static int mx28_create_sd_image(int infd, int outfd)
+{
+	int ret = -1;
+	uint32_t *buf;
+	int size;
+	off_t fsize;
+	ssize_t wr_size;
+	struct mx28_sd_config_block *cb;
+
+	fsize = lseek(infd, 0, SEEK_END);
+	lseek(infd, 0, SEEK_SET);
+	size = fsize + 4 * 512;
+
+	buf = malloc(size);
+	if (!buf) {
+		printf("Can not allocate output buffer of %d bytes\n", size);
+		goto err0;
+	}
+
+	ret = read(infd, (uint8_t *)buf + 4 * 512, fsize);
+	if (ret != fsize) {
+		ret = -1;
+		goto err1;
+	}
+
+	cb = (struct mx28_sd_config_block *)buf;
+
+	cb->signature = cpu_to_le32(0x00112233);
+	cb->primary_boot_tag = cpu_to_le32(0x1);
+	cb->secondary_boot_tag = cpu_to_le32(0x1);
+	cb->num_copies = cpu_to_le32(1);
+	cb->drv_info[0].chip_num = cpu_to_le32(0x0);
+	cb->drv_info[0].drive_type = cpu_to_le32(0x0);
+	cb->drv_info[0].tag = cpu_to_le32(0x1);
+	cb->drv_info[0].first_sector_number = cpu_to_le32(sd_sector + 4);
+	cb->drv_info[0].sector_count = cpu_to_le32((size - 4) / 512);
+
+	wr_size = write(outfd, buf, size);
+	if (wr_size != size) {
+		ret = -1;
+		goto err1;
+	}
+
+	ret = 0;
+
+err1:
+	free(buf);
+err0:
+	return ret;
+}
+
+static int parse_ops(int argc, char **argv)
+{
+	int i;
+	int tmp;
+	char *end;
+	enum param {
+		PARAM_WRITE,
+		PARAM_OOB,
+		PARAM_ERASE,
+		PARAM_PART,
+		PARAM_SD,
+		PARAM_NAND
+	};
+	int type;
+
+	if (argc < 4)
+		return -1;
+
+	for (i = 1; i < argc; i++) {
+		if (!strncmp(argv[i], "-w", 2))
+			type = PARAM_WRITE;
+		else if (!strncmp(argv[i], "-o", 2))
+			type = PARAM_OOB;
+		else if (!strncmp(argv[i], "-e", 2))
+			type = PARAM_ERASE;
+		else if (!strncmp(argv[i], "-p", 2))
+			type = PARAM_PART;
+		else	/* SD/MMC */
+			break;
+
+		tmp = strtol(argv[++i], &end, 10);
+		if (tmp % 2)
+			return -1;
+		if (tmp <= 0)
+			return -1;
+
+		if (type == PARAM_WRITE)
+			nand_writesize = tmp;
+		if (type == PARAM_OOB)
+			nand_oobsize = tmp;
+		if (type == PARAM_ERASE)
+			nand_erasesize = tmp;
+		if (type == PARAM_PART)
+			sd_sector = tmp;
+	}
+
+	if (strcmp(argv[i], "sd") && strcmp(argv[i], "nand"))
+		return -1;
+
+	if (i + 3 != argc)
+		return -1;
+
+	return i;
+}
+
+int main(int argc, char **argv)
+{
+	int infd, outfd;
+	int ret = 0;
+	int offset;
+
+	offset = parse_ops(argc, argv);
+	if (offset < 0) {
+		usage();
+		ret = 1;
+		goto err1;
+	}
+
+	infd = open(argv[offset + 1], O_RDONLY);
+	if (infd < 0) {
+		printf("Input BootStream file can not be opened\n");
+		ret = 2;
+		goto err1;
+	}
+
+	outfd = open(argv[offset + 2], O_CREAT | O_TRUNC | O_WRONLY,
+					S_IRUSR | S_IWUSR);
+	if (outfd < 0) {
+		printf("Output file can not be created\n");
+		ret = 3;
+		goto err2;
+	}
+
+	if (!strcmp(argv[offset], "sd"))
+		ret = mx28_create_sd_image(infd, outfd);
+	else if (!strcmp(argv[offset], "nand"))
+		ret = mx28_create_nand_image(infd, outfd);
+
+	close(outfd);
+err2:
+	close(infd);
+err1:
+	return ret;
+}
diff --git a/tools/u-boot-tools/mxsimage.c b/tools/u-boot-tools/mxsimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..0bb5c6aa6b994b5189d2fc4f9a2e20bcc7004061
--- /dev/null
+++ b/tools/u-boot-tools/mxsimage.c
@@ -0,0 +1,2365 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale i.MX23/i.MX28 SB image generator
+ *
+ * Copyright (C) 2012-2013 Marek Vasut <marex@denx.de>
+ */
+
+#ifdef CONFIG_MXS
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <limits.h>
+
+#include <openssl/evp.h>
+
+#include "imagetool.h"
+#include "mxsimage.h"
+#include "pbl_crc32.h"
+#include <image.h>
+
+/*
+ * OpenSSL 1.1.0 and newer compatibility functions:
+ * https://wiki.openssl.org/index.php/1.1_API_Changes
+ */
+#if OPENSSL_VERSION_NUMBER < 0x10100000L || \
+    (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x2070000fL)
+static void *OPENSSL_zalloc(size_t num)
+{
+	void *ret = OPENSSL_malloc(num);
+
+	if (ret != NULL)
+		memset(ret, 0, num);
+	return ret;
+}
+
+EVP_MD_CTX *EVP_MD_CTX_new(void)
+{
+	return OPENSSL_zalloc(sizeof(EVP_MD_CTX));
+}
+
+void EVP_MD_CTX_free(EVP_MD_CTX *ctx)
+{
+	EVP_MD_CTX_cleanup(ctx);
+	OPENSSL_free(ctx);
+}
+
+int EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX *ctx)
+{
+	return EVP_CIPHER_CTX_cleanup(ctx);
+}
+#endif
+
+/*
+ * DCD block
+ * |-Write to address command block
+ * |  0xf00 == 0xf33d
+ * |  0xba2 == 0xb33f
+ * |-ORR address with mask command block
+ * |  0xf00 |= 0x1337
+ * |-Write to address command block
+ * |  0xba2 == 0xd00d
+ * :
+ */
+#define SB_HAB_DCD_WRITE	0xccUL
+#define SB_HAB_DCD_CHECK	0xcfUL
+#define SB_HAB_DCD_NOOP		0xc0UL
+#define SB_HAB_DCD_MASK_BIT	(1 << 3)
+#define SB_HAB_DCD_SET_BIT	(1 << 4)
+
+/* Addr.n = Value.n */
+#define	SB_DCD_WRITE	\
+	(SB_HAB_DCD_WRITE << 24)
+/* Addr.n &= ~Value.n */
+#define	SB_DCD_ANDC	\
+	((SB_HAB_DCD_WRITE << 24) | SB_HAB_DCD_SET_BIT)
+/* Addr.n |= Value.n */
+#define	SB_DCD_ORR	\
+	((SB_HAB_DCD_WRITE << 24) | SB_HAB_DCD_SET_BIT | SB_HAB_DCD_MASK_BIT)
+/* (Addr.n & Value.n) == 0 */
+#define	SB_DCD_CHK_EQZ	\
+	(SB_HAB_DCD_CHECK << 24)
+/* (Addr.n & Value.n) == Value.n */
+#define	SB_DCD_CHK_EQ	\
+	((SB_HAB_DCD_CHECK << 24) | SB_HAB_DCD_SET_BIT)
+/* (Addr.n & Value.n) != Value.n */
+#define	SB_DCD_CHK_NEQ	\
+	((SB_HAB_DCD_CHECK << 24) | SB_HAB_DCD_MASK_BIT)
+/* (Addr.n & Value.n) != 0 */
+#define	SB_DCD_CHK_NEZ	\
+	((SB_HAB_DCD_CHECK << 24) | SB_HAB_DCD_SET_BIT | SB_HAB_DCD_MASK_BIT)
+/* NOP */
+#define	SB_DCD_NOOP	\
+	(SB_HAB_DCD_NOOP << 24)
+
+struct sb_dcd_ctx {
+	struct sb_dcd_ctx		*dcd;
+
+	uint32_t			id;
+
+	/* The DCD block. */
+	uint32_t			*payload;
+	/* Size of the whole DCD block. */
+	uint32_t			size;
+
+	/* Pointer to previous DCD command block. */
+	uint32_t			*prev_dcd_head;
+};
+
+/*
+ * IMAGE
+ *   |-SECTION
+ *   |    |-CMD
+ *   |    |-CMD
+ *   |    `-CMD
+ *   |-SECTION
+ *   |    |-CMD
+ *   :    :
+ */
+struct sb_cmd_list {
+	char				*cmd;
+	size_t				len;
+	unsigned int			lineno;
+};
+
+struct sb_cmd_ctx {
+	uint32_t			size;
+
+	struct sb_cmd_ctx		*cmd;
+
+	uint8_t				*data;
+	uint32_t			length;
+
+	struct sb_command		payload;
+	struct sb_command		c_payload;
+};
+
+struct sb_section_ctx {
+	uint32_t			size;
+
+	/* Section flags */
+	unsigned int			boot:1;
+
+	struct sb_section_ctx		*sect;
+
+	struct sb_cmd_ctx		*cmd_head;
+	struct sb_cmd_ctx		*cmd_tail;
+
+	struct sb_sections_header	payload;
+};
+
+struct sb_image_ctx {
+	unsigned int			in_section:1;
+	unsigned int			in_dcd:1;
+	/* Image configuration */
+	unsigned int			display_progress:1;
+	unsigned int			silent_dump:1;
+	char				*input_filename;
+	char				*output_filename;
+	char				*cfg_filename;
+	uint8_t				image_key[16];
+
+	/* Number of section in the image */
+	unsigned int			sect_count;
+	/* Bootable section */
+	unsigned int			sect_boot;
+	unsigned int			sect_boot_found:1;
+
+	struct sb_section_ctx		*sect_head;
+	struct sb_section_ctx		*sect_tail;
+
+	struct sb_dcd_ctx		*dcd_head;
+	struct sb_dcd_ctx		*dcd_tail;
+
+	EVP_CIPHER_CTX			*cipher_ctx;
+	EVP_MD_CTX			*md_ctx;
+	uint8_t				digest[32];
+	struct sb_key_dictionary_key	sb_dict_key;
+
+	struct sb_boot_image_header	payload;
+};
+
+/*
+ * Instruction semantics:
+ * NOOP
+ * TAG [LAST]
+ * LOAD       address file
+ * LOAD  IVT  address IVT_entry_point
+ * FILL address pattern length
+ * JUMP [HAB] address [r0_arg]
+ * CALL [HAB] address [r0_arg]
+ * MODE mode
+ *      For i.MX23, mode = USB/I2C/SPI1_FLASH/SPI2_FLASH/NAND_BCH
+ *                         JTAG/SPI3_EEPROM/SD_SSP0/SD_SSP1
+ *      For i.MX28, mode = USB/I2C/SPI2_FLASH/SPI3_FLASH/NAND_BCH
+ *                         JTAG/SPI2_EEPROM/SD_SSP0/SD_SSP1
+ */
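+
+/*
+ * A minimal configuration sketch built only from the instructions listed
+ * above (illustrative; addresses are placeholders and the surrounding
+ * section syntax is handled by the parser later in this file):
+ *
+ *	TAG LAST
+ *	LOAD <load_address> u-boot.bin
+ *	CALL <entry_address> 0x0
+ */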
+
+/*
+ * AES libcrypto
+ */
+static int sb_aes_init(struct sb_image_ctx *ictx, uint8_t *iv, int enc)
+{
+	EVP_CIPHER_CTX *ctx;
+	int ret;
+
+	/* If there is no init vector, fall back to the image key as the IV. */
+	if (!iv)
+		iv = ictx->image_key;
+
+	ctx = EVP_CIPHER_CTX_new();
+	ret = EVP_CipherInit(ctx, EVP_aes_128_cbc(), ictx->image_key, iv, enc);
+	if (ret == 1) {
+		EVP_CIPHER_CTX_set_padding(ctx, 0);
+		ictx->cipher_ctx = ctx;
+	}
+	return ret;
+}
+
+static int sb_aes_crypt(struct sb_image_ctx *ictx, uint8_t *in_data,
+			uint8_t *out_data, int in_len)
+{
+	EVP_CIPHER_CTX *ctx = ictx->cipher_ctx;
+	int ret, outlen;
+	uint8_t *outbuf;
+
+	outbuf = malloc(in_len);
+	if (!outbuf)
+		return -ENOMEM;
+	memset(outbuf, 0, in_len);
+
+	ret = EVP_CipherUpdate(ctx, outbuf, &outlen, in_data, in_len);
+	if (!ret) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (out_data)
+		memcpy(out_data, outbuf, outlen);
+
+err:
+	free(outbuf);
+	return ret;
+}
+
+static int sb_aes_deinit(EVP_CIPHER_CTX *ctx)
+{
+	return EVP_CIPHER_CTX_reset(ctx);
+}
+
+static int sb_aes_reinit(struct sb_image_ctx *ictx, int enc)
+{
+	int ret;
+	EVP_CIPHER_CTX *ctx = ictx->cipher_ctx;
+	struct sb_boot_image_header *sb_header = &ictx->payload;
+	uint8_t *iv = sb_header->iv;
+
+	ret = sb_aes_deinit(ctx);
+	if (!ret)
+		return ret;
+	return sb_aes_init(ictx, iv, enc);
+}
+
+/*
+ * Debug
+ */
+static void soprintf(struct sb_image_ctx *ictx, const char *fmt, ...)
+{
+	va_list ap;
+
+	if (ictx->silent_dump)
+		return;
+
+	va_start(ap, fmt);
+	vfprintf(stdout, fmt, ap);
+	va_end(ap);
+}
+
+/*
+ * Code
+ */
+static time_t sb_get_timestamp(void)
+{
+	struct tm time_2000 = {
+		.tm_yday	= 1,	/* Jan. 1st */
+		.tm_year	= 100,	/* 2000 */
+	};
+	time_t seconds_to_2000 = mktime(&time_2000);
+	time_t seconds_to_now = time(NULL);
+
+	return seconds_to_now - seconds_to_2000;
+}
+
+static int sb_get_time(time_t time, struct tm *tm)
+{
+	struct tm time_2000 = {
+		.tm_yday	= 1,	/* Jan. 1st */
+		.tm_year	= 0,	/* 1900 */
+	};
+	const time_t seconds_to_2000 = mktime(&time_2000);
+	const time_t seconds_to_now = seconds_to_2000 + time;
+	struct tm *ret;
+	ret = gmtime_r(&seconds_to_now, tm);
+	return ret ? 0 : -EINVAL;
+}
+
+static void sb_encrypt_sb_header(struct sb_image_ctx *ictx)
+{
+	EVP_MD_CTX *md_ctx = ictx->md_ctx;
+	struct sb_boot_image_header *sb_header = &ictx->payload;
+	uint8_t *sb_header_ptr = (uint8_t *)sb_header;
+
+	/* Encrypt the header, compute the digest. */
+	sb_aes_crypt(ictx, sb_header_ptr, NULL, sizeof(*sb_header));
+	EVP_DigestUpdate(md_ctx, sb_header_ptr, sizeof(*sb_header));
+}
+
+static void sb_encrypt_sb_sections_header(struct sb_image_ctx *ictx)
+{
+	EVP_MD_CTX *md_ctx = ictx->md_ctx;
+	struct sb_section_ctx *sctx = ictx->sect_head;
+	struct sb_sections_header *shdr;
+	uint8_t *sb_sections_header_ptr;
+	const int size = sizeof(*shdr);
+
+	while (sctx) {
+		shdr = &sctx->payload;
+		sb_sections_header_ptr = (uint8_t *)shdr;
+
+		sb_aes_crypt(ictx, sb_sections_header_ptr,
+			     ictx->sb_dict_key.cbc_mac, size);
+		EVP_DigestUpdate(md_ctx, sb_sections_header_ptr, size);
+
+		sctx = sctx->sect;
+	}
+}
+
+static void sb_encrypt_key_dictionary_key(struct sb_image_ctx *ictx)
+{
+	EVP_MD_CTX *md_ctx = ictx->md_ctx;
+
+	sb_aes_crypt(ictx, ictx->image_key, ictx->sb_dict_key.key,
+		     sizeof(ictx->sb_dict_key.key));
+	EVP_DigestUpdate(md_ctx, &ictx->sb_dict_key, sizeof(ictx->sb_dict_key));
+}
+
+static void sb_decrypt_key_dictionary_key(struct sb_image_ctx *ictx)
+{
+	EVP_MD_CTX *md_ctx = ictx->md_ctx;
+
+	EVP_DigestUpdate(md_ctx, &ictx->sb_dict_key, sizeof(ictx->sb_dict_key));
+	sb_aes_crypt(ictx, ictx->sb_dict_key.key, ictx->image_key,
+		     sizeof(ictx->sb_dict_key.key));
+}
+
+static void sb_encrypt_tag(struct sb_image_ctx *ictx,
+		struct sb_cmd_ctx *cctx)
+{
+	EVP_MD_CTX *md_ctx = ictx->md_ctx;
+	struct sb_command *cmd = &cctx->payload;
+
+	sb_aes_crypt(ictx, (uint8_t *)cmd,
+		     (uint8_t *)&cctx->c_payload, sizeof(*cmd));
+	EVP_DigestUpdate(md_ctx, &cctx->c_payload, sizeof(*cmd));
+}
+
+static int sb_encrypt_image(struct sb_image_ctx *ictx)
+{
+	/* Start image-wide crypto. */
+	ictx->md_ctx = EVP_MD_CTX_new();
+	EVP_DigestInit(ictx->md_ctx, EVP_sha1());
+
+	/*
+	 * SB image header.
+	 */
+	sb_aes_init(ictx, NULL, 1);
+	sb_encrypt_sb_header(ictx);
+
+	/*
+	 * SB sections header.
+	 */
+	sb_encrypt_sb_sections_header(ictx);
+
+	/*
+	 * Key dictionary.
+	 */
+	sb_aes_reinit(ictx, 1);
+	sb_encrypt_key_dictionary_key(ictx);
+
+	/*
+	 * Section tags.
+	 */
+	struct sb_cmd_ctx *cctx;
+	struct sb_command *ccmd;
+	struct sb_section_ctx *sctx = ictx->sect_head;
+
+	while (sctx) {
+		cctx = sctx->cmd_head;
+
+		sb_aes_reinit(ictx, 1);
+
+		while (cctx) {
+			ccmd = &cctx->payload;
+
+			sb_encrypt_tag(ictx, cctx);
+
+			if (ccmd->header.tag == ROM_TAG_CMD) {
+				sb_aes_reinit(ictx, 1);
+			} else if (ccmd->header.tag == ROM_LOAD_CMD) {
+				sb_aes_crypt(ictx, cctx->data, cctx->data,
+					     cctx->length);
+				EVP_DigestUpdate(ictx->md_ctx, cctx->data,
+						 cctx->length);
+			}
+
+			cctx = cctx->cmd;
+		}
+
+		sctx = sctx->sect;
+	}
+
+	/*
+	 * Dump the SHA1 of the whole image.
+	 */
+	sb_aes_reinit(ictx, 1);
+
+	EVP_DigestFinal(ictx->md_ctx, ictx->digest, NULL);
+	EVP_MD_CTX_free(ictx->md_ctx);
+	sb_aes_crypt(ictx, ictx->digest, ictx->digest, sizeof(ictx->digest));
+
+	/* Stop the encryption session. */
+	sb_aes_deinit(ictx->cipher_ctx);
+
+	return 0;
+}
+
+static int sb_load_file(struct sb_cmd_ctx *cctx, char *filename)
+{
+	long real_size, roundup_size;
+	uint8_t *data;
+	long ret;
+	unsigned long size;
+	FILE *fp;
+
+	if (!filename) {
+		fprintf(stderr, "ERR: Missing filename!\n");
+		return -EINVAL;
+	}
+
+	fp = fopen(filename, "r");
+	if (!fp)
+		goto err_open;
+
+	ret = fseek(fp, 0, SEEK_END);
+	if (ret < 0)
+		goto err_file;
+
+	real_size = ftell(fp);
+	if (real_size < 0)
+		goto err_file;
+
+	ret = fseek(fp, 0, SEEK_SET);
+	if (ret < 0)
+		goto err_file;
+
+	roundup_size = roundup(real_size, SB_BLOCK_SIZE);
+	data = calloc(1, roundup_size);
+	if (!data)
+		goto err_file;
+
+	size = fread(data, 1, real_size, fp);
+	if (size != (unsigned long)real_size)
+		goto err_alloc;
+
+	cctx->data = data;
+	cctx->length = roundup_size;
+
+	fclose(fp);
+	return 0;
+
+err_alloc:
+	free(data);
+err_file:
+	fclose(fp);
+err_open:
+	fprintf(stderr, "ERR: Failed to load file \"%s\"\n", filename);
+	return -EINVAL;
+}
+
+static uint8_t sb_command_checksum(struct sb_command *inst)
+{
+	uint8_t *inst_ptr = (uint8_t *)inst;
+	uint8_t csum = 0;
+	unsigned int i;
+
+	for (i = 0; i < sizeof(struct sb_command); i++)
+		csum += inst_ptr[i];
+
+	return csum;
+}
+
+static int sb_token_to_long(char *tok, uint32_t *rid)
+{
+	char *endptr;
+	unsigned long id;
+
+	if (tok[0] != '0' || tok[1] != 'x') {
+		fprintf(stderr, "ERR: Invalid hexadecimal number!\n");
+		return -EINVAL;
+	}
+
+	tok += 2;
+
+	errno = 0;
+	id = strtoul(tok, &endptr, 16);
+	if ((errno == ERANGE && id == ULONG_MAX) || (errno != 0 && id == 0)) {
+		fprintf(stderr, "ERR: Value can't be decoded!\n");
+		return -EINVAL;
+	}
+
+	/* Check for 32-bit overflow. */
+	if (id > 0xffffffff) {
+		fprintf(stderr, "ERR: Value too big!\n");
+		return -EINVAL;
+	}
+
+	if (endptr == tok) {
+		fprintf(stderr, "ERR: Deformed value!\n");
+		return -EINVAL;
+	}
+
+	*rid = (uint32_t)id;
+	return 0;
+}
+
+static int sb_grow_dcd(struct sb_dcd_ctx *dctx, unsigned int inc_size)
+{
+	uint32_t *tmp;
+
+	if (!inc_size)
+		return 0;
+
+	dctx->size += inc_size;
+	tmp = realloc(dctx->payload, dctx->size);
+	if (!tmp)
+		return -ENOMEM;
+
+	dctx->payload = tmp;
+
+	/* Assemble and update the HAB DCD header. */
+	dctx->payload[0] = htonl((SB_HAB_DCD_TAG << 24) |
+				 (dctx->size << 8) |
+				 SB_HAB_VERSION);
+
+	return 0;
+}
+
+static int sb_build_dcd(struct sb_image_ctx *ictx, struct sb_cmd_list *cmd)
+{
+	struct sb_dcd_ctx *dctx;
+
+	char *tok;
+	uint32_t id;
+	int ret;
+
+	dctx = calloc(1, sizeof(*dctx));
+	if (!dctx)
+		return -ENOMEM;
+
+	ret = sb_grow_dcd(dctx, 4);
+	if (ret)
+		goto err_dcd;
+
+	/* Read DCD block number. */
+	tok = strtok(cmd->cmd, " ");
+	if (!tok) {
+		fprintf(stderr, "#%i ERR: DCD block without number!\n",
+			cmd->lineno);
+		ret = -EINVAL;
+		goto err_dcd;
+	}
+
+	/* Parse the DCD block number. */
+	ret = sb_token_to_long(tok, &id);
+	if (ret) {
+		fprintf(stderr, "#%i ERR: Malformed DCD block number!\n",
+			cmd->lineno);
+		goto err_dcd;
+	}
+
+	dctx->id = id;
+
+	/*
+	 * The DCD block is now constructed. Append it to the list.
+	 * WARNING: The DCD size is still not computed and will be
+	 * updated while parsing its commands.
+	 */
+	if (!ictx->dcd_head) {
+		ictx->dcd_head = dctx;
+		ictx->dcd_tail = dctx;
+	} else {
+		ictx->dcd_tail->dcd = dctx;
+		ictx->dcd_tail = dctx;
+	}
+
+	return 0;
+
+err_dcd:
+	free(dctx->payload);
+	free(dctx);
+	return ret;
+}
+
+static int sb_build_dcd_block(struct sb_image_ctx *ictx,
+			      struct sb_cmd_list *cmd,
+			      uint32_t type)
+{
+	char *tok;
+	uint32_t address, value, length;
+	int ret;
+
+	struct sb_dcd_ctx *dctx = ictx->dcd_tail;
+	uint32_t *dcd;
+
+	if (dctx->prev_dcd_head && (type != SB_DCD_NOOP) &&
+	    ((dctx->prev_dcd_head[0] & 0xff0000ff) == type)) {
+		/* Same instruction as before, just append it. */
+		ret = sb_grow_dcd(dctx, 8);
+		if (ret)
+			return ret;
+	} else if (type == SB_DCD_NOOP) {
+		ret = sb_grow_dcd(dctx, 4);
+		if (ret)
+			return ret;
+
+		/* Update DCD command block pointer. */
+		dctx->prev_dcd_head = dctx->payload +
+				dctx->size / sizeof(*dctx->payload) - 1;
+
+		/* NOOP has only 4 bytes and no payload. */
+		goto noop;
+	} else {
+		/*
+		 * Either a different instruction block started now
+		 * or this is the first instruction block.
+		 */
+		ret = sb_grow_dcd(dctx, 12);
+		if (ret)
+			return ret;
+
+		/* Update DCD command block pointer. */
+		dctx->prev_dcd_head = dctx->payload +
+				dctx->size / sizeof(*dctx->payload) - 3;
+	}
+
+	dcd = dctx->payload + dctx->size / sizeof(*dctx->payload) - 2;
+
+	/*
+	 * Prepare the command.
+	 */
+	tok = strtok(cmd->cmd, " ");
+	if (!tok) {
+		fprintf(stderr, "#%i ERR: Missing DCD address!\n",
+			cmd->lineno);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* Read DCD destination address. */
+	ret = sb_token_to_long(tok, &address);
+	if (ret) {
+		fprintf(stderr, "#%i ERR: Incorrect DCD address!\n",
+			cmd->lineno);
+		goto err;
+	}
+
+	tok = strtok(NULL, " ");
+	if (!tok) {
+		fprintf(stderr, "#%i ERR: Missing DCD value!\n",
+			cmd->lineno);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* Read DCD operation value. */
+	ret = sb_token_to_long(tok, &value);
+	if (ret) {
+		fprintf(stderr, "#%i ERR: Incorrect DCD value!\n",
+			cmd->lineno);
+		goto err;
+	}
+
+	/* Fill in the new DCD entry. */
+	dcd[0] = htonl(address);
+	dcd[1] = htonl(value);
+
+noop:
+	/* Update the DCD command block. */
+	length = dctx->size -
+		 ((dctx->prev_dcd_head - dctx->payload) *
+		 sizeof(*dctx->payload));
+	dctx->prev_dcd_head[0] = htonl(type | (length << 8));
+
+err:
+	return ret;
+}
+
+static int sb_build_section(struct sb_image_ctx *ictx, struct sb_cmd_list *cmd)
+{
+	struct sb_section_ctx *sctx;
+	struct sb_sections_header *shdr;
+	char *tok;
+	uint32_t bootable = 0;
+	uint32_t id;
+	int ret;
+
+	sctx = calloc(1, sizeof(*sctx));
+	if (!sctx)
+		return -ENOMEM;
+
+	/* Read section number. */
+	tok = strtok(cmd->cmd, " ");
+	if (!tok) {
+		fprintf(stderr, "#%i ERR: Section without number!\n",
+			cmd->lineno);
+		ret = -EINVAL;
+		goto err_sect;
+	}
+
+	/* Parse the section number. */
+	ret = sb_token_to_long(tok, &id);
+	if (ret) {
+		fprintf(stderr, "#%i ERR: Malformed section number!\n",
+			cmd->lineno);
+		goto err_sect;
+	}
+
+	/* Read section's BOOTABLE flag. */
+	tok = strtok(NULL, " ");
+	if (tok && (strlen(tok) == 8) && !strncmp(tok, "BOOTABLE", 8))
+		bootable = SB_SECTION_FLAG_BOOTABLE;
+
+	sctx->boot = bootable;
+
+	shdr = &sctx->payload;
+	shdr->section_number = id;
+	shdr->section_flags = bootable;
+
+	/*
+	 * The section is now constructed. Append it to the list.
+	 * WARNING: The section size is still not computed and will
+	 * be updated while parsing its commands.
+	 */
+	ictx->sect_count++;
+
+	/* Mark that this section is bootable one. */
+	if (bootable) {
+		if (ictx->sect_boot_found) {
+			fprintf(stderr,
+				"#%i WARN: Multiple bootable section!\n",
+				cmd->lineno);
+		} else {
+			ictx->sect_boot = id;
+			ictx->sect_boot_found = 1;
+		}
+	}
+
+	if (!ictx->sect_head) {
+		ictx->sect_head = sctx;
+		ictx->sect_tail = sctx;
+	} else {
+		ictx->sect_tail->sect = sctx;
+		ictx->sect_tail = sctx;
+	}
+
+	return 0;
+
+err_sect:
+	free(sctx);
+	return ret;
+}
+
+static int sb_build_command_nop(struct sb_image_ctx *ictx)
+{
+	struct sb_section_ctx *sctx = ictx->sect_tail;
+	struct sb_cmd_ctx *cctx;
+	struct sb_command *ccmd;
+
+	cctx = calloc(1, sizeof(*cctx));
+	if (!cctx)
+		return -ENOMEM;
+
+	ccmd = &cctx->payload;
+
+	/*
+	 * Construct the command.
+	 */
+	ccmd->header.checksum	= 0x5a;
+	ccmd->header.tag	= ROM_NOP_CMD;
+
+	cctx->size = sizeof(*ccmd);
+
+	/*
+	 * Append the command to the last section.
+	 */
+	if (!sctx->cmd_head) {
+		sctx->cmd_head = cctx;
+		sctx->cmd_tail = cctx;
+	} else {
+		sctx->cmd_tail->cmd = cctx;
+		sctx->cmd_tail = cctx;
+	}
+
+	return 0;
+}
+
+static int sb_build_command_tag(struct sb_image_ctx *ictx,
+				struct sb_cmd_list *cmd)
+{
+	struct sb_section_ctx *sctx = ictx->sect_tail;
+	struct sb_cmd_ctx *cctx;
+	struct sb_command *ccmd;
+	char *tok;
+
+	cctx = calloc(1, sizeof(*cctx));
+	if (!cctx)
+		return -ENOMEM;
+
+	ccmd = &cctx->payload;
+
+	/*
+	 * Prepare the command.
+	 */
+	/* Check for the LAST keyword. */
+	tok = strtok(cmd->cmd, " ");
+	if (tok && !strcmp(tok, "LAST"))
+		ccmd->header.flags = ROM_TAG_CMD_FLAG_ROM_LAST_TAG;
+
+	/*
+	 * Construct the command.
+	 */
+	ccmd->header.checksum	= 0x5a;
+	ccmd->header.tag	= ROM_TAG_CMD;
+
+	cctx->size = sizeof(*ccmd);
+
+	/*
+	 * Append the command to the last section.
+	 */
+	if (!sctx->cmd_head) {
+		sctx->cmd_head = cctx;
+		sctx->cmd_tail = cctx;
+	} else {
+		sctx->cmd_tail->cmd = cctx;
+		sctx->cmd_tail = cctx;
+	}
+
+	return 0;
+}
+
+static int sb_build_command_load(struct sb_image_ctx *ictx,
+				 struct sb_cmd_list *cmd)
+{
+	struct sb_section_ctx *sctx = ictx->sect_tail;
+	struct sb_cmd_ctx *cctx;
+	struct sb_command *ccmd;
+	char *tok;
+	int ret, is_ivt = 0, is_dcd = 0;
+	uint32_t dest, dcd = 0;
+
+	cctx = calloc(1, sizeof(*cctx));
+	if (!cctx)
+		return -ENOMEM;
+
+	ccmd = &cctx->payload;
+
+	/*
+	 * Prepare the command.
+	 */
+	tok = strtok(cmd->cmd, " ");
+	if (!tok) {
+		fprintf(stderr, "#%i ERR: Missing LOAD address or 'IVT'!\n",
+			cmd->lineno);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* Check for "IVT" flag. */
+	if (!strcmp(tok, "IVT"))
+		is_ivt = 1;
+	if (!strcmp(tok, "DCD"))
+		is_dcd = 1;
+	if (is_ivt || is_dcd) {
+		tok = strtok(NULL, " ");
+		if (!tok) {
+			fprintf(stderr, "#%i ERR: Missing LOAD address!\n",
+				cmd->lineno);
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+
+	/* Read load destination address. */
+	ret = sb_token_to_long(tok, &dest);
+	if (ret) {
+		fprintf(stderr, "#%i ERR: Incorrect LOAD address!\n",
+			cmd->lineno);
+		goto err;
+	}
+
+	/* Read filename or IVT entrypoint or DCD block ID. */
+	tok = strtok(NULL, " ");
+	if (!tok) {
+		fprintf(stderr,
+			"#%i ERR: Missing LOAD filename or IVT ep or DCD block ID!\n",
+			cmd->lineno);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (is_ivt) {
+		/* Handle IVT. */
+		struct sb_ivt_header *ivt;
+		uint32_t ivtep;
+		ret = sb_token_to_long(tok, &ivtep);
+
+		if (ret) {
+			fprintf(stderr,
+				"#%i ERR: Incorrect IVT entry point!\n",
+				cmd->lineno);
+			goto err;
+		}
+
+		ivt = calloc(1, sizeof(*ivt));
+		if (!ivt) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		ivt->header = sb_hab_ivt_header();
+		ivt->entry = ivtep;
+		ivt->self = dest;
+
+		cctx->data = (uint8_t *)ivt;
+		cctx->length = sizeof(*ivt);
+	} else if (is_dcd) {
+		struct sb_dcd_ctx *dctx = ictx->dcd_head;
+		uint32_t dcdid;
+		uint8_t *payload;
+		uint32_t asize;
+		ret = sb_token_to_long(tok, &dcdid);
+
+		if (ret) {
+			fprintf(stderr,
+				"#%i ERR: Incorrect DCD block ID!\n",
+				cmd->lineno);
+			goto err;
+		}
+
+		while (dctx) {
+			if (dctx->id == dcdid)
+				break;
+			dctx = dctx->dcd;
+		}
+
+		if (!dctx) {
+			fprintf(stderr, "#%i ERR: DCD block %08x not found!\n",
+				cmd->lineno, dcdid);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		asize = roundup(dctx->size, SB_BLOCK_SIZE);
+		payload = calloc(1, asize);
+		if (!payload) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		memcpy(payload, dctx->payload, dctx->size);
+
+		cctx->data = payload;
+		cctx->length = asize;
+
+		/* Set the Load DCD flag. */
+		dcd = ROM_LOAD_CMD_FLAG_DCD_LOAD;
+	} else {
+		/* Regular LOAD of a file. */
+		ret = sb_load_file(cctx, tok);
+		if (ret) {
+			fprintf(stderr, "#%i ERR: Cannot load '%s'!\n",
+				cmd->lineno, tok);
+			goto err;
+		}
+	}
+
+	if (cctx->length & (SB_BLOCK_SIZE - 1)) {
+		fprintf(stderr, "#%i ERR: Unaligned payload!\n",
+			cmd->lineno);
+	}
+
+	/*
+	 * Construct the command.
+	 */
+	ccmd->header.checksum	= 0x5a;
+	ccmd->header.tag	= ROM_LOAD_CMD;
+	ccmd->header.flags	= dcd;
+
+	ccmd->load.address	= dest;
+	ccmd->load.count	= cctx->length;
+	ccmd->load.crc32	= pbl_crc32(0,
+					    (const char *)cctx->data,
+					    cctx->length);
+
+	cctx->size = sizeof(*ccmd) + cctx->length;
+
+	/*
+	 * Append the command to the last section.
+	 */
+	if (!sctx->cmd_head) {
+		sctx->cmd_head = cctx;
+		sctx->cmd_tail = cctx;
+	} else {
+		sctx->cmd_tail->cmd = cctx;
+		sctx->cmd_tail = cctx;
+	}
+
+	return 0;
+
+err:
+	free(cctx);
+	return ret;
+}
+
+static int sb_build_command_fill(struct sb_image_ctx *ictx,
+				 struct sb_cmd_list *cmd)
+{
+	struct sb_section_ctx *sctx = ictx->sect_tail;
+	struct sb_cmd_ctx *cctx;
+	struct sb_command *ccmd;
+	char *tok;
+	uint32_t address, pattern, length;
+	int ret;
+
+	cctx = calloc(1, sizeof(*cctx));
+	if (!cctx)
+		return -ENOMEM;
+
+	ccmd = &cctx->payload;
+
+	/*
+	 * Prepare the command.
+	 */
+	tok = strtok(cmd->cmd, " ");
+	if (!tok) {
+		fprintf(stderr, "#%i ERR: Missing FILL address!\n",
+			cmd->lineno);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* Read fill destination address. */
+	ret = sb_token_to_long(tok, &address);
+	if (ret) {
+		fprintf(stderr, "#%i ERR: Incorrect FILL address!\n",
+			cmd->lineno);
+		goto err;
+	}
+
+	tok = strtok(NULL, " ");
+	if (!tok) {
+		fprintf(stderr, "#%i ERR: Missing FILL pattern!\n",
+			cmd->lineno);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* Read fill pattern. */
+	ret = sb_token_to_long(tok, &pattern);
+	if (ret) {
+		fprintf(stderr, "#%i ERR: Incorrect FILL pattern!\n",
+			cmd->lineno);
+		goto err;
+	}
+
+	tok = strtok(NULL, " ");
+	if (!tok) {
+		fprintf(stderr, "#%i ERR: Missing FILL length!\n",
+			cmd->lineno);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* Read fill length. */
+	ret = sb_token_to_long(tok, &length);
+	if (ret) {
+		fprintf(stderr, "#%i ERR: Incorrect FILL length!\n",
+			cmd->lineno);
+		goto err;
+	}
+
+	/*
+	 * Construct the command.
+	 */
+	ccmd->header.checksum	= 0x5a;
+	ccmd->header.tag	= ROM_FILL_CMD;
+
+	ccmd->fill.address	= address;
+	ccmd->fill.count	= length;
+	ccmd->fill.pattern	= pattern;
+
+	cctx->size = sizeof(*ccmd);
+
+	/*
+	 * Append the command to the last section.
+	 */
+	if (!sctx->cmd_head) {
+		sctx->cmd_head = cctx;
+		sctx->cmd_tail = cctx;
+	} else {
+		sctx->cmd_tail->cmd = cctx;
+		sctx->cmd_tail = cctx;
+	}
+
+	return 0;
+
+err:
+	free(cctx);
+	return ret;
+}
+
+static int sb_build_command_jump_call(struct sb_image_ctx *ictx,
+				      struct sb_cmd_list *cmd,
+				      unsigned int is_call)
+{
+	struct sb_section_ctx *sctx = ictx->sect_tail;
+	struct sb_cmd_ctx *cctx;
+	struct sb_command *ccmd;
+	char *tok;
+	uint32_t dest, arg = 0x0;
+	uint32_t hab = 0;
+	int ret;
+	const char *cmdname = is_call ? "CALL" : "JUMP";
+
+	cctx = calloc(1, sizeof(*cctx));
+	if (!cctx)
+		return -ENOMEM;
+
+	ccmd = &cctx->payload;
+
+	/*
+	 * Prepare the command.
+	 */
+	tok = strtok(cmd->cmd, " ");
+	if (!tok) {
+		fprintf(stderr,
+			"#%i ERR: Missing %s address or 'HAB'!\n",
+			cmd->lineno, cmdname);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* Check for "HAB" flag. */
+	if (!strcmp(tok, "HAB")) {
+		hab = is_call ? ROM_CALL_CMD_FLAG_HAB : ROM_JUMP_CMD_FLAG_HAB;
+		tok = strtok(NULL, " ");
+		if (!tok) {
+			fprintf(stderr, "#%i ERR: Missing %s address!\n",
+				cmd->lineno, cmdname);
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+	/* Read the jump/call target address. */
+	ret = sb_token_to_long(tok, &dest);
+	if (ret) {
+		fprintf(stderr, "#%i ERR: Incorrect %s address!\n",
+			cmd->lineno, cmdname);
+		goto err;
+	}
+
+	tok = strtok(NULL, " ");
+	if (tok) {
+		ret = sb_token_to_long(tok, &arg);
+		if (ret) {
+			fprintf(stderr,
+				"#%i ERR: Incorrect %s argument!\n",
+				cmd->lineno, cmdname);
+			goto err;
+		}
+	}
+
+	/*
+	 * Construct the command.
+	 */
+	ccmd->header.checksum	= 0x5a;
+	ccmd->header.tag	= is_call ? ROM_CALL_CMD : ROM_JUMP_CMD;
+	ccmd->header.flags	= hab;
+
+	ccmd->call.address	= dest;
+	ccmd->call.argument	= arg;
+
+	cctx->size = sizeof(*ccmd);
+
+	/*
+	 * Append the command to the last section.
+	 */
+	if (!sctx->cmd_head) {
+		sctx->cmd_head = cctx;
+		sctx->cmd_tail = cctx;
+	} else {
+		sctx->cmd_tail->cmd = cctx;
+		sctx->cmd_tail = cctx;
+	}
+
+	return 0;
+
+err:
+	free(cctx);
+	return ret;
+}
+
+static int sb_build_command_jump(struct sb_image_ctx *ictx,
+				 struct sb_cmd_list *cmd)
+{
+	return sb_build_command_jump_call(ictx, cmd, 0);
+}
+
+static int sb_build_command_call(struct sb_image_ctx *ictx,
+				 struct sb_cmd_list *cmd)
+{
+	return sb_build_command_jump_call(ictx, cmd, 1);
+}
+
+static int sb_build_command_mode(struct sb_image_ctx *ictx,
+				 struct sb_cmd_list *cmd)
+{
+	struct sb_section_ctx *sctx = ictx->sect_tail;
+	struct sb_cmd_ctx *cctx;
+	struct sb_command *ccmd;
+	char *tok;
+	int ret;
+	unsigned int i;
+	uint32_t mode = 0xffffffff;
+
+	cctx = calloc(1, sizeof(*cctx));
+	if (!cctx)
+		return -ENOMEM;
+
+	ccmd = &cctx->payload;
+
+	/*
+	 * Prepare the command.
+	 */
+	tok = strtok(cmd->cmd, " ");
+	if (!tok) {
+		fprintf(stderr, "#%i ERR: Missing MODE boot mode argument!\n",
+			cmd->lineno);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(modetable); i++) {
+		if (!strcmp(tok, modetable[i].name)) {
+			mode = modetable[i].mode;
+			break;
+		}
+
+		if (!modetable[i].altname)
+			continue;
+
+		if (!strcmp(tok, modetable[i].altname)) {
+			mode = modetable[i].mode;
+			break;
+		}
+	}
+
+	if (mode == 0xffffffff) {
+		fprintf(stderr, "#%i ERR: Invalid MODE boot mode argument!\n",
+			cmd->lineno);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/*
+	 * Construct the command.
+	 */
+	ccmd->header.checksum	= 0x5a;
+	ccmd->header.tag	= ROM_MODE_CMD;
+
+	ccmd->mode.mode		= mode;
+
+	cctx->size = sizeof(*ccmd);
+
+	/*
+	 * Append the command to the last section.
+	 */
+	if (!sctx->cmd_head) {
+		sctx->cmd_head = cctx;
+		sctx->cmd_tail = cctx;
+	} else {
+		sctx->cmd_tail->cmd = cctx;
+		sctx->cmd_tail = cctx;
+	}
+
+	return 0;
+
+err:
+	free(cctx);
+	return ret;
+}
+
+static int sb_prefill_image_header(struct sb_image_ctx *ictx)
+{
+	struct sb_boot_image_header *hdr = &ictx->payload;
+
+	/* Fill signatures */
+	memcpy(hdr->signature1, "STMP", 4);
+	memcpy(hdr->signature2, "sgtl", 4);
+
+	/* SB Image version 1.1 */
+	hdr->major_version = SB_VERSION_MAJOR;
+	hdr->minor_version = SB_VERSION_MINOR;
+
+	/* Boot image product version */
+	hdr->product_version.major = htons(0x999);
+	hdr->product_version.minor = htons(0x999);
+	hdr->product_version.revision = htons(0x999);
+	/* Boot image component version */
+	hdr->component_version.major = htons(0x999);
+	hdr->component_version.minor = htons(0x999);
+	hdr->component_version.revision = htons(0x999);
+
+	/* Drive tag must be 0x0 for i.MX23 */
+	hdr->drive_tag = 0;
+
+	hdr->header_blocks =
+		sizeof(struct sb_boot_image_header) / SB_BLOCK_SIZE;
+	hdr->section_header_size =
+		sizeof(struct sb_sections_header) / SB_BLOCK_SIZE;
+	hdr->timestamp_us = sb_get_timestamp() * 1000000;
+
+	hdr->flags = ictx->display_progress ?
+		SB_IMAGE_FLAG_DISPLAY_PROGRESS : 0;
+
+	/* FIXME -- We support only default key */
+	hdr->key_count = 1;
+
+	return 0;
+}
+
+static int sb_postfill_image_header(struct sb_image_ctx *ictx)
+{
+	struct sb_boot_image_header *hdr = &ictx->payload;
+	struct sb_section_ctx *sctx = ictx->sect_head;
+	uint32_t kd_size, sections_blocks;
+	EVP_MD_CTX *md_ctx;
+
+	/* The main SB header size in blocks. */
+	hdr->image_blocks = hdr->header_blocks;
+
+	/* Size of the key dictionary, which has a single zero entry. */
+	kd_size = hdr->key_count * sizeof(struct sb_key_dictionary_key);
+	hdr->image_blocks += kd_size / SB_BLOCK_SIZE;
+
+	/* Now count the payloads. */
+	hdr->section_count = ictx->sect_count;
+	while (sctx) {
+		hdr->image_blocks += sctx->size / SB_BLOCK_SIZE;
+		sctx = sctx->sect;
+	}
+
+	if (!ictx->sect_boot_found) {
+		fprintf(stderr, "ERR: No bootable section selected!\n");
+		return -EINVAL;
+	}
+	hdr->first_boot_section_id = ictx->sect_boot;
+
+	/* Total size of all section headers, in blocks. */
+	sections_blocks = hdr->section_count * hdr->section_header_size;
+	hdr->image_blocks += sections_blocks;
+
+	/* Key dictionary offset. */
+	hdr->key_dictionary_block = hdr->header_blocks + sections_blocks;
+
+	/* Digest of the whole image. */
+	hdr->image_blocks += 2;
+
+	/* Pointer past the dictionary. */
+	hdr->first_boot_tag_block =
+		hdr->key_dictionary_block + kd_size / SB_BLOCK_SIZE;
+
+	/* Compute header digest. */
+	md_ctx = EVP_MD_CTX_new();
+
+	EVP_DigestInit(md_ctx, EVP_sha1());
+	EVP_DigestUpdate(md_ctx, hdr->signature1,
+			 sizeof(struct sb_boot_image_header) -
+			 sizeof(hdr->digest));
+	EVP_DigestFinal(md_ctx, hdr->digest, NULL);
+	EVP_MD_CTX_free(md_ctx);
+
+	return 0;
+}
+
+static int sb_fixup_sections_and_tags(struct sb_image_ctx *ictx)
+{
+	/* Fixup the placement of sections. */
+	struct sb_boot_image_header *ihdr = &ictx->payload;
+	struct sb_section_ctx *sctx = ictx->sect_head;
+	struct sb_sections_header *shdr;
+	struct sb_cmd_ctx *cctx;
+	struct sb_command *ccmd;
+	uint32_t offset = ihdr->first_boot_tag_block;
+
+	while (sctx) {
+		shdr = &sctx->payload;
+
+		/* Fill in the section TAG offset. */
+		shdr->section_offset = offset + 1;
+		offset += shdr->section_size;
+
+		/* Section length is measured from the TAG block. */
+		shdr->section_size--;
+
+		/* Fixup the TAG command. */
+		cctx = sctx->cmd_head;
+		while (cctx) {
+			ccmd = &cctx->payload;
+			if (ccmd->header.tag == ROM_TAG_CMD) {
+				ccmd->tag.section_number = shdr->section_number;
+				ccmd->tag.section_length = shdr->section_size;
+				ccmd->tag.section_flags = shdr->section_flags;
+			}
+
+			/* Update the command checksum. */
+			ccmd->header.checksum = sb_command_checksum(ccmd);
+
+			cctx = cctx->cmd;
+		}
+
+		sctx = sctx->sect;
+	}
+
+	return 0;
+}
+
+static int sb_parse_line(struct sb_image_ctx *ictx, struct sb_cmd_list *cmd)
+{
+	char *tok;
+	char *line = cmd->cmd;
+	char *rptr = NULL;
+	int ret;
+
+	/* Analyze the identifier on this line first. */
+	tok = strtok_r(line, " ", &rptr);
+	if (!tok || (strlen(tok) == 0)) {
+		fprintf(stderr, "#%i ERR: Invalid line!\n", cmd->lineno);
+		return -EINVAL;
+	}
+
+	cmd->cmd = rptr;
+
+	/* set DISPLAY_PROGRESS flag */
+	if (!strcmp(tok, "DISPLAYPROGRESS")) {
+		ictx->display_progress = 1;
+		return 0;
+	}
+
+	/* DCD */
+	if (!strcmp(tok, "DCD")) {
+		ictx->in_section = 0;
+		ictx->in_dcd = 1;
+		sb_build_dcd(ictx, cmd);
+		return 0;
+	}
+
+	/* Section */
+	if (!strcmp(tok, "SECTION")) {
+		ictx->in_section = 1;
+		ictx->in_dcd = 0;
+		sb_build_section(ictx, cmd);
+		return 0;
+	}
+
+	if (!ictx->in_section && !ictx->in_dcd) {
+		fprintf(stderr, "#%i ERR: Data outside of a section!\n",
+			cmd->lineno);
+		return -EINVAL;
+	}
+
+	if (ictx->in_section) {
+		/* Section commands */
+		if (!strcmp(tok, "NOP")) {
+			ret = sb_build_command_nop(ictx);
+		} else if (!strcmp(tok, "TAG")) {
+			ret = sb_build_command_tag(ictx, cmd);
+		} else if (!strcmp(tok, "LOAD")) {
+			ret = sb_build_command_load(ictx, cmd);
+		} else if (!strcmp(tok, "FILL")) {
+			ret = sb_build_command_fill(ictx, cmd);
+		} else if (!strcmp(tok, "JUMP")) {
+			ret = sb_build_command_jump(ictx, cmd);
+		} else if (!strcmp(tok, "CALL")) {
+			ret = sb_build_command_call(ictx, cmd);
+		} else if (!strcmp(tok, "MODE")) {
+			ret = sb_build_command_mode(ictx, cmd);
+		} else {
+			fprintf(stderr,
+				"#%i ERR: Unsupported instruction '%s'!\n",
+				cmd->lineno, tok);
+			return -ENOTSUP;
+		}
+	} else if (ictx->in_dcd) {
+		char *lptr;
+		uint32_t ilen = '1';
+
+		tok = strtok_r(tok, ".", &lptr);
+		if (!tok || (strlen(tok) == 0) || (lptr && strlen(lptr) != 1)) {
+			fprintf(stderr, "#%i ERR: Invalid line!\n",
+				cmd->lineno);
+			return -EINVAL;
+		}
+
+		if (lptr &&
+		    (lptr[0] != '1' && lptr[0] != '2' && lptr[0] != '4')) {
+			fprintf(stderr, "#%i ERR: Invalid instruction width!\n",
+				cmd->lineno);
+			return -EINVAL;
+		}
+
+		if (lptr)
+			ilen = lptr[0] - '1';
+
+		/* DCD commands */
+		if (!strcmp(tok, "WRITE")) {
+			ret = sb_build_dcd_block(ictx, cmd,
+						 SB_DCD_WRITE | ilen);
+		} else if (!strcmp(tok, "ANDC")) {
+			ret = sb_build_dcd_block(ictx, cmd,
+						 SB_DCD_ANDC | ilen);
+		} else if (!strcmp(tok, "ORR")) {
+			ret = sb_build_dcd_block(ictx, cmd,
+						 SB_DCD_ORR | ilen);
+		} else if (!strcmp(tok, "EQZ")) {
+			ret = sb_build_dcd_block(ictx, cmd,
+						 SB_DCD_CHK_EQZ | ilen);
+		} else if (!strcmp(tok, "EQ")) {
+			ret = sb_build_dcd_block(ictx, cmd,
+						 SB_DCD_CHK_EQ | ilen);
+		} else if (!strcmp(tok, "NEQ")) {
+			ret = sb_build_dcd_block(ictx, cmd,
+						 SB_DCD_CHK_NEQ | ilen);
+		} else if (!strcmp(tok, "NEZ")) {
+			ret = sb_build_dcd_block(ictx, cmd,
+						 SB_DCD_CHK_NEZ | ilen);
+		} else if (!strcmp(tok, "NOOP")) {
+			ret = sb_build_dcd_block(ictx, cmd, SB_DCD_NOOP);
+		} else {
+			fprintf(stderr,
+				"#%i ERR: Unsupported instruction '%s'!\n",
+				cmd->lineno, tok);
+			return -ENOTSUP;
+		}
+	} else {
+		fprintf(stderr, "#%i ERR: Unsupported instruction '%s'!\n",
+			cmd->lineno, tok);
+		return -ENOTSUP;
+	}
+
+	/*
+	 * Here we have at least one section with one command, otherwise we
+	 * would have failed already higher above.
+	 *
+	 * FIXME -- should the updating happen here ?
+	 */
+	if (ictx->in_section && !ret) {
+		ictx->sect_tail->size += ictx->sect_tail->cmd_tail->size;
+		ictx->sect_tail->payload.section_size =
+			ictx->sect_tail->size / SB_BLOCK_SIZE;
+	}
+
+	return ret;
+}
+
+static int sb_load_cmdfile(struct sb_image_ctx *ictx)
+{
+	struct sb_cmd_list cmd;
+	int lineno = 1;
+	FILE *fp;
+	char *line = NULL;
+	ssize_t rlen;
+	size_t len = 0;
+
+	fp = fopen(ictx->cfg_filename, "r");
+	if (!fp)
+		goto err_file;
+
+	while ((rlen = getline(&line, &len, fp)) > 0) {
+		memset(&cmd, 0, sizeof(cmd));
+
+		/* Strip the trailing newline, if any. */
+		if (line[rlen - 1] == '\n')
+			line[rlen - 1] = '\0';
+
+		cmd.cmd = line;
+		cmd.len = rlen;
+		cmd.lineno = lineno++;
+
+		sb_parse_line(ictx, &cmd);
+	}
+
+	free(line);
+
+	fclose(fp);
+
+	return 0;
+
+err_file:
+	fprintf(stderr, "ERR: Failed to load file \"%s\"\n",
+		ictx->cfg_filename);
+	return -EINVAL;
+}
+
+static int sb_build_tree_from_cfg(struct sb_image_ctx *ictx)
+{
+	int ret;
+
+	ret = sb_load_cmdfile(ictx);
+	if (ret)
+		return ret;
+
+	ret = sb_prefill_image_header(ictx);
+	if (ret)
+		return ret;
+
+	ret = sb_postfill_image_header(ictx);
+	if (ret)
+		return ret;
+
+	ret = sb_fixup_sections_and_tags(ictx);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int sb_verify_image_header(struct sb_image_ctx *ictx,
+				  FILE *fp, long fsize)
+{
+	/* Verify static fields in the image header. */
+	struct sb_boot_image_header *hdr = &ictx->payload;
+	const char *stat[2] = { "[PASS]", "[FAIL]" };
+	struct tm tm;
+	int sz, ret = 0;
+	unsigned char digest[20];
+	EVP_MD_CTX *md_ctx;
+	unsigned long size;
+
+	/* Start image-wide crypto. */
+	ictx->md_ctx = EVP_MD_CTX_new();
+	EVP_DigestInit(ictx->md_ctx, EVP_sha1());
+
+	soprintf(ictx, "---------- Verifying SB Image Header ----------\n");
+
+	size = fread(&ictx->payload, 1, sizeof(ictx->payload), fp);
+	if (size != sizeof(ictx->payload)) {
+		fprintf(stderr, "ERR: SB image header too short!\n");
+		return -EINVAL;
+	}
+
+	/* Compute header digest. */
+	md_ctx = EVP_MD_CTX_new();
+	EVP_DigestInit(md_ctx, EVP_sha1());
+	EVP_DigestUpdate(md_ctx, hdr->signature1,
+			 sizeof(struct sb_boot_image_header) -
+			 sizeof(hdr->digest));
+	EVP_DigestFinal(md_ctx, digest, NULL);
+	EVP_MD_CTX_free(md_ctx);
+
+	sb_aes_init(ictx, NULL, 1);
+	sb_encrypt_sb_header(ictx);
+
+	if (memcmp(digest, hdr->digest, 20))
+		ret = -EINVAL;
+	soprintf(ictx, "%s Image header checksum:        %s\n", stat[!!ret],
+		 ret ? "BAD" : "OK");
+	if (ret)
+		return ret;
+
+	if (memcmp(hdr->signature1, "STMP", 4) ||
+	    memcmp(hdr->signature2, "sgtl", 4))
+		ret = -EINVAL;
+	soprintf(ictx, "%s Signatures:                   '%.4s' '%.4s'\n",
+		 stat[!!ret], hdr->signature1, hdr->signature2);
+	if (ret)
+		return ret;
+
+	if ((hdr->major_version != SB_VERSION_MAJOR) ||
+	    ((hdr->minor_version != 1) && (hdr->minor_version != 2)))
+		ret = -EINVAL;
+	soprintf(ictx, "%s Image version:                v%i.%i\n", stat[!!ret],
+		 hdr->major_version, hdr->minor_version);
+	if (ret)
+		return ret;
+
+	ret = sb_get_time(hdr->timestamp_us / 1000000, &tm);
+	soprintf(ictx,
+		 "%s Creation time:                %02i:%02i:%02i %02i/%02i/%04i\n",
+		 stat[!!ret], tm.tm_hour, tm.tm_min, tm.tm_sec,
+		 tm.tm_mday, tm.tm_mon, tm.tm_year + 2000);
+	if (ret)
+		return ret;
+
+	soprintf(ictx, "%s Product version:              %x.%x.%x\n", stat[0],
+		 ntohs(hdr->product_version.major),
+		 ntohs(hdr->product_version.minor),
+		 ntohs(hdr->product_version.revision));
+	soprintf(ictx, "%s Component version:            %x.%x.%x\n", stat[0],
+		 ntohs(hdr->component_version.major),
+		 ntohs(hdr->component_version.minor),
+		 ntohs(hdr->component_version.revision));
+
+	if (hdr->flags & ~SB_IMAGE_FLAGS_MASK)
+		ret = -EINVAL;
+	soprintf(ictx, "%s Image flags:                  %s\n", stat[!!ret],
+		 hdr->flags & SB_IMAGE_FLAG_DISPLAY_PROGRESS ?
+		 "Display_progress" : "");
+	if (ret)
+		return ret;
+
+	if (hdr->drive_tag != 0)
+		ret = -EINVAL;
+	soprintf(ictx, "%s Drive tag:                    %i\n", stat[!!ret],
+		 hdr->drive_tag);
+	if (ret)
+		return ret;
+
+	sz = sizeof(struct sb_boot_image_header) / SB_BLOCK_SIZE;
+	if (hdr->header_blocks != sz)
+		ret = -EINVAL;
+	soprintf(ictx, "%s Image header size (blocks):   %i\n", stat[!!ret],
+		 hdr->header_blocks);
+	if (ret)
+		return ret;
+
+	sz = sizeof(struct sb_sections_header) / SB_BLOCK_SIZE;
+	if (hdr->section_header_size != sz)
+		ret = -EINVAL;
+	soprintf(ictx, "%s Section header size (blocks): %i\n", stat[!!ret],
+		 hdr->section_header_size);
+	if (ret)
+		return ret;
+
+	soprintf(ictx, "%s Sections count:               %i\n", stat[!!ret],
+		 hdr->section_count);
+	soprintf(ictx, "%s First bootable section        %i\n", stat[!!ret],
+		 hdr->first_boot_section_id);
+
+	if (hdr->image_blocks != fsize / SB_BLOCK_SIZE)
+		ret = -EINVAL;
+	soprintf(ictx, "%s Image size (blocks):          %i\n", stat[!!ret],
+		 hdr->image_blocks);
+	if (ret)
+		return ret;
+
+	sz = hdr->header_blocks + hdr->section_header_size * hdr->section_count;
+	if (hdr->key_dictionary_block != sz)
+		ret = -EINVAL;
+	soprintf(ictx, "%s Key dict offset (blocks):     %i\n", stat[!!ret],
+		 hdr->key_dictionary_block);
+	if (ret)
+		return ret;
+
+	if (hdr->key_count != 1)
+		ret = -EINVAL;
+	soprintf(ictx, "%s Number of encryption keys:    %i\n", stat[!!ret],
+		 hdr->key_count);
+	if (ret)
+		return ret;
+
+	sz = hdr->header_blocks + hdr->section_header_size * hdr->section_count;
+	sz += hdr->key_count *
+		sizeof(struct sb_key_dictionary_key) / SB_BLOCK_SIZE;
+	if (hdr->first_boot_tag_block != (unsigned)sz)
+		ret = -EINVAL;
+	soprintf(ictx, "%s First TAG block (blocks):     %i\n", stat[!!ret],
+		 hdr->first_boot_tag_block);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void sb_decrypt_tag(struct sb_image_ctx *ictx,
+		struct sb_cmd_ctx *cctx)
+{
+	EVP_MD_CTX *md_ctx = ictx->md_ctx;
+	struct sb_command *cmd = &cctx->payload;
+
+	sb_aes_crypt(ictx, (uint8_t *)&cctx->c_payload,
+		     (uint8_t *)&cctx->payload, sizeof(*cmd));
+	EVP_DigestUpdate(md_ctx, &cctx->c_payload, sizeof(*cmd));
+}
+
+static int sb_verify_command(struct sb_image_ctx *ictx,
+			     struct sb_cmd_ctx *cctx, FILE *fp,
+			     unsigned long *tsize)
+{
+	struct sb_command *ccmd = &cctx->payload;
+	unsigned long size, asize;
+	char *csum, *flag = "";
+	int ret;
+	unsigned int i;
+	uint8_t csn, csc = ccmd->header.checksum;
+	ccmd->header.checksum = 0x5a;
+	csn = sb_command_checksum(ccmd);
+	ccmd->header.checksum = csc;
+
+	if (csc == csn)
+		ret = 0;
+	else
+		ret = -EINVAL;
+	csum = ret ? "checksum BAD" : "checksum OK";
+
+	switch (ccmd->header.tag) {
+	case ROM_NOP_CMD:
+		soprintf(ictx, " NOOP # %s\n", csum);
+		return ret;
+	case ROM_TAG_CMD:
+		if (ccmd->header.flags & ROM_TAG_CMD_FLAG_ROM_LAST_TAG)
+			flag = "LAST";
+		soprintf(ictx, " TAG %s # %s\n", flag, csum);
+		sb_aes_reinit(ictx, 0);
+		return ret;
+	case ROM_LOAD_CMD:
+		soprintf(ictx, " LOAD addr=0x%08x length=0x%08x # %s\n",
+			 ccmd->load.address, ccmd->load.count, csum);
+
+		cctx->length = ccmd->load.count;
+		asize = roundup(cctx->length, SB_BLOCK_SIZE);
+		cctx->data = malloc(asize);
+		if (!cctx->data)
+			return -ENOMEM;
+
+		size = fread(cctx->data, 1, asize, fp);
+		if (size != asize) {
+			fprintf(stderr,
+				"ERR: SB LOAD command payload too short!\n");
+			return -EINVAL;
+		}
+
+		*tsize += size;
+
+		EVP_DigestUpdate(ictx->md_ctx, cctx->data, asize);
+		sb_aes_crypt(ictx, cctx->data, cctx->data, asize);
+
+		if (ccmd->load.crc32 != pbl_crc32(0,
+						  (const char *)cctx->data,
+						  asize)) {
+			fprintf(stderr,
+				"ERR: SB LOAD command payload CRC32 invalid!\n");
+			return -EINVAL;
+		}
+		return 0;
+	case ROM_FILL_CMD:
+		soprintf(ictx,
+			 " FILL addr=0x%08x length=0x%08x pattern=0x%08x # %s\n",
+			 ccmd->fill.address, ccmd->fill.count,
+			 ccmd->fill.pattern, csum);
+		return 0;
+	case ROM_JUMP_CMD:
+		if (ccmd->header.flags & ROM_JUMP_CMD_FLAG_HAB)
+			flag = " HAB";
+		soprintf(ictx,
+			 " JUMP%s addr=0x%08x r0_arg=0x%08x # %s\n",
+			 flag, ccmd->jump.address, ccmd->jump.argument, csum);
+		return 0;
+	case ROM_CALL_CMD:
+		if (ccmd->header.flags & ROM_CALL_CMD_FLAG_HAB)
+			flag = " HAB";
+		soprintf(ictx,
+			 " CALL%s addr=0x%08x r0_arg=0x%08x # %s\n",
+			 flag, ccmd->call.address, ccmd->call.argument, csum);
+		return 0;
+	case ROM_MODE_CMD:
+		for (i = 0; i < ARRAY_SIZE(modetable); i++) {
+			if (ccmd->mode.mode == modetable[i].mode) {
+				soprintf(ictx, " MODE %s # %s\n",
+					 modetable[i].name, csum);
+				break;
+			}
+		}
+		fprintf(stderr, " MODE !INVALID! # %s\n", csum);
+		return 0;
+	}
+
+	return ret;
+}
+
+static int sb_verify_commands(struct sb_image_ctx *ictx,
+			      struct sb_section_ctx *sctx, FILE *fp)
+{
+	unsigned long size, tsize = 0;
+	struct sb_cmd_ctx *cctx;
+	int ret;
+
+	sb_aes_reinit(ictx, 0);
+
+	while (tsize < sctx->size) {
+		cctx = calloc(1, sizeof(*cctx));
+		if (!cctx)
+			return -ENOMEM;
+		if (!sctx->cmd_head) {
+			sctx->cmd_head = cctx;
+			sctx->cmd_tail = cctx;
+		} else {
+			sctx->cmd_tail->cmd = cctx;
+			sctx->cmd_tail = cctx;
+		}
+
+		size = fread(&cctx->c_payload, 1, sizeof(cctx->c_payload), fp);
+		if (size != sizeof(cctx->c_payload)) {
+			fprintf(stderr, "ERR: SB command header too short!\n");
+			return -EINVAL;
+		}
+
+		tsize += size;
+
+		sb_decrypt_tag(ictx, cctx);
+
+		ret = sb_verify_command(ictx, cctx, fp, &tsize);
+		if (ret)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int sb_verify_sections_cmds(struct sb_image_ctx *ictx, FILE *fp)
+{
+	struct sb_boot_image_header *hdr = &ictx->payload;
+	struct sb_sections_header *shdr;
+	unsigned int i;
+	int ret;
+	struct sb_section_ctx *sctx;
+	unsigned long size;
+	char *bootable = "";
+
+	soprintf(ictx, "----- Verifying  SB Sections and Commands -----\n");
+
+	for (i = 0; i < hdr->section_count; i++) {
+		sctx = calloc(1, sizeof(*sctx));
+		if (!sctx)
+			return -ENOMEM;
+		if (!ictx->sect_head) {
+			ictx->sect_head = sctx;
+			ictx->sect_tail = sctx;
+		} else {
+			ictx->sect_tail->sect = sctx;
+			ictx->sect_tail = sctx;
+		}
+
+		size = fread(&sctx->payload, 1, sizeof(sctx->payload), fp);
+		if (size != sizeof(sctx->payload)) {
+			fprintf(stderr, "ERR: SB section header too short!\n");
+			return -EINVAL;
+		}
+	}
+
+	size = fread(&ictx->sb_dict_key, 1, sizeof(ictx->sb_dict_key), fp);
+	if (size != sizeof(ictx->sb_dict_key)) {
+		fprintf(stderr, "ERR: SB key dictionary too short!\n");
+		return -EINVAL;
+	}
+
+	sb_encrypt_sb_sections_header(ictx);
+	sb_aes_reinit(ictx, 0);
+	sb_decrypt_key_dictionary_key(ictx);
+
+	sb_aes_reinit(ictx, 0);
+
+	sctx = ictx->sect_head;
+	while (sctx) {
+		shdr = &sctx->payload;
+
+		if (shdr->section_flags & SB_SECTION_FLAG_BOOTABLE) {
+			sctx->boot = 1;
+			bootable = " BOOTABLE";
+		}
+
+		sctx->size = (shdr->section_size * SB_BLOCK_SIZE) +
+			     sizeof(struct sb_command);
+		soprintf(ictx, "SECTION 0x%x%s # size = %i bytes\n",
+			 shdr->section_number, bootable, sctx->size);
+
+		if (shdr->section_flags & ~SB_SECTION_FLAG_BOOTABLE)
+			fprintf(stderr, " WARN: Unknown section flag(s) %08x\n",
+				shdr->section_flags);
+
+		if ((shdr->section_flags & SB_SECTION_FLAG_BOOTABLE) &&
+		    (hdr->first_boot_section_id != shdr->section_number)) {
+			fprintf(stderr,
+				" WARN: Bootable section does ID not match image header ID!\n");
+		}
+
+		ret = sb_verify_commands(ictx, sctx, fp);
+		if (ret)
+			return ret;
+
+		sctx = sctx->sect;
+	}
+
+	/*
+	 * FIXME IDEA:
+	 * check if the first TAG command is at sctx->section_offset
+	 */
+	return 0;
+}
+
+static int sb_verify_image_end(struct sb_image_ctx *ictx,
+			       FILE *fp, off_t filesz)
+{
+	uint8_t digest[32];
+	unsigned long size;
+	off_t pos;
+	int ret;
+
+	soprintf(ictx, "------------- Verifying image end -------------\n");
+
+	size = fread(digest, 1, sizeof(digest), fp);
+	if (size != sizeof(digest)) {
+		fprintf(stderr, "ERR: SB key dictionary too short!\n");
+		return -EINVAL;
+	}
+
+	pos = ftell(fp);
+	if (pos != filesz) {
+		fprintf(stderr, "ERR: Trailing data past the image!\n");
+		return -EINVAL;
+	}
+
+	/* Check the image digest. */
+	EVP_DigestFinal(ictx->md_ctx, ictx->digest, NULL);
+	EVP_MD_CTX_free(ictx->md_ctx);
+
+	/* Decrypt the image digest from the input image. */
+	sb_aes_reinit(ictx, 0);
+	sb_aes_crypt(ictx, digest, digest, sizeof(digest));
+
+	/* Check all 20 bytes of the SHA1 hash. */
+	ret = memcmp(digest, ictx->digest, 20) ? -EINVAL : 0;
+
+	if (ret)
+		soprintf(ictx, "[FAIL] Full-image checksum:          BAD\n");
+	else
+		soprintf(ictx, "[PASS] Full-image checksum:          OK\n");
+
+	return ret;
+}
+
+
+static int sb_build_tree_from_img(struct sb_image_ctx *ictx)
+{
+	long filesize;
+	int ret;
+	FILE *fp;
+
+	if (!ictx->input_filename) {
+		fprintf(stderr, "ERR: Missing filename!\n");
+		return -EINVAL;
+	}
+
+	fp = fopen(ictx->input_filename, "r");
+	if (!fp)
+		goto err_open;
+
+	ret = fseek(fp, 0, SEEK_END);
+	if (ret < 0)
+		goto err_file;
+
+	filesize = ftell(fp);
+	if (filesize < 0)
+		goto err_file;
+
+	ret = fseek(fp, 0, SEEK_SET);
+	if (ret < 0)
+		goto err_file;
+
+	if (filesize < (signed)sizeof(ictx->payload)) {
+		fprintf(stderr, "ERR: File too short!\n");
+		goto err_file;
+	}
+
+	if (filesize & (SB_BLOCK_SIZE - 1)) {
+		fprintf(stderr, "ERR: The file is not aligned!\n");
+		goto err_file;
+	}
+
+	/* Load and verify image header */
+	ret = sb_verify_image_header(ictx, fp, filesize);
+	if (ret)
+		goto err_verify;
+
+	/* Load and verify sections and commands */
+	ret = sb_verify_sections_cmds(ictx, fp);
+	if (ret)
+		goto err_verify;
+
+	ret = sb_verify_image_end(ictx, fp, filesize);
+	if (ret)
+		goto err_verify;
+
+	ret = 0;
+
+err_verify:
+	soprintf(ictx, "-------------------- Result -------------------\n");
+	soprintf(ictx, "Verification %s\n", ret ? "FAILED" : "PASSED");
+
+	/* Stop the encryption session. */
+	sb_aes_deinit(ictx->cipher_ctx);
+
+	fclose(fp);
+	return ret;
+
+err_file:
+	fclose(fp);
+err_open:
+	fprintf(stderr, "ERR: Failed to load file \"%s\"\n",
+		ictx->input_filename);
+	return -EINVAL;
+}
+
+static void sb_free_image(struct sb_image_ctx *ictx)
+{
+	struct sb_section_ctx *sctx = ictx->sect_head, *s_head;
+	struct sb_dcd_ctx *dctx = ictx->dcd_head, *d_head;
+	struct sb_cmd_ctx *cctx, *c_head;
+
+	while (sctx) {
+		s_head = sctx;
+		c_head = sctx->cmd_head;
+
+		while (c_head) {
+			cctx = c_head;
+			c_head = c_head->cmd;
+			if (cctx->data)
+				free(cctx->data);
+			free(cctx);
+		}
+
+		sctx = sctx->sect;
+		free(s_head);
+	}
+
+	while (dctx) {
+		d_head = dctx;
+		dctx = dctx->dcd;
+		free(d_head->payload);
+		free(d_head);
+	}
+}
+
+/*
+ * MXSSB-MKIMAGE glue code.
+ */
+static int mxsimage_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_MXSIMAGE)
+		return EXIT_SUCCESS;
+	else
+		return EXIT_FAILURE;
+}
+
+static void mxsimage_set_header(void *ptr, struct stat *sbuf, int ifd,
+				struct image_tool_params *params)
+{
+}
+
+int mxsimage_check_params(struct image_tool_params *params)
+{
+	if (!params)
+		return -1;
+	if (!strlen(params->imagename)) {
+		fprintf(stderr,
+			"Error: %s - Configuration file not specified, it is needed for mxsimage generation\n",
+			params->cmdname);
+		return -1;
+	}
+
+	/*
+	 * Check parameters: XIP is not allowed, and incompatible parameters
+	 * must not be specified at the same time. For example, if a listing
+	 * is requested, a data image must not be provided.
+	 */
+	return	(params->dflag && (params->fflag || params->lflag)) ||
+		(params->fflag && (params->dflag || params->lflag)) ||
+		(params->lflag && (params->dflag || params->fflag)) ||
+		(params->xflag) || !(strlen(params->imagename));
+}
+
+static int mxsimage_verify_print_header(char *file, int silent)
+{
+	int ret;
+	struct sb_image_ctx ctx;
+
+	memset(&ctx, 0, sizeof(ctx));
+
+	ctx.input_filename = file;
+	ctx.silent_dump = silent;
+
+	ret = sb_build_tree_from_img(&ctx);
+	sb_free_image(&ctx);
+
+	return ret;
+}
+
+char *imagefile;
+static int mxsimage_verify_header(unsigned char *ptr, int image_size,
+			struct image_tool_params *params)
+{
+	struct sb_boot_image_header *hdr;
+
+	if (!ptr)
+		return -EINVAL;
+
+	hdr = (struct sb_boot_image_header *)ptr;
+
+	/*
+	 * Check if the header contains the MXS image signatures,
+	 * if so, do a full-image verification.
+	 */
+	if (memcmp(hdr->signature1, "STMP", 4) ||
+	    memcmp(hdr->signature2, "sgtl", 4))
+		return -EINVAL;
+
+	imagefile = params->imagefile;
+
+	return mxsimage_verify_print_header(params->imagefile, 1);
+}
+
+static void mxsimage_print_header(const void *hdr)
+{
+	if (imagefile)
+		mxsimage_verify_print_header(imagefile, 0);
+}
+
+static int sb_build_image(struct sb_image_ctx *ictx,
+			  struct image_type_params *tparams)
+{
+	struct sb_boot_image_header *sb_header = &ictx->payload;
+	struct sb_section_ctx *sctx;
+	struct sb_cmd_ctx *cctx;
+	struct sb_command *ccmd;
+	struct sb_key_dictionary_key *sb_dict_key = &ictx->sb_dict_key;
+
+	uint8_t *image, *iptr;
+
+	/* Calculate image size. */
+	uint32_t size = sizeof(*sb_header) +
+		ictx->sect_count * sizeof(struct sb_sections_header) +
+		sizeof(*sb_dict_key) + sizeof(ictx->digest);
+
+	sctx = ictx->sect_head;
+	while (sctx) {
+		size += sctx->size;
+		sctx = sctx->sect;
+	}
+
+	image = malloc(size);
+	if (!image)
+		return -ENOMEM;
+	iptr = image;
+
+	memcpy(iptr, sb_header, sizeof(*sb_header));
+	iptr += sizeof(*sb_header);
+
+	sctx = ictx->sect_head;
+	while (sctx) {
+		memcpy(iptr, &sctx->payload, sizeof(struct sb_sections_header));
+		iptr += sizeof(struct sb_sections_header);
+		sctx = sctx->sect;
+	}
+
+	memcpy(iptr, sb_dict_key, sizeof(*sb_dict_key));
+	iptr += sizeof(*sb_dict_key);
+
+	sctx = ictx->sect_head;
+	while (sctx) {
+		cctx = sctx->cmd_head;
+		while (cctx) {
+			ccmd = &cctx->payload;
+
+			memcpy(iptr, &cctx->c_payload, sizeof(cctx->payload));
+			iptr += sizeof(cctx->payload);
+
+			if (ccmd->header.tag == ROM_LOAD_CMD) {
+				memcpy(iptr, cctx->data, cctx->length);
+				iptr += cctx->length;
+			}
+
+			cctx = cctx->cmd;
+		}
+
+		sctx = sctx->sect;
+	}
+
+	memcpy(iptr, ictx->digest, sizeof(ictx->digest));
+	iptr += sizeof(ictx->digest);
+
+	/* Configure the mkimage */
+	tparams->hdr = image;
+	tparams->header_size = size;
+
+	return 0;
+}
+
+static int mxsimage_generate(struct image_tool_params *params,
+	struct image_type_params *tparams)
+{
+	int ret;
+	struct sb_image_ctx ctx;
+
+	/* Do not copy the U-Boot image! */
+	params->skipcpy = 1;
+
+	memset(&ctx, 0, sizeof(ctx));
+
+	ctx.cfg_filename = params->imagename;
+	ctx.output_filename = params->imagefile;
+
+	ret = sb_build_tree_from_cfg(&ctx);
+	if (ret)
+		goto fail;
+
+	ret = sb_encrypt_image(&ctx);
+	if (!ret)
+		ret = sb_build_image(&ctx, tparams);
+
+fail:
+	sb_free_image(&ctx);
+
+	return ret;
+}
+
+/*
+ * mxsimage parameters
+ */
+U_BOOT_IMAGE_TYPE(
+	mxsimage,
+	"Freescale MXS Boot Image support",
+	0,
+	NULL,
+	mxsimage_check_params,
+	mxsimage_verify_header,
+	mxsimage_print_header,
+	mxsimage_set_header,
+	NULL,
+	mxsimage_check_image_types,
+	NULL,
+	mxsimage_generate
+);
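+
+/*
+ * Typical use (a sketch -- the file names below are placeholders):
+ *
+ *   mkimage -n <image config file> -T mxsimage <output image>
+ *
+ * mkimage then runs mxsimage_check_params() and mxsimage_generate(),
+ * which assembles the whole SB image into tparams->hdr; with
+ * params->skipcpy set, that buffer is what gets written out as the
+ * resulting image.
+ */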
+#endif
diff --git a/tools/u-boot-tools/mxsimage.h b/tools/u-boot-tools/mxsimage.h
new file mode 100644
index 0000000000000000000000000000000000000000..a6a45a4b03a0a42e049b1731fc8be32f1c9add05
--- /dev/null
+++ b/tools/u-boot-tools/mxsimage.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Freescale i.MX28 SB image generator
+ *
+ * Copyright (C) 2012 Marek Vasut <marex@denx.de>
+ */
+
+#ifndef __MXSSB_H__
+#define __MXSSB_H__
+
+#include <stdint.h>
+#include <arpa/inet.h>
+
+#define SB_BLOCK_SIZE		16
+
+#define roundup(x, y)		((((x) + ((y) - 1)) / (y)) * (y))
+#define ARRAY_SIZE(x)		(sizeof(x) / sizeof((x)[0]))
+
+struct sb_boot_image_version {
+	uint16_t	major;
+	uint16_t	pad0;
+	uint16_t	minor;
+	uint16_t	pad1;
+	uint16_t	revision;
+	uint16_t	pad2;
+};
+
+struct sb_boot_image_header {
+	union {
+		/* SHA1 of the header. */
+		uint8_t	digest[20];
+		struct {
+			/* CBC-MAC initialization vector. */
+			uint8_t iv[16];
+			uint8_t extra[4];
+		};
+	};
+	/* 'STMP' */
+	uint8_t		signature1[4];
+	/* Major version of the image format. */
+	uint8_t		major_version;
+	/* Minor version of the image format. */
+	uint8_t		minor_version;
+	/* Flags associated with the image. */
+	uint16_t	flags;
+	/* Size of the image in 16b blocks. */
+	uint32_t	image_blocks;
+	/* Offset of the first tag in 16b blocks. */
+	uint32_t	first_boot_tag_block;
+	/* ID of the section to boot from. */
+	uint32_t	first_boot_section_id;
+	/* Amount of crypto keys. */
+	uint16_t	key_count;
+	/* Offset to the key dictionary in 16b blocks. */
+	uint16_t	key_dictionary_block;
+	/* Size of this header in 16b blocks. */
+	uint16_t	header_blocks;
+	/* Amount of section headers. */
+	uint16_t	section_count;
+	/* Section header size in 16b blocks. */
+	uint16_t	section_header_size;
+	/* Padding to align timestamp to uint64_t. */
+	uint8_t		padding0[2];
+	/* 'sgtl' (since v1.1) */
+	uint8_t		signature2[4];
+	/* Image generation date, in microseconds since 1.1.2000. */
+	uint64_t	timestamp_us;
+	/* Product version. */
+	struct sb_boot_image_version
+			product_version;
+	/* Component version. */
+	struct sb_boot_image_version
+			component_version;
+	/* Drive tag for the system drive. (since v1.1) */
+	uint16_t	drive_tag;
+	/* Padding. */
+	uint8_t		padding1[6];
+};
+
+#define	SB_VERSION_MAJOR	1
+#define	SB_VERSION_MINOR	1
+
+/* Enable the HTLLC boot progress report. */
+#define SB_IMAGE_FLAG_DISPLAY_PROGRESS	(1 << 0)
+#define SB_IMAGE_FLAGS_MASK SB_IMAGE_FLAG_DISPLAY_PROGRESS
+
+struct sb_key_dictionary_key {
+	/* The CBC-MAC of image and sections header. */
+	uint8_t		cbc_mac[SB_BLOCK_SIZE];
+	/* The AES key encrypted by image key (zero). */
+	uint8_t		key[SB_BLOCK_SIZE];
+};
+
+struct sb_ivt_header {
+	uint32_t	header;
+	uint32_t	entry;
+	uint32_t	reserved1;
+	uint32_t	dcd;
+	uint32_t	boot_data;
+	uint32_t	self;
+	uint32_t	csf;
+	uint32_t	reserved2;
+};
+
+#define	SB_HAB_IVT_TAG			0xd1UL
+#define	SB_HAB_DCD_TAG			0xd2UL
+
+#define	SB_HAB_VERSION			0x40UL
+
+/*
+ * The "size" field in the IVT header is not naturally aligned;
+ * use this helper to fill the first 4 bytes of the IVT header
+ * without causing issues on some systems (esp. M68k, PPC,
+ * MIPS-BE, ARM-BE).
+ */
+static inline uint32_t sb_hab_ivt_header(void)
+{
+	uint32_t ret = 0;
+	ret |= SB_HAB_IVT_TAG << 24;
+	ret |= sizeof(struct sb_ivt_header) << 16;
+	ret |= SB_HAB_VERSION;
+	return htonl(ret);
+}
+
+struct sb_sections_header {
+	/* Section number. */
+	uint32_t	section_number;
+	/* Offset of this section's first instruction after "TAG". */
+	uint32_t	section_offset;
+	/* Size of the section in 16b blocks. */
+	uint32_t	section_size;
+	/* Section flags. */
+	uint32_t	section_flags;
+};
+
+#define	SB_SECTION_FLAG_BOOTABLE	(1 << 0)
+
+struct sb_command {
+	struct {
+		uint8_t		checksum;
+		uint8_t		tag;
+		uint16_t	flags;
+#define ROM_TAG_CMD_FLAG_ROM_LAST_TAG	0x1
+#define ROM_LOAD_CMD_FLAG_DCD_LOAD	0x1	/* MX28 only */
+#define ROM_JUMP_CMD_FLAG_HAB		0x1	/* MX28 only */
+#define ROM_CALL_CMD_FLAG_HAB		0x1	/* MX28 only */
+	} header;
+
+	union {
+	struct {
+		uint32_t	reserved[3];
+	} nop;
+	struct {
+		uint32_t	section_number;
+		uint32_t	section_length;
+		uint32_t	section_flags;
+	} tag;
+	struct {
+		uint32_t	address;
+		uint32_t	count;
+		uint32_t	crc32;
+	} load;
+	struct {
+		uint32_t	address;
+		uint32_t	count;
+		uint32_t	pattern;
+	} fill;
+	struct {
+		uint32_t	address;
+		uint32_t	reserved;
+		/* Passed in register r0 before JUMP */
+		uint32_t	argument;
+	} jump;
+	struct {
+		uint32_t	address;
+		uint32_t	reserved;
+		/* Passed in register r0 before CALL */
+		uint32_t	argument;
+	} call;
+	struct {
+		uint32_t	reserved1;
+		uint32_t	reserved2;
+		uint32_t	mode;
+	} mode;
+
+	};
+};
+
+/*
+ * Most of the mode names are the same or at least similar
+ * on i.MX23 and i.MX28, but some of the mode names
+ * differ. The "name" field represents the mode name
+ * on i.MX28 as seen in Table 12-2 of the datasheet.
+ * The "altname" field represents the differently named
+ * fields on i.MX23 as seen in Table 35-3 of the
+ * datasheet.
+ */
+static const struct {
+	const char	*name;
+	const char	*altname;
+	const uint8_t	mode;
+} modetable[] = {
+	{ "USB",		NULL,		0x00 },
+	{ "I2C",		NULL,		0x01 },
+	{ "SPI2_FLASH",		"SPI1_FLASH",	0x02 },
+	{ "SPI3_FLASH",		"SPI2_FLASH",	0x03 },
+	{ "NAND_BCH",		NULL,		0x04 },
+	{ "JTAG",		NULL,		0x06 },
+	{ "SPI3_EEPROM",	"SPI2_EEPROM",	0x08 },
+	{ "SD_SSP0",		NULL,		0x09 },
+	{ "SD_SSP1",		NULL,		0x0A }
+};
+
+enum sb_tag {
+	ROM_NOP_CMD	= 0x00,
+	ROM_TAG_CMD	= 0x01,
+	ROM_LOAD_CMD	= 0x02,
+	ROM_FILL_CMD	= 0x03,
+	ROM_JUMP_CMD	= 0x04,
+	ROM_CALL_CMD	= 0x05,
+	ROM_MODE_CMD	= 0x06
+};
+
+struct sb_source_entry {
+	uint8_t		tag;
+	uint32_t	address;
+	uint32_t	flags;
+	char		*filename;
+};
+
+#endif	/* __MXSSB_H__ */
diff --git a/tools/u-boot-tools/ncb.c b/tools/u-boot-tools/ncb.c
new file mode 100644
index 0000000000000000000000000000000000000000..ec8d8a7435208e432b81b8daf29be868ec408921
--- /dev/null
+++ b/tools/u-boot-tools/ncb.c
@@ -0,0 +1,38 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+int main (int argc, char *argv[])
+{
+	int s, len, o, port = 6666;
+	char buf[512];
+	struct sockaddr_in addr;
+	socklen_t addr_len = sizeof addr;
+
+	if (argc > 1)
+		port = atoi (argv[1]);
+
+	s = socket (PF_INET, SOCK_DGRAM, IPPROTO_UDP);
+
+	o = 1;
+	len = 4;
+	setsockopt (s, SOL_SOCKET, SO_REUSEADDR, &o, len);
+
+	addr.sin_family = AF_INET;
+	addr.sin_port = htons (port);
+	addr.sin_addr.s_addr = INADDR_ANY;	/* receive broadcasts */
+
+	bind (s, (struct sockaddr *) &addr, sizeof addr);
+
+	for (;;) {
+		len = recvfrom (s, buf, sizeof buf, 0, (struct sockaddr *) &addr, &addr_len);
+		if (len < 0)
+			break;
+		if (write (1, buf, len) != len)
+			fprintf(stderr, "WARNING: serial characters dropped\n");
+	}
+
+	return 0;
+}
diff --git a/tools/u-boot-tools/netconsole b/tools/u-boot-tools/netconsole
new file mode 100755
index 0000000000000000000000000000000000000000..1a0ef22244e3d5dbe0cfd4983ca1666198cd8a81
--- /dev/null
+++ b/tools/u-boot-tools/netconsole
@@ -0,0 +1,63 @@
+#!/bin/sh
+
+usage() {
+	(
+	echo "Usage: $0 <board-IP> [board-port [board-in-port]]"
+	echo ""
+	echo "If port is not specified, '6666' will be used"
+	[ -z "$*" ] && exit 0
+	echo ""
+	echo "ERROR: $*"
+	exit 1
+	) 1>&2
+	exit $?
+}
+
+while [ -n "$1" ] ; do
+	case $1 in
+		-h|--help) usage;;
+		--)        break;;
+		-*)        usage "Invalid option $1";;
+		*)         break;;
+	esac
+	shift
+done
+
+ip=$1
+board_out_port=${2:-6666}
+board_in_port=${3:-${board_out_port}}
+
+echo Board out port: ${board_out_port}
+echo Board in port: ${board_in_port}
+
+if [ -z "${ip}" ] || [ -n "$4" ] ; then
+	usage "Invalid number of arguments"
+fi
+
+for nc in netcat nc ; do
+	type ${nc} >/dev/null 2>&1 && break
+done
+
+trap "stty icanon echo intr ^C" 0 2 3 5 10 13 15
+echo "NOTE: the interrupt signal (normally ^C) has been remapped to ^T"
+
+stty -icanon -echo intr ^T
+(
+if type ncb >/dev/null 2>&1 ; then
+	# see if ncb is in $PATH
+	exec ncb ${board_out_port}
+
+elif [ -x ${0%/*}/ncb ] ; then
+	# maybe it's in the same dir as the netconsole script
+	exec ${0%/*}/ncb ${board_out_port}
+
+else
+	# blah, just use regular netcat
+	while ${nc} -u -l -p ${board_out_port} < /dev/null ; do
+		:
+	done
+fi
+) &
+pid=$!
+${nc} -u ${ip} ${board_in_port}
+kill ${pid} 2>/dev/null
diff --git a/tools/u-boot-tools/omap/clocks_get_m_n.c b/tools/u-boot-tools/omap/clocks_get_m_n.c
new file mode 100644
index 0000000000000000000000000000000000000000..e21b0e2b60b6b7c26476eb57162f58a23eaa0934
--- /dev/null
+++ b/tools/u-boot-tools/omap/clocks_get_m_n.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Program for finding M & N values for DPLLs
+ * To be run on Host PC
+ *
+ * (C) Copyright 2010
+ * Texas Instruments, <www.ti.com>
+ *
+ * Aneesh V <aneesh@ti.com>
+ */
+#include <stdlib.h>
+#include <stdio.h>
+typedef unsigned int u32;
+#define MAX_N	127
+
+/*
+ * get_m_n_optimized() - Finds optimal DPLL multiplier (M) and divider (N)
+ * values for the given reference frequency and required output frequency.
+ *
+ * target_freq_khz - output frequency required, in kHz
+ * ref_freq_khz - reference (input) frequency, in kHz
+ * M - pointer to the computed M value
+ * N - pointer to the computed N value
+ *
+ * There is no explicit tolerance parameter: the algorithm simply picks the
+ * highest achievable output frequency that does not exceed target_freq_khz.
+ *
+ * Formula:
+ *	Fdpll = (2 * M * Fref) / (N + 1)
+ *
+ * Considerations for lock-time:
+ *	- The smaller the N, the better the lock-time
+ *	- For acceptable lock-times:
+ *		Fref / (N + 1) >= 1 MHz
+ *
+ * Considerations for power:
+ *	- The difference in power for different N values giving the same
+ *	  output is negligible. So, we optimize for lock-time
+ *
+ * Hard-constraints:
+ *	- N cannot be greater than 127 (7-bit field for representing N)
+ *
+ * Usage:
+ *	$ gcc clocks_get_m_n.c
+ *	$ ./a.out
+ */
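+/*
+ * Worked example (illustrative, derived by hand from the formula above):
+ * for ref_freq_khz = 38400 and target_freq_khz = 1523712 the search ends
+ * with M = 496 and N = 24, since
+ *	Fdpll = (2 * 496 * 38400) / (24 + 1) = 1523712 kHz (exact)
+ * and the lock-time constraint holds:
+ *	Fref / (N + 1) = 38400 / 25 = 1536 kHz >= 1 MHz
+ * This is the "exact recommendation for SDPs" case exercised in main().
+ */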
+int get_m_n_optimized(u32 target_freq_khz, u32 ref_freq_khz, u32 *M, u32 *N)
+{
+	u32 freq = target_freq_khz;
+	u32 m_optimal, n_optimal, freq_optimal = 0, freq_old;
+	u32 m, n;
+	n = 1;
+	while (1) {
+		m = target_freq_khz / ref_freq_khz / 2 * n;
+		freq_old = 0;
+		while (1) {
+			freq = ref_freq_khz * 2 * m / n;
+			if (freq > target_freq_khz) {
+				freq = freq_old;
+				m--;
+				break;
+			}
+			m++;
+			freq_old = freq;
+		}
+		if (freq > freq_optimal) {
+			freq_optimal = freq;
+			m_optimal = m;
+			n_optimal = n;
+		}
+		n++;
+		if ((freq_optimal == target_freq_khz) ||
+			((ref_freq_khz / n) < 1000)) {
+			break;
+		}
+	}
+	n--;
+	*M = m_optimal;
+	*N = n_optimal - 1;
+	printf("ref %d m %d n %d target %d locked %d\n", ref_freq_khz,
+		m_optimal, n_optimal - 1, target_freq_khz, freq_optimal);
+	return 0;
+}
+
+int main(void)
+{
+	u32 m, n;
+	printf("\nMPU - 2000000\n");
+	get_m_n_optimized(2000000, 12000, &m, &n);
+	get_m_n_optimized(2000000, 13000, &m, &n);
+	get_m_n_optimized(2000000, 16800, &m, &n);
+	get_m_n_optimized(2000000, 19200, &m, &n);
+	get_m_n_optimized(2000000, 26000, &m, &n);
+	get_m_n_optimized(2000000, 27000, &m, &n);
+	get_m_n_optimized(2000000, 38400, &m, &n);
+
+	printf("\nMPU - 1200000\n");
+	get_m_n_optimized(1200000, 12000, &m, &n);
+	get_m_n_optimized(1200000, 13000, &m, &n);
+	get_m_n_optimized(1200000, 16800, &m, &n);
+	get_m_n_optimized(1200000, 19200, &m, &n);
+	get_m_n_optimized(1200000, 26000, &m, &n);
+	get_m_n_optimized(1200000, 27000, &m, &n);
+	get_m_n_optimized(1200000, 38400, &m, &n);
+
+	printf("\nMPU - 1584000\n");
+	get_m_n_optimized(1584000, 12000, &m, &n);
+	get_m_n_optimized(1584000, 13000, &m, &n);
+	get_m_n_optimized(1584000, 16800, &m, &n);
+	get_m_n_optimized(1584000, 19200, &m, &n);
+	get_m_n_optimized(1584000, 26000, &m, &n);
+	get_m_n_optimized(1584000, 27000, &m, &n);
+	get_m_n_optimized(1584000, 38400, &m, &n);
+
+	printf("\nCore 1600000\n");
+	get_m_n_optimized(1600000, 12000, &m, &n);
+	get_m_n_optimized(1600000, 13000, &m, &n);
+	get_m_n_optimized(1600000, 16800, &m, &n);
+	get_m_n_optimized(1600000, 19200, &m, &n);
+	get_m_n_optimized(1600000, 26000, &m, &n);
+	get_m_n_optimized(1600000, 27000, &m, &n);
+	get_m_n_optimized(1600000, 38400, &m, &n);
+
+	printf("\nPER 1536000\n");
+	get_m_n_optimized(1536000, 12000, &m, &n);
+	get_m_n_optimized(1536000, 13000, &m, &n);
+	get_m_n_optimized(1536000, 16800, &m, &n);
+	get_m_n_optimized(1536000, 19200, &m, &n);
+	get_m_n_optimized(1536000, 26000, &m, &n);
+	get_m_n_optimized(1536000, 27000, &m, &n);
+	get_m_n_optimized(1536000, 38400, &m, &n);
+
+	printf("\nIVA 1862000\n");
+	get_m_n_optimized(1862000, 12000, &m, &n);
+	get_m_n_optimized(1862000, 13000, &m, &n);
+	get_m_n_optimized(1862000, 16800, &m, &n);
+	get_m_n_optimized(1862000, 19200, &m, &n);
+	get_m_n_optimized(1862000, 26000, &m, &n);
+	get_m_n_optimized(1862000, 27000, &m, &n);
+	get_m_n_optimized(1862000, 38400, &m, &n);
+
+	printf("\nIVA Nitro - 1290000\n");
+	get_m_n_optimized(1290000, 12000, &m, &n);
+	get_m_n_optimized(1290000, 13000, &m, &n);
+	get_m_n_optimized(1290000, 16800, &m, &n);
+	get_m_n_optimized(1290000, 19200, &m, &n);
+	get_m_n_optimized(1290000, 26000, &m, &n);
+	get_m_n_optimized(1290000, 27000, &m, &n);
+	get_m_n_optimized(1290000, 38400, &m, &n);
+
+	printf("\nABE 196608 sys clk\n");
+	get_m_n_optimized(196608, 12000, &m, &n);
+	get_m_n_optimized(196608, 13000, &m, &n);
+	get_m_n_optimized(196608, 16800, &m, &n);
+	get_m_n_optimized(196608, 19200, &m, &n);
+	get_m_n_optimized(196608, 26000, &m, &n);
+	get_m_n_optimized(196608, 27000, &m, &n);
+	get_m_n_optimized(196608, 38400, &m, &n);
+
+	printf("\nABE 196608 32K\n");
+	get_m_n_optimized(196608000/4, 32768, &m, &n);
+
+	printf("\nUSB 1920000\n");
+	get_m_n_optimized(1920000, 12000, &m, &n);
+	get_m_n_optimized(1920000, 13000, &m, &n);
+	get_m_n_optimized(1920000, 16800, &m, &n);
+	get_m_n_optimized(1920000, 19200, &m, &n);
+	get_m_n_optimized(1920000, 26000, &m, &n);
+	get_m_n_optimized(1920000, 27000, &m, &n);
+	get_m_n_optimized(1920000, 38400, &m, &n);
+
+	printf("\nCore ES1 1523712\n");
+	get_m_n_optimized(1524000, 12000, &m, &n);
+	get_m_n_optimized(1524000, 13000, &m, &n);
+	get_m_n_optimized(1524000, 16800, &m, &n);
+	get_m_n_optimized(1524000, 19200, &m, &n);
+	get_m_n_optimized(1524000, 26000, &m, &n);
+	get_m_n_optimized(1524000, 27000, &m, &n);
+
+	/* exact recommendation for SDPs */
+	get_m_n_optimized(1523712, 38400, &m, &n);
+
+	return 0;
+}
diff --git a/tools/u-boot-tools/omapimage.c b/tools/u-boot-tools/omapimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..c59cdcc79b3d0f4934817fca231bc4e6c6fc7c87
--- /dev/null
+++ b/tools/u-boot-tools/omapimage.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2010
+ * Linaro LTD, www.linaro.org
+ * Author: John Rigby <john.rigby@linaro.org>
+ * Based on TI's signGP.c
+ *
+ * (C) Copyright 2009
+ * Stefano Babic, DENX Software Engineering, sbabic@denx.de.
+ *
+ * (C) Copyright 2008
+ * Marvell Semiconductor <www.marvell.com>
+ * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
+ */
+
+#include "imagetool.h"
+#include <compiler.h>
+#include <image.h>
+#include "gpheader.h"
+#include "omapimage.h"
+
+#define DIV_ROUND_UP(n, d)     (((n) + (d) - 1) / (d))
+
+/* Header size is CH header rounded up to 512 bytes plus GP header */
+#define OMAP_CH_HDR_SIZE 512
+#define OMAP_FILE_HDR_SIZE (OMAP_CH_HDR_SIZE + GPIMAGE_HDR_SIZE)
+
+static int do_swap32 = 0;
+
+static uint8_t omapimage_header[OMAP_FILE_HDR_SIZE];
+
+static int omapimage_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_OMAPIMAGE)
+		return EXIT_SUCCESS;
+	return EXIT_FAILURE;
+}
+
+static int omapimage_verify_header(unsigned char *ptr, int image_size,
+			struct image_tool_params *params)
+{
+	struct ch_toc *toc = (struct ch_toc *)ptr;
+	struct gp_header *gph = (struct gp_header *)(ptr+OMAP_CH_HDR_SIZE);
+	uint32_t offset, size;
+
+	while (toc->section_offset != 0xffffffff
+			&& toc->section_size != 0xffffffff) {
+		if (do_swap32) {
+			offset = cpu_to_be32(toc->section_offset);
+			size = cpu_to_be32(toc->section_size);
+		} else {
+			offset = toc->section_offset;
+			size = toc->section_size;
+		}
+		if (!offset || !size)
+			return -1;
+		if (offset >= OMAP_CH_HDR_SIZE ||
+		    offset+size >= OMAP_CH_HDR_SIZE)
+			return -1;
+		toc++;
+	}
+
+	return gph_verify_header(gph, do_swap32);
+}
+
+static void omapimage_print_section(struct ch_settings *chs)
+{
+	const char *section_name;
+
+	if (chs->section_key)
+		section_name = "CHSETTINGS";
+	else
+		section_name = "UNKNOWNKEY";
+
+	printf("%s (%x) "
+		"valid:%x "
+		"version:%x "
+		"reserved:%x "
+		"flags:%x\n",
+		section_name,
+		chs->section_key,
+		chs->valid,
+		chs->version,
+		chs->reserved,
+		chs->flags);
+}
+
+static void omapimage_print_header(const void *ptr)
+{
+	const struct ch_toc *toc = (struct ch_toc *)ptr;
+	const struct gp_header *gph =
+			(struct gp_header *)(ptr+OMAP_CH_HDR_SIZE);
+	uint32_t offset, size;
+
+	while (toc->section_offset != 0xffffffff
+			&& toc->section_size != 0xffffffff) {
+		if (do_swap32) {
+			offset = cpu_to_be32(toc->section_offset);
+			size = cpu_to_be32(toc->section_size);
+		} else {
+			offset = toc->section_offset;
+			size = toc->section_size;
+		}
+
+		if (offset >= OMAP_CH_HDR_SIZE ||
+		    offset+size >= OMAP_CH_HDR_SIZE)
+			exit(EXIT_FAILURE);
+
+		printf("Section %s offset %x length %x\n",
+			toc->section_name,
+			toc->section_offset,
+			toc->section_size);
+
+		omapimage_print_section((struct ch_settings *)(ptr+offset));
+		toc++;
+	}
+
+	gph_print_header(gph, do_swap32);
+}
+
+static int toc_offset(void *hdr, void *member)
+{
+	return member - hdr;
+}
+
+static void omapimage_set_header(void *ptr, struct stat *sbuf, int ifd,
+				struct image_tool_params *params)
+{
+	struct ch_toc *toc = (struct ch_toc *)ptr;
+	struct ch_settings *chs = (struct ch_settings *)
+					(ptr + 2 * sizeof(*toc));
+	struct gp_header *gph = (struct gp_header *)(ptr + OMAP_CH_HDR_SIZE);
+
+	toc->section_offset = toc_offset(ptr, chs);
+	toc->section_size = sizeof(struct ch_settings);
+	strcpy((char *)toc->section_name, "CHSETTINGS");
+
+	chs->section_key = KEY_CHSETTINGS;
+	chs->valid = 0;
+	chs->version = 1;
+	chs->reserved = 0;
+	chs->flags = 0;
+
+	toc++;
+	memset(toc, 0xff, sizeof(*toc));
+
+	gph_set_header(gph, sbuf->st_size - OMAP_CH_HDR_SIZE,
+		       params->addr, 0);
+
+	if (strncmp(params->imagename, "byteswap", 8) == 0) {
+		do_swap32 = 1;
+		int swapped = 0;
+		uint32_t *data = (uint32_t *)ptr;
+		const off_t size_in_words =
+			DIV_ROUND_UP(sbuf->st_size, sizeof(uint32_t));
+
+		while (swapped < size_in_words) {
+			*data = cpu_to_be32(*data);
+			swapped++;
+			data++;
+		}
+	}
+}
+
+/*
+ * omapimage parameters
+ */
+U_BOOT_IMAGE_TYPE(
+	omapimage,
+	"TI OMAP CH/GP Boot Image support",
+	OMAP_FILE_HDR_SIZE,
+	(void *)&omapimage_header,
+	gpimage_check_params,
+	omapimage_verify_header,
+	omapimage_print_header,
+	omapimage_set_header,
+	NULL,
+	omapimage_check_image_types,
+	NULL,
+	NULL
+);
diff --git a/tools/u-boot-tools/omapimage.h b/tools/u-boot-tools/omapimage.h
new file mode 100644
index 0000000000000000000000000000000000000000..175fac2da6f389f85a091896472917a25e435373
--- /dev/null
+++ b/tools/u-boot-tools/omapimage.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2010
+ * Linaro LTD, www.linaro.org
+ * Author John Rigby <john.rigby@linaro.org>
+ * Based on TI's signGP.c
+ */
+
+#ifndef _OMAPIMAGE_H_
+#define _OMAPIMAGE_H_
+
+struct ch_toc {
+	uint32_t section_offset;
+	uint32_t section_size;
+	uint8_t unused[12];
+	uint8_t section_name[12];
+};
+
+struct ch_settings {
+	uint32_t section_key;
+	uint8_t valid;
+	uint8_t version;
+	uint16_t reserved;
+	uint32_t flags;
+};
+
+#define KEY_CHSETTINGS 0xC0C0C0C1
+#endif /* _OMAPIMAGE_H_ */
diff --git a/tools/u-boot-tools/omapimage.o b/tools/u-boot-tools/omapimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..758d3398e9a9e521706993cdd503fd58a4ec281b
Binary files /dev/null and b/tools/u-boot-tools/omapimage.o differ
diff --git a/tools/u-boot-tools/os_support.c b/tools/u-boot-tools/os_support.c
new file mode 100644
index 0000000000000000000000000000000000000000..21e43c876a9f4fc9c33b9bde7c635068920ced12
--- /dev/null
+++ b/tools/u-boot-tools/os_support.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2009 Extreme Engineering Solutions, Inc.
+ */
+
+/*
+ * Include additional files required for supporting different operating systems
+ */
+#include "compiler.h"
+#ifdef __MINGW32__
+#include "mingw_support.c"
+#endif
+#if defined(__APPLE__) && __DARWIN_C_LEVEL < 200809L
+#include "getline.c"
+#endif
diff --git a/tools/u-boot-tools/os_support.h b/tools/u-boot-tools/os_support.h
new file mode 100644
index 0000000000000000000000000000000000000000..3a2106ed7e5b8e42b7db477e4f30042b1724e353
--- /dev/null
+++ b/tools/u-boot-tools/os_support.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: LGPL-2.0+ */
+/*
+ * Copyright 2009 Extreme Engineering Solutions, Inc.
+ */
+
+#ifndef __OS_SUPPORT_H_
+#define __OS_SUPPORT_H_
+
+#include "compiler.h"
+
+/*
+ * Include additional files required for supporting different operating systems
+ */
+#ifdef __MINGW32__
+#include "mingw_support.h"
+#endif
+
+#if defined(__APPLE__) && __DARWIN_C_LEVEL < 200809L
+#include "getline.h"
+#endif
+
+#endif /* __OS_SUPPORT_H_ */
diff --git a/tools/u-boot-tools/os_support.o b/tools/u-boot-tools/os_support.o
new file mode 100644
index 0000000000000000000000000000000000000000..e561a43ccc9c64a6749b20cc7d4000b7d56eec44
Binary files /dev/null and b/tools/u-boot-tools/os_support.o differ
diff --git a/tools/u-boot-tools/patman/.gitignore b/tools/u-boot-tools/patman/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..0d20b6487c61e7d1bde93acf4a14b7a89083a16d
--- /dev/null
+++ b/tools/u-boot-tools/patman/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/tools/u-boot-tools/patman/README b/tools/u-boot-tools/patman/README
new file mode 100644
index 0000000000000000000000000000000000000000..7917fc8bdc33204187c7b3d276eafc184824cd97
--- /dev/null
+++ b/tools/u-boot-tools/patman/README
@@ -0,0 +1,497 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+
+What is this?
+=============
+
+This tool is a Python script which:
+- Creates patches directly from your branch
+- Cleans them up by removing unwanted tags
+- Inserts a cover letter with change lists
+- Runs the patches through checkpatch.pl and its own checks
+- Optionally emails them out to selected people
+
+It is intended to automate patch creation and make it a less
+error-prone process. It is useful for U-Boot and Linux work so far,
+since it uses the checkpatch.pl script.
+
+It is configured almost entirely by tags it finds in your commits.
+This means that you can work on a number of different branches at
+once, and keep the settings with each branch rather than having to
+git format-patch, git send-email, etc. with the correct parameters
+each time. So for example if you put:
+
+Series-to: fred.blogs@napier.co.nz
+
+in one of your commits, the series will be sent there.
+
+In Linux and U-Boot this will also call get_maintainer.pl on each of your
+patches automatically (unless you use -m to disable this).
+
+
+How to use this tool
+====================
+
+This tool requires a certain way of working:
+
+- Maintain a number of branches, one for each patch series you are
+working on
+- Add tags into the commits within each branch to indicate where the
+series should be sent, cover letter, version, etc. Most of these are
+normally in the top commit so it is easy to change them with 'git
+commit --amend'
+- Each branch tracks the upstream branch, so that this script can
+automatically determine the number of commits in it (optional)
+- Check out a branch, and run this script to create and send out your
+patches. Weeks later, change the patches and repeat, knowing that you
+will get a consistent result each time.
+
+
+How to configure it
+===================
+
+For most cases of using patman for U-Boot development, patman can use the
+file 'doc/git-mailrc' in your U-Boot directory to supply the email aliases
+you need. To make this work, tell git where to find the file by typing
+this once:
+
+    git config sendemail.aliasesfile doc/git-mailrc
+
+For both Linux and U-Boot the 'scripts/get_maintainer.pl' handles figuring
+out where to send patches pretty well.
+
+During the first run patman creates a config file for you by taking the default
+user name and email address from the global .gitconfig file.
+
+To add your own, create a file ~/.patman like this:
+
+>>>>
+# patman alias file
+
+[alias]
+me: Simon Glass <sjg@chromium.org>
+
+u-boot: U-Boot Mailing List <u-boot@lists.denx.de>
+wolfgang: Wolfgang Denk <wd@denx.de>
+others: Mike Frysinger <vapier@gentoo.org>, Fred Bloggs <f.bloggs@napier.net>
+
+<<<<
+
+Aliases are recursive.
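+For example (a hypothetical entry building on the aliases above), 'everybody'
+below expands to 'me' plus everything that 'others' expands to:
+
+>>>>
+[alias]
+everybody: me, others
+<<<<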
+
+The checkpatch.pl in the U-Boot tools/ subdirectory will be located and
+used. Failing that you can put it into your path or ~/bin/checkpatch.pl
+
+If you want to avoid sending patches to email addresses that are picked up
+by patman but are known to bounce you can add a [bounces] section to your
+.patman file. Unlike the [alias] section these are simple key: value pairs
+that are not recursive.
+
+>>>
+
+[bounces]
+gonefishing: Fred Bloggs <f.bloggs@napier.net>
+
+<<<
+
+
+If you want to change the defaults for patman's command-line arguments,
+you can add a [settings] section to your .patman file.  This can be used
+for any command line option by referring to the "dest" for the option in
+patman.py.  For reference, the useful ones (at the moment) are shown below
+(all with the non-default setting):
+
+>>>
+
+[settings]
+ignore_errors: True
+process_tags: False
+verbose: True
+smtp_server: /path/to/sendmail
+
+<<<
+
+
+If you want to adjust settings (or aliases) that affect just a single
+project you can add a section that looks like [project_settings] or
+[project_alias].  If you want to use tags for your linux work, you could
+do:
+
+>>>
+
+[linux_settings]
+process_tags: True
+
+<<<
+
+
+How to run it
+=============
+
+First do a dry run:
+
+$ ./tools/patman/patman -n
+
+If it can't detect the upstream branch, try telling it how many patches
+there are in your series:
+
+$ ./tools/patman/patman -n -c5
+
+This will create patch files in your current directory and tell you who
+it is thinking of sending them to. Take a look at the patch files.
+
+$ ./tools/patman/patman -n -c5 -s1
+
+Similar to the above, but skip the first commit and take the next 5. This
+is useful if your top commit is for setting up testing.
+
+
+How to install it
+=================
+
+The most up to date version of patman can be found in the U-Boot sources.
+However to use it on other projects it may be more convenient to install it as
+a standalone application. A distutils installer is included, this can be used
+to install patman:
+
+$ cd tools/patman && python setup.py install
+
+
+How to add tags
+===============
+
+To make this script useful you must add tags like the following into any
+commit. Most can only appear once in the whole series.
+
+Series-to: email / alias
+	Email address / alias to send patch series to (you can add this
+	multiple times)
+
+Series-cc: email / alias, ...
+	Email address / alias to Cc patch series to (you can add this
+	multiple times)
+
+Series-version: n
+	Sets the version number of this patch series
+
+Series-prefix: prefix
+	Sets the subject prefix. Normally empty, but it can be RFC for
+	RFC patches, or RESEND if you are being ignored. The patch subject
+	then looks like [RFC PATCH] or [RESEND PATCH].
+	The git format.subjectprefix option is honoured as well: if your
+	format.subjectprefix is set to InternalProject, then the subject
+	looks like [InternalProject][RFC/RESEND PATCH].
+
+Series-name: name
+	Sets the name of the series. You don't need to have a name, and
+	patman does not yet use it, but it is convenient to put the branch
+	name here to help you keep track of multiple upstreaming efforts.
+
+Cover-letter:
+This is the patch set title
+blah blah
+more blah blah
+END
+	Sets the cover letter contents for the series. The first line
+	will become the subject of the cover letter
+
+Cover-letter-cc: email / alias
+	Additional email addresses / aliases to send cover letter to (you
+	can add this multiple times)
+
+Series-notes:
+blah blah
+blah blah
+more blah blah
+END
+	Sets some notes for the patch series, which you don't want in
+	the commit messages, but do want to send. The notes are joined
+	together and put after the cover letter. Can appear multiple
+	times.
+
+Commit-notes:
+blah blah
+blah blah
+more blah blah
+END
+	Similar, but for a single commit (patch). These notes will appear
+	immediately below the --- cut in the patch file.
+
+ Signed-off-by: Their Name <email>
+	A sign-off is added automatically to your patches (this is
+	probably a bug). If you put this tag in your patches, it will
+	override the default signoff that patman automatically adds.
+	Multiple duplicate signoffs will be removed.
+
+ Tested-by: Their Name <email>
+ Reviewed-by: Their Name <email>
+ Acked-by: Their Name <email>
+	These indicate that someone has tested/reviewed/acked your patch.
+	When you get this reply on the mailing list, you can add this
+	tag to the relevant commit and the script will include it when
+	you send out the next version. If 'Tested-by:' is set to
+	yourself, it will be removed. No one will believe you.
+
+Series-changes: n
+- Guinea pig moved into its cage
+- Other changes ending with a blank line
+<blank line>
+	This can appear in any commit. It lists the changes for a
+	particular version n of that commit. The change list is
+	created based on this information. Each commit gets its own
+	change list and also the whole thing is repeated in the cover
+	letter (where duplicate change lines are merged).
+
+	By adding your change lists into your commits it is easier to
+	keep track of what happened. When you amend a commit, remember
+	to update the log there and then, knowing that the script will
+	do the rest.
+
+Patch-cc: Their Name <email>
+	This copies a single patch to another email address. Note that the
+	Cc: used by git send-email is ignored by patman, but will be
+	interpreted by git send-email if you use it.
+
+Series-process-log: sort, uniq
+	This tells patman to sort and/or uniq the change logs. It is
+	assumed that each change log entry is only a single line long.
+	Use 'sort' to sort the entries, and 'uniq' to include only
+	unique entries. If omitted, no change log processing is done.
+	Separate each tag with a comma.
+
+Various other tags are silently removed, like these Chrome OS and
+Gerrit tags:
+
+BUG=...
+TEST=...
+Change-Id:
+Review URL:
+Reviewed-on:
+Commit-xxxx: (except Commit-notes)
+
+Exercise for the reader: Try adding some tags to one of your current
+patch series and see how the patches turn out.
+
+
+Where Patches Are Sent
+======================
+
+Once the patches are created, patman sends them using git send-email. The
+whole series is sent to the recipients in Series-to: and Series-cc.
+You can Cc individual patches to other people with the Patch-cc: tag. Tags
+in the subject are also picked up to Cc patches. For example, a commit like
+this:
+
+>>>>
+commit 10212537b85ff9b6e09c82045127522c0f0db981
+Author: Mike Frysinger <vapier@gentoo.org>
+Date:	Mon Nov 7 23:18:44 2011 -0500
+
+    x86: arm: add a git mailrc file for maintainers
+
+    This should make sending out e-mails to the right people easier.
+
+    Patch-cc: sandbox, mikef, ag
+    Patch-cc: afleming
+<<<<
+
+will create a patch which is copied to x86, arm, sandbox, mikef, ag and
+afleming.
+
+If you have a cover letter it will get sent to the union of the Patch-cc
+lists of all of the other patches. If you want to send it to additional
+people you can add a tag:
+
+Cover-letter-cc: <list of addresses>
+
+These people will get the cover letter even if they are not on the To/Cc
+list for any of the patches.
+
+
+Example Work Flow
+=================
+
+The basic workflow is to create your commits, add some tags to the top
+commit, and type 'patman' to check and send them.
+
+Here is an example workflow for a series of 4 patches. Let's say you have
+these rather contrived patches in the following order in branch us-cmd in
+your tree where 'us' means your upstreaming activity (newest to oldest as
+output by git log --oneline):
+
+    7c7909c wip
+    89234f5 Don't include standard parser if hush is used
+    8d640a7 mmc: sparc: Stop using builtin_run_command()
+    0c859a9 Rename run_command2() to run_command()
+    a74443f sandbox: Rename run_command() to builtin_run_command()
+
+The first patch is some test things that enable your code to be compiled,
+but that you don't want to submit because there is an existing patch for it
+on the list. So you can tell patman to create and check some patches
+(skipping the first patch) with:
+
+    patman -s1 -n
+
+If you want to do all of them including the work-in-progress one, then
+(if you are tracking an upstream branch):
+
+    patman -n
+
+Let's say that patman reports an error in the second patch. Then:
+
+    git rebase -i HEAD~6
+    <change 'pick' to 'edit' in 89234f5>
+    <use editor to make code changes>
+    git add -u
+    git rebase --continue
+
+Now you have an updated patch series. To check it:
+
+    patman -s1 -n
+
+Let's say it is now clean and you want to send it. Now you need to set up
+the destination. So amend the top commit with:
+
+    git commit --amend
+
+Use your editor to add some tags, so that the whole commit message is:
+
+    The current run_command() is really only one of the options, with
+    hush providing the other. It really shouldn't be called directly
+    in case the hush parser is being used, so rename this function to
+    better explain its purpose.
+
+    Series-to: u-boot
+    Series-cc: bfin, marex
+    Series-prefix: RFC
+    Cover-letter:
+    Unified command execution in one place
+
+    At present two parsers have similar code to execute commands. Also
+    cmd_usage() is called all over the place. This series adds a single
+    function which processes commands called cmd_process().
+    END
+
+    Change-Id: Ica71a14c1f0ecb5650f771a32fecb8d2eb9d8a17
+
+
+You want this to be an RFC and Cc the whole series to the bfin alias and
+to Marek. Two of the patches have tags (those are the bits at the front of
+the subject that say mmc: sparc: and sandbox:), so 8d640a7 will be Cc'd to
+mmc and sparc, and the last one to sandbox.
+
+Now to send the patches, take off the -n flag:
+
+   patman -s1
+
+The patches will be created, shown in your editor, and then sent along with
+the cover letter. Note that patman's tags are automatically removed so that
+people on the list don't see your secret info.
+
+Of course patches often attract comments and you need to make some updates.
+Let's say one person sent comments and you get an Acked-by: on one patch.
+Also, the patch on the list that you were waiting for has been merged,
+so you can drop your wip commit. So you resync with upstream:
+
+    git fetch origin		(or whatever upstream is called)
+    git rebase origin/master
+
+and use git rebase -i to edit the commits, dropping the wip one. You add
+the ack tag to one commit:
+
+    Acked-by: Heiko Schocher <hs@denx.de>
+
+update the Series-cc: in the top commit:
+
+    Series-cc: bfin, marex, Heiko Schocher <hs@denx.de>
+
+and remove the Series-prefix: tag since it isn't an RFC any more. The
+series is now version two, so the series info in the top commit looks like
+this:
+
+    Series-to: u-boot
+    Series-cc: bfin, marex, Heiko Schocher <hs@denx.de>
+    Series-version: 2
+    Cover-letter:
+    ...
+
+Finally, you need to add a change log to the two commits you changed. You
+add change logs to each individual commit where the changes happened, like
+this:
+
+    Series-changes: 2
+    - Updated the command decoder to reduce code size
+    - Wound the torque propounder up a little more
+
+(note the blank line at the end of the list)
+
+When you run patman it will collect all the change logs from the different
+commits and combine them into the cover letter, if you have one. So finally
+you have a new series of commits:
+
+    faeb973 Don't include standard parser if hush is used
+    1b2f2fe mmc: sparc: Stop using builtin_run_command()
+    cfbe330 Rename run_command2() to run_command()
+    0682677 sandbox: Rename run_command() to builtin_run_command()
+
+so to send them:
+
+    patman
+
+and it will create and send the version 2 series.
+
+General points:
+
+1. When you change back to the us-cmd branch days or weeks later all your
+information is still there, safely stored in the commits. You don't need
+to remember what version you are up to, who you sent the last lot of patches
+to, or anything about the change logs.
+
+2. If you put tags in the subject, patman will Cc the maintainers
+automatically in many cases.
+
+3. If you want to keep the commits from each series you sent so that you can
+compare changes and see what you did, you can either create a new branch for
+each version, or just tag the branch before you start changing it:
+
+    git tag sent/us-cmd-rfc
+    ...later...
+    git tag sent/us-cmd-v2
+
+4. If you want to modify the patches a little before sending, you can do
+this in your editor, but be careful!
+
+5. If you want to run git send-email yourself, use the -n flag which will
+print out the command line patman would have used.
+
+6. It is a good idea to add the change log info as you change the commit,
+not later when you can't remember which patch you changed. You can always
+go back and change or remove logs from commits.
+
+
+Other thoughts
+==============
+
+This script has been split into sensible files but still needs work.
+Most of the remaining issues are indicated by a TODO in the code.
+
+It would be nice if this could handle the In-reply-to side of things.
+
+The tests are incomplete, as is customary. Use the --test flag to run them,
+and make sure you are in the tools/patman directory first:
+
+    $ cd /path/to/u-boot
+    $ cd tools/patman
+    $ ./patman --test
+
+Error handling doesn't always produce friendly error messages - e.g.
+putting an incorrect tag in a commit may provide a confusing message.
+
+There might be a few other features not mentioned in this README. They
+might be bugs. In particular, tags are case sensitive which is probably
+a bad thing.
+
+
+Simon Glass <sjg@chromium.org>
+v1, v2, 19-Oct-11
+revised v3 24-Nov-11
diff --git a/tools/u-boot-tools/patman/__init__.py b/tools/u-boot-tools/patman/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cbe5fa4b0c228700498c1c2d73d51627168fd6c
--- /dev/null
+++ b/tools/u-boot-tools/patman/__init__.py
@@ -0,0 +1,3 @@
+__all__ = ['checkpatch', 'command', 'commit', 'cros_subprocess',
+           'get_maintainer', 'gitutil', 'patchstream', 'project',
+           'series', 'settings', 'terminal', 'test']
diff --git a/tools/u-boot-tools/patman/checkpatch.py b/tools/u-boot-tools/patman/checkpatch.py
new file mode 100644
index 0000000000000000000000000000000000000000..d47ea438b7da58cc1c6d569dbc8c55f0b6e3e6f6
--- /dev/null
+++ b/tools/u-boot-tools/patman/checkpatch.py
@@ -0,0 +1,174 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+import collections
+import command
+import gitutil
+import os
+import re
+import sys
+import terminal
+
+def FindCheckPatch():
+    top_level = gitutil.GetTopLevel()
+    try_list = [
+        os.getcwd(),
+        os.path.join(os.getcwd(), '..', '..'),
+        os.path.join(top_level, 'tools'),
+        os.path.join(top_level, 'scripts'),
+        '%s/bin' % os.getenv('HOME'),
+        ]
+    # Look in each of the possible locations
+    for path in try_list:
+        fname = os.path.join(path, 'checkpatch.pl')
+        if os.path.isfile(fname):
+            return fname
+
+    # Look upwards for a Chrome OS tree
+    while not os.path.ismount(path):
+        fname = os.path.join(path, 'src', 'third_party', 'kernel', 'files',
+                'scripts', 'checkpatch.pl')
+        if os.path.isfile(fname):
+            return fname
+        path = os.path.dirname(path)
+
+    sys.exit('Cannot find checkpatch.pl - please put it in your ' +
+             '~/bin directory or use --no-check')
+
+def CheckPatch(fname, verbose=False):
+    """Run checkpatch.pl on a file.
+
+    Returns:
+        namedtuple containing:
+            ok: False=failure, True=ok
+            problems: List of problems, each a dict:
+                'type': error, warning or check
+                'msg': text message
+                'file': filename
+                'line': line number
+            errors: Number of errors
+            warnings: Number of warnings
+            checks: Number of checks
+            lines: Number of lines
+            stdout: Full output of checkpatch
+    """
+    fields = ['ok', 'problems', 'errors', 'warnings', 'checks', 'lines',
+              'stdout']
+    result = collections.namedtuple('CheckPatchResult', fields)
+    result.ok = False
+    result.errors, result.warnings, result.checks = 0, 0, 0
+    result.lines = 0
+    result.problems = []
+    chk = FindCheckPatch()
+    item = {}
+    result.stdout = command.Output(chk, '--no-tree', fname,
+                                   raise_on_error=False)
+    #pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+    #stdout, stderr = pipe.communicate()
+
+    # total: 0 errors, 0 warnings, 159 lines checked
+    # or:
+    # total: 0 errors, 2 warnings, 7 checks, 473 lines checked
+    re_stats = re.compile('total: (\\d+) errors, (\d+) warnings, (\d+)')
+    re_stats_full = re.compile('total: (\\d+) errors, (\d+) warnings, (\d+)'
+                               ' checks, (\d+)')
+    re_ok = re.compile('.*has no obvious style problems')
+    re_bad = re.compile('.*has style problems, please review')
+    re_error = re.compile('ERROR: (.*)')
+    re_warning = re.compile('WARNING: (.*)')
+    re_check = re.compile('CHECK: (.*)')
+    re_file = re.compile('#\d+: FILE: ([^:]*):(\d+):')
+
+    for line in result.stdout.splitlines():
+        if verbose:
+            print(line)
+
+        # A blank line indicates the end of a message
+        if not line and item:
+            result.problems.append(item)
+            item = {}
+        match = re_stats_full.match(line)
+        if not match:
+            match = re_stats.match(line)
+        if match:
+            result.errors = int(match.group(1))
+            result.warnings = int(match.group(2))
+            if len(match.groups()) == 4:
+                result.checks = int(match.group(3))
+                result.lines = int(match.group(4))
+            else:
+                result.lines = int(match.group(3))
+        elif re_ok.match(line):
+            result.ok = True
+        elif re_bad.match(line):
+            result.ok = False
+        err_match = re_error.match(line)
+        warn_match = re_warning.match(line)
+        file_match = re_file.match(line)
+        check_match = re_check.match(line)
+        if err_match:
+            item['msg'] = err_match.group(1)
+            item['type'] = 'error'
+        elif warn_match:
+            item['msg'] = warn_match.group(1)
+            item['type'] = 'warning'
+        elif check_match:
+            item['msg'] = check_match.group(1)
+            item['type'] = 'check'
+        elif file_match:
+            item['file'] = file_match.group(1)
+            item['line'] = int(file_match.group(2))
+
+    return result
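+
+# Example usage (illustrative; the patch filename below is hypothetical):
+#
+#     result = CheckPatch('0001-example.patch')
+#     if not result.ok:
+#         print('%d error(s), %d warning(s)' % (result.errors, result.warnings))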
+
+def GetWarningMsg(col, msg_type, fname, line, msg):
+    '''Create a message for a given file/line
+
+    Args:
+        msg_type: Message type ('error' or 'warning')
+        fname: Filename which reports the problem
+        line: Line number where it was noticed
+        msg: Message to report
+    '''
+    if msg_type == 'warning':
+        msg_type = col.Color(col.YELLOW, msg_type)
+    elif msg_type == 'error':
+        msg_type = col.Color(col.RED, msg_type)
+    elif msg_type == 'check':
+        msg_type = col.Color(col.MAGENTA, msg_type)
+    return '%s:%d: %s: %s\n' % (fname, line, msg_type, msg)
+
+def CheckPatches(verbose, args):
+    '''Run the checkpatch.pl script on each patch'''
+    error_count, warning_count, check_count = 0, 0, 0
+    col = terminal.Color()
+
+    for fname in args:
+        result = CheckPatch(fname, verbose)
+        if not result.ok:
+            error_count += result.errors
+            warning_count += result.warnings
+            check_count += result.checks
+            print('%d errors, %d warnings, %d checks for %s:' % (result.errors,
+                    result.warnings, result.checks, col.Color(col.BLUE, fname)))
+            if (len(result.problems) != result.errors + result.warnings +
+                    result.checks):
+                print("Internal error: some problems lost")
+            for item in result.problems:
+                sys.stderr.write(
+                    GetWarningMsg(col, item.get('type', '<unknown>'),
+                        item.get('file', '<unknown>'),
+                        item.get('line', 0), item.get('msg', 'message')))
+            print()
+            #print(stdout)
+    if error_count or warning_count or check_count:
+        msg = 'checkpatch.pl found %d error(s), %d warning(s), %d check(s)'
+        color = col.GREEN
+        if warning_count:
+            color = col.YELLOW
+        if error_count:
+            color = col.RED
+        print(col.Color(color, msg % (error_count, warning_count, check_count)))
+        return False
+    return True
diff --git a/tools/u-boot-tools/patman/command.py b/tools/u-boot-tools/patman/command.py
new file mode 100644
index 0000000000000000000000000000000000000000..14edcdaffd29cdc566ea8c70865443dac50170b3
--- /dev/null
+++ b/tools/u-boot-tools/patman/command.py
@@ -0,0 +1,127 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+import os
+import cros_subprocess
+
+"""Shell command ease-ups for Python."""
+
+class CommandResult:
+    """A class which captures the result of executing a command.
+
+    Members:
+        stdout: stdout obtained from command, as a string
+        stderr: stderr obtained from command, as a string
+        return_code: Return code from command
+        exception: Exception received, or None if all ok
+    """
+    def __init__(self, stdout='', stderr='', combined='', return_code=0,
+                 exception=None):
+        self.stdout = stdout
+        self.stderr = stderr
+        self.combined = combined
+        self.return_code = return_code
+        self.exception = exception
+
+
+# This permits interception of RunPipe for test purposes. If it is set to
+# a function, then that function is called with the pipe list being
+# executed. Otherwise, it is assumed to be a CommandResult object, and is
+# returned as the result for every RunPipe() call.
+# When this value is None, commands are executed as normal.
+test_result = None
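+
+# Example (illustrative): a test could intercept all commands like this:
+#
+#     def fake_run(pipe_list):
+#         return CommandResult(stdout='mocked output\n', return_code=0)
+#
+#     command.test_result = fake_run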
+
+def RunPipe(pipe_list, infile=None, outfile=None,
+            capture=False, capture_stderr=False, oneline=False,
+            raise_on_error=True, cwd=None, **kwargs):
+    """
+    Perform a command pipeline, with optional input/output filenames.
+
+    Args:
+        pipe_list: List of command lines to execute. Each command line is
+            piped into the next, and is itself a list of strings. For
+            example [ ['ls', '.git'], ['wc'] ] will pipe the output of
+            'ls .git' into 'wc'.
+        infile: File to provide stdin to the pipeline
+        outfile: File to store stdout
+        capture: True to capture output
+        capture_stderr: True to capture stderr
+        oneline: True to strip newline chars from output
+        kwargs: Additional keyword arguments to cros_subprocess.Popen()
+    Returns:
+        CommandResult object
+    """
+    if test_result:
+        if hasattr(test_result, '__call__'):
+            result = test_result(pipe_list=pipe_list)
+            if result:
+                return result
+        else:
+            return test_result
+        # No result: fall through to normal processing
+    result = CommandResult()
+    last_pipe = None
+    pipeline = list(pipe_list)
+    user_pipestr =  '|'.join([' '.join(pipe) for pipe in pipe_list])
+    kwargs['stdout'] = None
+    kwargs['stderr'] = None
+    while pipeline:
+        cmd = pipeline.pop(0)
+        if last_pipe is not None:
+            kwargs['stdin'] = last_pipe.stdout
+        elif infile:
+            kwargs['stdin'] = open(infile, 'rb')
+        if pipeline or capture:
+            kwargs['stdout'] = cros_subprocess.PIPE
+        elif outfile:
+            kwargs['stdout'] = open(outfile, 'wb')
+        if capture_stderr:
+            kwargs['stderr'] = cros_subprocess.PIPE
+
+        try:
+            last_pipe = cros_subprocess.Popen(cmd, cwd=cwd, **kwargs)
+        except Exception as err:
+            result.exception = err
+            if raise_on_error:
+                raise Exception("Error running '%s': %s" % (user_pipestr, err))
+            result.return_code = 255
+            return result
+
+    if capture:
+        result.stdout, result.stderr, result.combined = (
+                last_pipe.CommunicateFilter(None))
+        if result.stdout and oneline:
+            result.output = result.stdout.rstrip('\r\n')
+        result.return_code = last_pipe.wait()
+    else:
+        result.return_code = os.waitpid(last_pipe.pid, 0)[1]
+    if raise_on_error and result.return_code:
+        raise Exception("Error running '%s'" % user_pipestr)
+    return result
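+
+# Example usage (illustrative): run 'git log --oneline -5' and capture output:
+#
+#     result = RunPipe([['git', 'log', '--oneline', '-5']], capture=True)
+#     print(result.stdout)
+#
+# The Output() and OutputOneLine() helpers below wrap this pattern.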
+
+def Output(*cmd, **kwargs):
+    raise_on_error = kwargs.get('raise_on_error', True)
+    return RunPipe([cmd], capture=True, raise_on_error=raise_on_error).stdout
+
+def OutputOneLine(*cmd, **kwargs):
+    raise_on_error = kwargs.pop('raise_on_error', True)
+    return (RunPipe([cmd], capture=True, oneline=True,
+            raise_on_error=raise_on_error,
+            **kwargs).stdout.strip())
+
+def Run(*cmd, **kwargs):
+    return RunPipe([cmd], **kwargs).stdout
+
+def RunList(cmd):
+    return RunPipe([cmd], capture=True).stdout
+
+def StopAll():
+    cros_subprocess.stay_alive = False
diff --git a/tools/u-boot-tools/patman/commit.py b/tools/u-boot-tools/patman/commit.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bf3a0ba5b924e5d454fb4aacfdda565687778ce
--- /dev/null
+++ b/tools/u-boot-tools/patman/commit.py
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+import re
+
+# Separates a tag: at the beginning of the subject from the rest of it
+re_subject_tag = re.compile('([^:\s]*):\s*(.*)')
+
+class Commit:
+    """Holds information about a single commit/patch in the series.
+
+    Args:
+        hash: Commit hash (as a string)
+
+    Variables:
+        hash: Commit hash
+        subject: Subject line
+        tags: List of maintainer tag strings
+        changes: Dict containing a list of changes (single line strings).
+            The dict is indexed by change version (an integer)
+        cc_list: List of people to aliases/emails to cc on this commit
+        notes: List of lines in the commit (not series) notes
+    """
+    def __init__(self, hash):
+        self.hash = hash
+        self.subject = None
+        self.tags = []
+        self.changes = {}
+        self.cc_list = []
+        self.signoff_set = set()
+        self.notes = []
+
+    def AddChange(self, version, info):
+        """Add a new change line to the change list for a version.
+
+        Args:
+            version: Patch set version (integer: 1, 2, 3)
+            info: Description of change in this version
+        """
+        if not self.changes.get(version):
+            self.changes[version] = []
+        self.changes[version].append(info)
+
+    def CheckTags(self):
+        """Create a list of subject tags in the commit
+
+        Subject tags look like this:
+
+            propounder: fort: Change the widget to propound correctly
+
+        Here the tags are propounder and fort. Multiple tags are supported.
+        The list is updated in self.tags.
+
+        Returns:
+            None if ok, else the name of a tag with no email alias
+        """
+        str = self.subject
+        m = True
+        while m:
+            m = re_subject_tag.match(str)
+            if m:
+                tag = m.group(1)
+                self.tags.append(tag)
+                str = m.group(2)
+        return None
+
+    def AddCc(self, cc_list):
+        """Add a list of people to Cc when we send this patch.
+
+        Args:
+            cc_list:    List of aliases or email addresses
+        """
+        self.cc_list += cc_list
+
+    def CheckDuplicateSignoff(self, signoff):
+        """Check a list of signoffs we have seen for this patch
+
+        Args:
+            signoff:    Signoff line
+        Returns:
+            True if this signoff is new, False if we have already seen it.
+        """
+        if signoff in self.signoff_set:
+            return False
+        self.signoff_set.add(signoff)
+        return True
diff --git a/tools/u-boot-tools/patman/cros_subprocess.py b/tools/u-boot-tools/patman/cros_subprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebd4300dfd5728d6f5ff52b360d8d8d5be629687
--- /dev/null
+++ b/tools/u-boot-tools/patman/cros_subprocess.py
@@ -0,0 +1,397 @@
+# Copyright (c) 2012 The Chromium OS Authors.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
+# Licensed to PSF under a Contributor Agreement.
+# See http://www.python.org/2.4/license for licensing details.
+
+"""Subprocess execution
+
+This module holds a subclass of subprocess.Popen with our own required
+features, mainly that we get access to the subprocess output while it
+is running rather than just at the end. This makes it easier to show
+progress information and filter output in real time.
+"""
+
+import errno
+import os
+import pty
+import select
+import subprocess
+import sys
+import unittest
+
+
+# Import these here so the caller does not need to import subprocess also.
+PIPE = subprocess.PIPE
+STDOUT = subprocess.STDOUT
+PIPE_PTY = -3     # Pipe output through a pty
+stay_alive = True
+
+
+class Popen(subprocess.Popen):
+    """Like subprocess.Popen with ptys and incremental output
+
+    This class deals with running a child process and filtering its output on
+    both stdout and stderr while it is running. We do this so we can monitor
+    progress, and possibly relay the output to the user if requested.
+
+    The class is similar to subprocess.Popen, the equivalent is something like:
+
+        Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+    But this class has many fewer features, and two enhancements:
+
+    1. Rather than getting the output data only at the end, this class sends it
+         to a provided operation as it arrives.
+    2. We use pseudo terminals so that the child will hopefully flush its output
+         to us as soon as it is produced, rather than waiting for the end of a
+         line.
+
+    Use CommunicateFilter() to handle output from the subprocess.
+
+    """
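+
+    # Example (illustrative, mirroring the unit tests at the end of this
+    # file): collect a command's output incrementally while it runs:
+    #
+    #     def show(stream, data):
+    #         sys.stdout.write(data)
+    #
+    #     stdout, stderr, combined = Popen(['ps']).CommunicateFilter(show)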
+
+    def __init__(self, args, stdin=None, stdout=PIPE_PTY, stderr=PIPE_PTY,
+                 shell=False, cwd=None, env=None, **kwargs):
+        """Cut-down constructor
+
+        Args:
+            args: Program and arguments for subprocess to execute.
+            stdin: See subprocess.Popen()
+            stdout: See subprocess.Popen(), except that we support the sentinel
+                    value of cros_subprocess.PIPE_PTY.
+            stderr: See subprocess.Popen(), except that we support the sentinel
+                    value of cros_subprocess.PIPE_PTY.
+            shell: See subprocess.Popen()
+            cwd: Working directory to change to for subprocess, or None if none.
+            env: Environment to use for this subprocess, or None to inherit parent.
+            kwargs: No other arguments are supported at the moment.    Passing other
+                    arguments will cause a ValueError to be raised.
+        """
+        stdout_pty = None
+        stderr_pty = None
+
+        if stdout == PIPE_PTY:
+            stdout_pty = pty.openpty()
+            stdout = os.fdopen(stdout_pty[1])
+        if stderr == PIPE_PTY:
+            stderr_pty = pty.openpty()
+            stderr = os.fdopen(stderr_pty[1])
+
+        super(Popen, self).__init__(args, stdin=stdin,
+                stdout=stdout, stderr=stderr, shell=shell, cwd=cwd, env=env,
+                **kwargs)
+
+        # If we're on a PTY, we passed the slave half of the PTY to the subprocess.
+        # We want to use the master half on our end from now on.    Setting this here
+        # does make some assumptions about the implementation of subprocess, but
+        # those assumptions are pretty minor.
+
+        # Note that if stderr is STDOUT, then self.stderr will be set to None by
+        # this constructor.
+        if stdout_pty is not None:
+            self.stdout = os.fdopen(stdout_pty[0])
+        if stderr_pty is not None:
+            self.stderr = os.fdopen(stderr_pty[0])
+
+        # Insist that unit tests exist for other arguments we don't support.
+        if kwargs:
+            raise ValueError("Unit tests do not test extra args - please add tests")
+
+    def CommunicateFilter(self, output):
+        """Interact with process: Read data from stdout and stderr.
+
+        This method runs until end-of-file is reached, then waits for the
+        subprocess to terminate.
+
+        The output function is sent all output from the subprocess and must be
+        defined like this:
+
+            def Output([self,] stream, data)
+            Args:
+                stream: the stream the output was received on, which will be
+                        sys.stdout or sys.stderr.
+                data: a string containing the data
+
+        Note: The data read is buffered in memory, so do not use this
+        method if the data size is large or unlimited.
+
+        Args:
+            output: Function to call with each fragment of output.
+
+        Returns:
+            A tuple (stdout, stderr, combined) which is the data received on
+            stdout, stderr and the combined data (interleaved stdout and stderr).
+
+            Note that the interleaved output will only be sensible if you have
+            set both stdout and stderr to PIPE or PIPE_PTY. Even then it depends on
+            the timing of the output in the subprocess. If a subprocess flips
+            between stdout and stderr quickly in succession, by the time we come to
+            read the output from each we may see several lines in each, and will read
+            all the stdout lines, then all the stderr lines. So the interleaving
+            may not be correct. In this case you might want to pass
+            stderr=cros_subprocess.STDOUT to the constructor.
+
+            This feature is still useful for subprocesses where stderr is
+            rarely used and indicates an error.
+
+            Note also that if you set stderr to STDOUT, then stderr will be empty
+            and the combined output will just be the same as stdout.
+        """
+
+        read_set = []
+        write_set = []
+        stdout = None # Return
+        stderr = None # Return
+
+        if self.stdin:
+            # Flush stdio buffer.    This might block, if the user has
+            # been writing to .stdin in an uncontrolled fashion.
+            self.stdin.flush()
+            if input:
+                write_set.append(self.stdin)
+            else:
+                self.stdin.close()
+        if self.stdout:
+            read_set.append(self.stdout)
+            stdout = []
+        if self.stderr and self.stderr != self.stdout:
+            read_set.append(self.stderr)
+            stderr = []
+        combined = []
+
+        input_offset = 0
+        while read_set or write_set:
+            try:
+                rlist, wlist, _ = select.select(read_set, write_set, [], 0.2)
+            except select.error as e:
+                if e.args[0] == errno.EINTR:
+                    continue
+                raise
+
+            if not stay_alive:
+                self.terminate()
+
+            if self.stdin in wlist:
+                # When select has indicated that the file is writable,
+                # we can write up to PIPE_BUF bytes without risk of
+                # blocking. POSIX defines PIPE_BUF >= 512
+                chunk = input[input_offset : input_offset + 512]
+                bytes_written = os.write(self.stdin.fileno(), chunk)
+                input_offset += bytes_written
+                if input_offset >= len(input):
+                    self.stdin.close()
+                    write_set.remove(self.stdin)
+
+            if self.stdout in rlist:
+                data = ""
+                # We will get an error on read if the pty is closed
+                try:
+                    data = os.read(self.stdout.fileno(), 1024)
+                except OSError:
+                    pass
+                if data == "":
+                    self.stdout.close()
+                    read_set.remove(self.stdout)
+                else:
+                    stdout.append(data)
+                    combined.append(data)
+                    if output:
+                        output(sys.stdout, data)
+            if self.stderr in rlist:
+                data = ""
+                # We will get an error on read if the pty is closed
+                try:
+                    data = os.read(self.stderr.fileno(), 1024)
+                except OSError:
+                    pass
+                if data == "":
+                    self.stderr.close()
+                    read_set.remove(self.stderr)
+                else:
+                    stderr.append(data)
+                    combined.append(data)
+                    if output:
+                        output(sys.stderr, data)
+
+        # All data exchanged.    Translate lists into strings.
+        if stdout is not None:
+            stdout = ''.join(stdout)
+        else:
+            stdout = ''
+        if stderr is not None:
+            stderr = ''.join(stderr)
+        else:
+            stderr = ''
+        combined = ''.join(combined)
+
+        # Translate newlines, if requested.    We cannot let the file
+        # object do the translation: It is based on stdio, which is
+        # impossible to combine with select (unless forcing no
+        # buffering).
+        if self.universal_newlines and hasattr(file, 'newlines'):
+            if stdout:
+                stdout = self._translate_newlines(stdout)
+            if stderr:
+                stderr = self._translate_newlines(stderr)
+
+        self.wait()
+        return (stdout, stderr, combined)
+
+
+# Just being a unittest.TestCase gives us 14 public methods.    Unless we
+# disable this, we can only have 6 tests in a TestCase.    That's not enough.
+#
+# pylint: disable=R0904
+
+class TestSubprocess(unittest.TestCase):
+    """Our simple unit test for this module"""
+
+    class MyOperation:
+        """Provides an operation that we can pass to Popen"""
+        def __init__(self, input_to_send=None):
+            """Constructor to set up the operation and possible input.
+
+            Args:
+                input_to_send: a text string to send when we first get input. We will
+                    add \r\n to the string.
+            """
+            self.stdout_data = ''
+            self.stderr_data = ''
+            self.combined_data = ''
+            self.stdin_pipe = None
+            self._input_to_send = input_to_send
+            if input_to_send:
+                pipe = os.pipe()
+                self.stdin_read_pipe = pipe[0]
+                self._stdin_write_pipe = os.fdopen(pipe[1], 'w')
+
+        def Output(self, stream, data):
+            """Output handler for Popen. Stores the data for later comparison"""
+            if stream == sys.stdout:
+                self.stdout_data += data
+            if stream == sys.stderr:
+                self.stderr_data += data
+            self.combined_data += data
+
+            # Output the input string if we have one.
+            if self._input_to_send:
+                self._stdin_write_pipe.write(self._input_to_send + '\r\n')
+                self._stdin_write_pipe.flush()
+
+    def _BasicCheck(self, plist, oper):
+        """Basic checks that the output looks sane."""
+        self.assertEqual(plist[0], oper.stdout_data)
+        self.assertEqual(plist[1], oper.stderr_data)
+        self.assertEqual(plist[2], oper.combined_data)
+
+        # The total length of stdout and stderr should equal the combined length
+        self.assertEqual(len(plist[0]) + len(plist[1]), len(plist[2]))
+
+    def test_simple(self):
+        """Simple redirection: Get process list"""
+        oper = TestSubprocess.MyOperation()
+        plist = Popen(['ps']).CommunicateFilter(oper.Output)
+        self._BasicCheck(plist, oper)
+
+    def test_stderr(self):
+        """Check stdout and stderr"""
+        oper = TestSubprocess.MyOperation()
+        cmd = 'echo fred >/dev/stderr && false || echo bad'
+        plist = Popen([cmd], shell=True).CommunicateFilter(oper.Output)
+        self._BasicCheck(plist, oper)
+        self.assertEqual(plist[0], 'bad\r\n')
+        self.assertEqual(plist[1], 'fred\r\n')
+
+    def test_shell(self):
+        """Check with and without shell works"""
+        oper = TestSubprocess.MyOperation()
+        cmd = 'echo test >/dev/stderr'
+        self.assertRaises(OSError, Popen, [cmd], shell=False)
+        plist = Popen([cmd], shell=True).CommunicateFilter(oper.Output)
+        self._BasicCheck(plist, oper)
+        self.assertEqual(len(plist[0]), 0)
+        self.assertEqual(plist[1], 'test\r\n')
+
+    def test_list_args(self):
+        """Check with and without shell works using list arguments"""
+        oper = TestSubprocess.MyOperation()
+        cmd = ['echo', 'test', '>/dev/stderr']
+        plist = Popen(cmd, shell=False).CommunicateFilter(oper.Output)
+        self._BasicCheck(plist, oper)
+        self.assertEqual(plist[0], ' '.join(cmd[1:]) + '\r\n')
+        self.assertEqual(len(plist[1]), 0)
+
+        oper = TestSubprocess.MyOperation()
+
+        # this should be interpreted as 'echo' with the other args dropped
+        cmd = ['echo', 'test', '>/dev/stderr']
+        plist = Popen(cmd, shell=True).CommunicateFilter(oper.Output)
+        self._BasicCheck(plist, oper)
+        self.assertEqual(plist[0], '\r\n')
+
+    def test_cwd(self):
+        """Check we can change directory"""
+        for shell in (False, True):
+            oper = TestSubprocess.MyOperation()
+            plist = Popen('pwd', shell=shell, cwd='/tmp').CommunicateFilter(oper.Output)
+            self._BasicCheck(plist, oper)
+            self.assertEqual(plist[0], '/tmp\r\n')
+
+    def test_env(self):
+        """Check we can change environment"""
+        for add in (False, True):
+            oper = TestSubprocess.MyOperation()
+            env = os.environ.copy()
+            if add:
+                env['FRED'] = 'fred'
+            cmd = 'echo $FRED'
+            plist = Popen(cmd, shell=True, env=env).CommunicateFilter(oper.Output)
+            self._BasicCheck(plist, oper)
+            self.assertEqual(plist[0], 'fred\r\n' if add else '\r\n')
+
+    def test_extra_args(self):
+        """Check we can't add extra arguments"""
+        self.assertRaises(ValueError, Popen, 'true', close_fds=False)
+
+    def test_basic_input(self):
+        """Check that incremental input works
+
+        We set up a subprocess which will prompt for name. When we see this prompt
+        we send the name as input to the process. It should then print the name
+        properly to stdout.
+        """
+        oper = TestSubprocess.MyOperation('Flash')
+        prompt = 'What is your name?: '
+        cmd = 'echo -n "%s"; read name; echo Hello $name' % prompt
+        plist = Popen([cmd], stdin=oper.stdin_read_pipe,
+                shell=True).CommunicateFilter(oper.Output)
+        self._BasicCheck(plist, oper)
+        self.assertEqual(len(plist[1]), 0)
+        self.assertEqual(plist[0], prompt + 'Hello Flash\r\r\n')
+
+    def test_isatty(self):
+        """Check that ptys appear as terminals to the subprocess"""
+        oper = TestSubprocess.MyOperation()
+        cmd = ('if [ -t %d ]; then echo "terminal %d" >&%d; '
+                'else echo "not %d" >&%d; fi;')
+        both_cmds = ''
+        for fd in (1, 2):
+            both_cmds += cmd % (fd, fd, fd, fd, fd)
+        plist = Popen(both_cmds, shell=True).CommunicateFilter(oper.Output)
+        self._BasicCheck(plist, oper)
+        self.assertEqual(plist[0], 'terminal 1\r\n')
+        self.assertEqual(plist[1], 'terminal 2\r\n')
+
+        # Now try with PIPE and make sure it is not a terminal
+        oper = TestSubprocess.MyOperation()
+        plist = Popen(both_cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                shell=True).CommunicateFilter(oper.Output)
+        self._BasicCheck(plist, oper)
+        self.assertEqual(plist[0], 'not 1\n')
+        self.assertEqual(plist[1], 'not 2\n')
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tools/u-boot-tools/patman/func_test.py b/tools/u-boot-tools/patman/func_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..d79e716074b6a2274e81932b7d012b47d12919d9
--- /dev/null
+++ b/tools/u-boot-tools/patman/func_test.py
@@ -0,0 +1,242 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier:	GPL-2.0+
+#
+# Copyright 2017 Google, Inc
+#
+
+import contextlib
+import os
+import re
+import shutil
+import sys
+import tempfile
+import unittest
+
+import gitutil
+import patchstream
+import settings
+
+
+@contextlib.contextmanager
+def capture():
+    import sys
+    from cStringIO import StringIO
+    oldout, olderr = sys.stdout, sys.stderr
+    try:
+        out = [StringIO(), StringIO()]
+        sys.stdout, sys.stderr = out
+        yield out
+    finally:
+        sys.stdout, sys.stderr = oldout, olderr
+        out[0] = out[0].getvalue()
+        out[1] = out[1].getvalue()
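+# Rough usage sketch: capture() swaps sys.stdout/stderr for StringIO buffers,
+# so prints inside the block are collected and out[0]/out[1] end up holding
+# the captured text, e.g.:
+#
+#     with capture() as out:
+#         print('hello')
+#     # out[0] == 'hello\n', out[1] == ''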
+
+
+class TestFunctional(unittest.TestCase):
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp(prefix='patman.')
+
+    def tearDown(self):
+        shutil.rmtree(self.tmpdir)
+
+    @staticmethod
+    def GetPath(fname):
+        return os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
+                            'test', fname)
+
+    @classmethod
+    def GetText(cls, fname):
+        return open(cls.GetPath(fname)).read()
+
+    @classmethod
+    def GetPatchName(cls, subject):
+        fname = re.sub('[ :]', '-', subject)
+        return fname.replace('--', '-')
+
+    def CreatePatchesForTest(self, series):
+        cover_fname = None
+        fname_list = []
+        for i, commit in enumerate(series.commits):
+            clean_subject = self.GetPatchName(commit.subject)
+            src_fname = '%04d-%s.patch' % (i + 1, clean_subject[:52])
+            fname = os.path.join(self.tmpdir, src_fname)
+            shutil.copy(self.GetPath(src_fname), fname)
+            fname_list.append(fname)
+        if series.get('cover'):
+            src_fname = '0000-cover-letter.patch'
+            cover_fname = os.path.join(self.tmpdir, src_fname)
+            fname = os.path.join(self.tmpdir, src_fname)
+            shutil.copy(self.GetPath(src_fname), fname)
+
+        return cover_fname, fname_list
+
+    def testBasic(self):
+        """Tests the basic flow of patman
+
+        This creates a series from some hard-coded patches build from a simple
+        tree with the following metadata in the top commit:
+
+            Series-to: u-boot
+            Series-prefix: RFC
+            Series-cc: Stefan Brüns <stefan.bruens@rwth-aachen.de>
+            Cover-letter-cc: Lord Mëlchett <clergy@palace.gov>
+            Series-version: 2
+            Series-changes: 4
+            - Some changes
+
+            Cover-letter:
+            test: A test patch series
+            This is a test of how the cover
+            leter
+            works
+            END
+
+        and this in the first commit:
+
+            Series-notes:
+            some notes
+            about some things
+            from the first commit
+            END
+
+            Commit-notes:
+            Some notes about
+            the first commit
+            END
+
+        with the following commands:
+
+           git log -n2 --reverse >/path/to/tools/patman/test/test01.txt
+           git format-patch --subject-prefix RFC --cover-letter HEAD~2
+           mv 00* /path/to/tools/patman/test
+
+        It checks these aspects:
+            - git log can be processed by patchstream
+            - emailing patches uses the correct command
+            - CC file has information on each commit
+            - cover letter has the expected text and subject
+            - each patch has the correct subject
+            - dry-run information prints out correctly
+            - unicode is handled correctly
+            - Series-to, Series-cc, Series-prefix, Cover-letter
+            - Cover-letter-cc, Series-version, Series-changes, Series-notes
+            - Commit-notes
+        """
+        process_tags = True
+        ignore_bad_tags = True
+        stefan = u'Stefan Brüns <stefan.bruens@rwth-aachen.de>'
+        rick = 'Richard III <richard@palace.gov>'
+        mel = u'Lord Mëlchett <clergy@palace.gov>'
+        ed = u'Lond Edmund Blackaddër <weasel@blackadder.org'
+        fred = 'Fred Bloggs <f.bloggs@napier.net>'
+        add_maintainers = [stefan, rick]
+        dry_run = True
+        in_reply_to = mel
+        count = 2
+        settings.alias = {
+                'fdt': ['simon'],
+                'u-boot': ['u-boot@lists.denx.de'],
+                'simon': [ed],
+                'fred': [fred],
+        }
+
+        text = self.GetText('test01.txt')
+        series = patchstream.GetMetaDataForTest(text)
+        cover_fname, args = self.CreatePatchesForTest(series)
+        with capture() as out:
+            patchstream.FixPatches(series, args)
+            if cover_fname and series.get('cover'):
+                patchstream.InsertCoverLetter(cover_fname, series, count)
+            series.DoChecks()
+            cc_file = series.MakeCcFile(process_tags, cover_fname,
+                                        not ignore_bad_tags, add_maintainers,
+                                        None)
+            cmd = gitutil.EmailPatches(series, cover_fname, args,
+                    dry_run, not ignore_bad_tags, cc_file,
+                    in_reply_to=in_reply_to, thread=None)
+            series.ShowActions(args, cmd, process_tags)
+        cc_lines = open(cc_file).read().splitlines()
+        os.remove(cc_file)
+
+        lines = out[0].splitlines()
+        #print '\n'.join(lines)
+        self.assertEqual('Cleaned %s patches' % len(series.commits), lines[0])
+        self.assertEqual('Change log missing for v2', lines[1])
+        self.assertEqual('Change log missing for v3', lines[2])
+        self.assertEqual('Change log for unknown version v4', lines[3])
+        self.assertEqual("Alias 'pci' not found", lines[4])
+        self.assertIn('Dry run', lines[5])
+        self.assertIn('Send a total of %d patches' % count, lines[7])
+        line = 8
+        for i, commit in enumerate(series.commits):
+            self.assertEqual('   %s' % args[i], lines[line + 0])
+            line += 1
+            while 'Cc:' in lines[line]:
+                line += 1
+        self.assertEqual('To:	  u-boot@lists.denx.de', lines[line])
+        self.assertEqual('Cc:	  %s' % stefan.encode('utf-8'), lines[line + 1])
+        self.assertEqual('Version:  3', lines[line + 2])
+        self.assertEqual('Prefix:\t  RFC', lines[line + 3])
+        self.assertEqual('Cover: 4 lines', lines[line + 4])
+        line += 5
+        self.assertEqual('      Cc:  %s' % mel.encode('utf-8'), lines[line + 0])
+        self.assertEqual('      Cc:  %s' % rick, lines[line + 1])
+        self.assertEqual('      Cc:  %s' % fred, lines[line + 2])
+        self.assertEqual('      Cc:  %s' % ed.encode('utf-8'), lines[line + 3])
+        expected = ('Git command: git send-email --annotate '
+                    '--in-reply-to="%s" --to "u-boot@lists.denx.de" '
+                    '--cc "%s" --cc-cmd "%s --cc-cmd %s" %s %s'
+                    % (in_reply_to, stefan, sys.argv[0], cc_file, cover_fname,
+                       ' '.join(args))).encode('utf-8')
+        line += 4
+        self.assertEqual(expected, lines[line])
+
+        self.assertEqual(('%s %s, %s' % (args[0], rick, stefan))
+                         .encode('utf-8'), cc_lines[0])
+        self.assertEqual(('%s %s, %s, %s, %s' % (args[1], fred, rick, stefan,
+                                            ed)).encode('utf-8'), cc_lines[1])
+
+        expected = '''
+This is a test of how the cover
+leter
+works
+
+some notes
+about some things
+from the first commit
+
+Changes in v4:
+- Some changes
+
+Simon Glass (2):
+  pci: Correct cast for sandbox
+  fdt: Correct cast for sandbox in fdtdec_setup_mem_size_base()
+
+ cmd/pci.c                   | 3 ++-
+ fs/fat/fat.c                | 1 +
+ lib/efi_loader/efi_memory.c | 1 +
+ lib/fdtdec.c                | 3 ++-
+ 4 files changed, 6 insertions(+), 2 deletions(-)
+
+--\x20
+2.7.4
+
+'''
+        lines = open(cover_fname).read().splitlines()
+        #print '\n'.join(lines)
+        self.assertEqual(
+                'Subject: [RFC PATCH v3 0/2] test: A test patch series',
+                lines[3])
+        self.assertEqual(expected.splitlines(), lines[7:])
+
+        for i, fname in enumerate(args):
+            lines = open(fname).read().splitlines()
+            #print '\n'.join(lines)
+            subject = [line for line in lines if line.startswith('Subject')]
+            self.assertEqual('Subject: [RFC %d/%d]' % (i + 1, count),
+                             subject[0][:18])
+            if i == 0:
+                # Check that we got our commit notes
+                self.assertEqual('---', lines[17])
+                self.assertEqual('Some notes about', lines[18])
+                self.assertEqual('the first commit', lines[19])
diff --git a/tools/u-boot-tools/patman/get_maintainer.py b/tools/u-boot-tools/patman/get_maintainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ffb55a8219ffbcc886b086ffdff0bcad61fd74a
--- /dev/null
+++ b/tools/u-boot-tools/patman/get_maintainer.py
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2012 The Chromium OS Authors.
+#
+
+import command
+import gitutil
+import os
+
+def FindGetMaintainer():
+    """Look for the get_maintainer.pl script.
+
+    Returns:
+        If the script is found we'll return a path to it; else None.
+    """
+    try_list = [
+        os.path.join(gitutil.GetTopLevel(), 'scripts'),
+        ]
+    # Look in the list
+    for path in try_list:
+        fname = os.path.join(path, 'get_maintainer.pl')
+        if os.path.isfile(fname):
+            return fname
+
+    return None
+
+def GetMaintainer(fname, verbose=False):
+    """Run get_maintainer.pl on a file if we find it.
+
+    We look for get_maintainer.pl in the 'scripts' directory at the top of
+    git.  If we find it we'll run it.  If we don't find get_maintainer.pl
+    then we fail silently.
+
+    Args:
+        fname: Path to the patch file to run get_maintainer.pl on.
+
+    Returns:
+        A list of email addresses to CC to.
+    """
+    get_maintainer = FindGetMaintainer()
+    if not get_maintainer:
+        if verbose:
+            print("WARNING: Couldn't find get_maintainer.pl")
+        return []
+
+    stdout = command.Output(get_maintainer, '--norolestats', fname)
+    lines = stdout.splitlines()
+    return [x.replace('"', '') for x in lines]
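+# Illustrative result (file name and addresses are hypothetical): for a patch
+# touching a subsystem with a listed maintainer, GetMaintainer() might return
+# something like:
+#
+#     GetMaintainer('0001-mmc-fix-timeout.patch')
+#         -> ['Jane Dev <jane@example.com>', 'u-boot@lists.denx.de']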
diff --git a/tools/u-boot-tools/patman/gitutil.py b/tools/u-boot-tools/patman/gitutil.py
new file mode 100644
index 0000000000000000000000000000000000000000..9905bb0bbd8226e8ba9bd67a8241b52a6ebd009e
--- /dev/null
+++ b/tools/u-boot-tools/patman/gitutil.py
@@ -0,0 +1,597 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+import command
+import re
+import os
+import series
+import subprocess
+import sys
+import terminal
+
+import checkpatch
+import settings
+
+# True to use --no-decorate - we check this in Setup()
+use_no_decorate = True
+
+def LogCmd(commit_range, git_dir=None, oneline=False, reverse=False,
+           count=None):
+    """Create a command to perform a 'git log'
+
+    Args:
+        commit_range: Range expression to use for log, None for none
+        git_dir: Path to git repository (None to use default)
+        oneline: True to use --oneline, else False
+        reverse: True to reverse the log (--reverse)
+        count: Number of commits to list, or None for no limit
+    Return:
+        List containing command and arguments to run
+    """
+    cmd = ['git']
+    if git_dir:
+        cmd += ['--git-dir', git_dir]
+    cmd += ['--no-pager', 'log', '--no-color']
+    if oneline:
+        cmd.append('--oneline')
+    if use_no_decorate:
+        cmd.append('--no-decorate')
+    if reverse:
+        cmd.append('--reverse')
+    if count is not None:
+        cmd.append('-n%d' % count)
+    if commit_range:
+        cmd.append(commit_range)
+
+    # Add this in case we have a branch with the same name as a directory.
+    # This avoids messages like this, for example:
+    #   fatal: ambiguous argument 'test': both revision and filename
+    cmd.append('--')
+    return cmd
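+# Example of the list LogCmd() builds (assuming use_no_decorate is still at
+# its default of True):
+#
+#     LogCmd('HEAD~3..', oneline=True, count=5)
+#         -> ['git', '--no-pager', 'log', '--no-color', '--oneline',
+#             '--no-decorate', '-n5', 'HEAD~3..', '--']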
+
+def CountCommitsToBranch():
+    """Returns number of commits between HEAD and the tracking branch.
+
+    This looks back to the tracking branch and works out the number of commits
+    since then.
+
+    Return:
+        Number of patches that exist on top of the branch
+    """
+    pipe = [LogCmd('@{upstream}..', oneline=True),
+            ['wc', '-l']]
+    stdout = command.RunPipe(pipe, capture=True, oneline=True).stdout
+    patch_count = int(stdout)
+    return patch_count
+
+def NameRevision(commit_hash):
+    """Gets the revision name for a commit
+
+    Args:
+        commit_hash: Commit hash to look up
+
+    Return:
+        Name of revision, if any, else None
+    """
+    pipe = ['git', 'name-rev', commit_hash]
+    stdout = command.RunPipe([pipe], capture=True, oneline=True).stdout
+
+    # We expect a commit, a space, then a revision name
+    name = stdout.split(' ')[1].strip()
+    return name
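+# Sketch of what NameRevision() parses: 'git name-rev <hash>' prints a line
+# such as '1a2b3c4 remotes/origin/master~5' (hypothetical hash), for which
+# this function returns 'remotes/origin/master~5'.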
+
+def GuessUpstream(git_dir, branch):
+    """Tries to guess the upstream for a branch
+
+    This lists out top commits on a branch and tries to find a suitable
+    upstream. It does this by looking for the first commit where
+    'git name-rev' returns a plain branch name, with no ~ or ^ modifiers.
+
+    Args:
+        git_dir: Git directory containing repo
+        branch: Name of branch
+
+    Returns:
+        Tuple:
+            Name of upstream branch (e.g. 'upstream/master') or None if none
+            Warning/error message, or None if none
+    """
+    pipe = [LogCmd(branch, git_dir=git_dir, oneline=True, count=100)]
+    result = command.RunPipe(pipe, capture=True, capture_stderr=True,
+                             raise_on_error=False)
+    if result.return_code:
+        return None, "Branch '%s' not found" % branch
+    for line in result.stdout.splitlines()[1:]:
+        commit_hash = line.split(' ')[0]
+        name = NameRevision(commit_hash)
+        if '~' not in name and '^' not in name:
+            if name.startswith('remotes/'):
+                name = name[8:]
+            return name, "Guessing upstream as '%s'" % name
+    return None, "Cannot find a suitable upstream for branch '%s'" % branch
+
+def GetUpstream(git_dir, branch):
+    """Returns the name of the upstream for a branch
+
+    Args:
+        git_dir: Git directory containing repo
+        branch: Name of branch
+
+    Returns:
+        Tuple:
+            Name of upstream branch (e.g. 'upstream/master') or None if none
+            Warning/error message, or None if none
+    """
+    try:
+        remote = command.OutputOneLine('git', '--git-dir', git_dir, 'config',
+                                       'branch.%s.remote' % branch)
+        merge = command.OutputOneLine('git', '--git-dir', git_dir, 'config',
+                                      'branch.%s.merge' % branch)
+    except:
+        upstream, msg = GuessUpstream(git_dir, branch)
+        return upstream, msg
+
+    if remote == '.':
+        return merge, None
+    elif remote and merge:
+        leaf = merge.split('/')[-1]
+        return '%s/%s' % (remote, leaf), None
+    else:
+        raise ValueError("Cannot determine upstream branch for branch "
+                "'%s' remote='%s', merge='%s'" % (branch, remote, merge))
+
+
+def GetRangeInBranch(git_dir, branch, include_upstream=False):
+    """Returns an expression for the commits in the given branch.
+
+    Args:
+        git_dir: Directory containing git repo
+        branch: Name of branch
+    Return:
+        Expression in the form 'upstream..branch' which can be used to
+        access the commits. If the branch does not exist, returns None.
+    """
+    upstream, msg = GetUpstream(git_dir, branch)
+    if not upstream:
+        return None, msg
+    rstr = '%s%s..%s' % (upstream, '~' if include_upstream else '', branch)
+    return rstr, msg
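+# Sketch: for a branch 'us-cmd' tracking 'upstream/master' (hypothetical
+# names), GetRangeInBranch() returns roughly ('upstream/master..us-cmd', None),
+# or ('upstream/master~..us-cmd', None) when include_upstream is set.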
+
+def CountCommitsInRange(git_dir, range_expr):
+    """Returns the number of commits in the given range.
+
+    Args:
+        git_dir: Directory containing git repo
+        range_expr: Range to check
+    Return:
+        Number of patches that exist in the supplied range, or None if none
+        were found
+    """
+    pipe = [LogCmd(range_expr, git_dir=git_dir, oneline=True)]
+    result = command.RunPipe(pipe, capture=True, capture_stderr=True,
+                             raise_on_error=False)
+    if result.return_code:
+        return None, "Range '%s' not found or is invalid" % range_expr
+    patch_count = len(result.stdout.splitlines())
+    return patch_count, None
+
+def CountCommitsInBranch(git_dir, branch, include_upstream=False):
+    """Returns the number of commits in the given branch.
+
+    Args:
+        git_dir: Directory containing git repo
+        branch: Name of branch
+    Return:
+        Number of patches that exist on top of the branch, or None if the
+        branch does not exist.
+    """
+    range_expr, msg = GetRangeInBranch(git_dir, branch, include_upstream)
+    if not range_expr:
+        return None, msg
+    return CountCommitsInRange(git_dir, range_expr)
+
+def CountCommits(commit_range):
+    """Returns the number of commits in the given range.
+
+    Args:
+        commit_range: Range of commits to count (e.g. 'HEAD..base')
+    Return:
+        Number of patches that exist on top of the branch
+    """
+    pipe = [LogCmd(commit_range, oneline=True),
+            ['wc', '-l']]
+    stdout = command.RunPipe(pipe, capture=True, oneline=True).stdout
+    patch_count = int(stdout)
+    return patch_count
+
+def Checkout(commit_hash, git_dir=None, work_tree=None, force=False):
+    """Checkout the selected commit for this build
+
+    Args:
+        commit_hash: Commit hash to check out
+    """
+    pipe = ['git']
+    if git_dir:
+        pipe.extend(['--git-dir', git_dir])
+    if work_tree:
+        pipe.extend(['--work-tree', work_tree])
+    pipe.append('checkout')
+    if force:
+        pipe.append('-f')
+    pipe.append(commit_hash)
+    result = command.RunPipe([pipe], capture=True, raise_on_error=False,
+                             capture_stderr=True)
+    if result.return_code != 0:
+        raise OSError('git checkout (%s): %s' % (pipe, result.stderr))
+
+def Clone(git_dir, output_dir):
+    """Clone a git repository into a fresh output directory
+
+    Args:
+        git_dir: Source repository to clone from
+        output_dir: Existing directory to clone into
+    """
+    pipe = ['git', 'clone', git_dir, '.']
+    result = command.RunPipe([pipe], capture=True, cwd=output_dir,
+                             capture_stderr=True)
+    if result.return_code != 0:
+        raise OSError('git clone: %s' % result.stderr)
+
+def Fetch(git_dir=None, work_tree=None):
+    """Fetch from the origin repo
+
+    Args:
+        git_dir: Path to git repository (None to use default)
+        work_tree: Path to work tree (None to use default)
+    """
+    pipe = ['git']
+    if git_dir:
+        pipe.extend(['--git-dir', git_dir])
+    if work_tree:
+        pipe.extend(['--work-tree', work_tree])
+    pipe.append('fetch')
+    result = command.RunPipe([pipe], capture=True, capture_stderr=True)
+    if result.return_code != 0:
+        raise OSError('git fetch: %s' % result.stderr)
+
+def CreatePatches(start, count, series):
+    """Create a series of patches from the top of the current branch.
+
+    The patch files are written to the current directory using
+    git format-patch.
+
+    Args:
+        start: Commit to start from: 0=HEAD, 1=next one, etc.
+        count: number of commits to include
+    Return:
+        Filename of cover letter
+        List of filenames of patch files
+    """
+    if series.get('version'):
+        version = '%s ' % series['version']
+    cmd = ['git', 'format-patch', '-M', '--signoff']
+    if series.get('cover'):
+        cmd.append('--cover-letter')
+    prefix = series.GetPatchPrefix()
+    if prefix:
+        cmd += ['--subject-prefix=%s' % prefix]
+    cmd += ['HEAD~%d..HEAD~%d' % (start + count, start)]
+
+    stdout = command.RunList(cmd)
+    files = stdout.splitlines()
+
+    # We have an extra file if there is a cover letter
+    if series.get('cover'):
+        return files[0], files[1:]
+    else:
+        return None, files
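+# Sketch of the command CreatePatches() runs: with start=0, count=3, a cover
+# letter and a subject prefix of 'RFC' (hypothetical values), it is roughly:
+#
+#     git format-patch -M --signoff --cover-letter --subject-prefix=RFC \
+#         HEAD~3..HEAD~0
+#
+# i.e. the three commits sitting on top of HEAD~3.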
+
+def BuildEmailList(in_list, tag=None, alias=None, raise_on_error=True):
+    """Build a list of email addresses based on an input list.
+
+    Takes a list of email addresses and aliases, and turns this into a list
+    of only email address, by resolving any aliases that are present.
+
+    If the tag is given, then each email address is prepended with this
+    tag and a space. If the tag starts with a minus sign (indicating a
+    command line parameter) then the email address is quoted.
+
+    Args:
+        in_list:        List of aliases/email addresses
+        tag:            Text to put before each address
+        alias:          Alias dictionary
+        raise_on_error: True to raise an error when an alias fails to match,
+                False to just print a message.
+
+    Returns:
+        List of email addresses
+
+    >>> alias = {}
+    >>> alias['fred'] = ['f.bloggs@napier.co.nz']
+    >>> alias['john'] = ['j.bloggs@napier.co.nz']
+    >>> alias['mary'] = ['Mary Poppins <m.poppins@cloud.net>']
+    >>> alias['boys'] = ['fred', ' john']
+    >>> alias['all'] = ['fred ', 'john', '   mary   ']
+    >>> BuildEmailList(['john', 'mary'], None, alias)
+    ['j.bloggs@napier.co.nz', 'Mary Poppins <m.poppins@cloud.net>']
+    >>> BuildEmailList(['john', 'mary'], '--to', alias)
+    ['--to "j.bloggs@napier.co.nz"', \
+'--to "Mary Poppins <m.poppins@cloud.net>"']
+    >>> BuildEmailList(['john', 'mary'], 'Cc', alias)
+    ['Cc j.bloggs@napier.co.nz', 'Cc Mary Poppins <m.poppins@cloud.net>']
+    """
+    quote = '"' if tag and tag[0] == '-' else ''
+    raw = []
+    for item in in_list:
+        raw += LookupEmail(item, alias, raise_on_error=raise_on_error)
+    result = []
+    for item in raw:
+        if item not in result:
+            result.append(item)
+    if tag:
+        return ['%s %s%s%s' % (tag, quote, email, quote) for email in result]
+    return result
+
+def EmailPatches(series, cover_fname, args, dry_run, raise_on_error, cc_fname,
+        self_only=False, alias=None, in_reply_to=None, thread=False,
+        smtp_server=None):
+    """Email a patch series.
+
+    Args:
+        series: Series object containing destination info
+        cover_fname: filename of cover letter
+        args: list of filenames of patch files
+        dry_run: Just return the command that would be run
+        raise_on_error: True to raise an error when an alias fails to match,
+                False to just print a message.
+        cc_fname: Filename of Cc file for per-commit Cc
+        self_only: True to just email to yourself as a test
+        in_reply_to: If set we'll pass this to git as --in-reply-to.
+            Should be a message ID that this is in reply to.
+        thread: True to add --thread to git send-email (make
+            all patches reply to cover-letter or first patch in series)
+        smtp_server: SMTP server to use to send patches
+
+    Returns:
+        Git command that was/would be run
+
+    # For the duration of this doctest pretend that we ran patman with ./patman
+    >>> _old_argv0 = sys.argv[0]
+    >>> sys.argv[0] = './patman'
+
+    >>> alias = {}
+    >>> alias['fred'] = ['f.bloggs@napier.co.nz']
+    >>> alias['john'] = ['j.bloggs@napier.co.nz']
+    >>> alias['mary'] = ['m.poppins@cloud.net']
+    >>> alias['boys'] = ['fred', ' john']
+    >>> alias['all'] = ['fred ', 'john', '   mary   ']
+    >>> alias[os.getenv('USER')] = ['this-is-me@me.com']
+    >>> series = series.Series()
+    >>> series.to = ['fred']
+    >>> series.cc = ['mary']
+    >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
+            False, alias)
+    'git send-email --annotate --to "f.bloggs@napier.co.nz" --cc \
+"m.poppins@cloud.net" --cc-cmd "./patman --cc-cmd cc-fname" cover p1 p2'
+    >>> EmailPatches(series, None, ['p1'], True, True, 'cc-fname', False, \
+            alias)
+    'git send-email --annotate --to "f.bloggs@napier.co.nz" --cc \
+"m.poppins@cloud.net" --cc-cmd "./patman --cc-cmd cc-fname" p1'
+    >>> series.cc = ['all']
+    >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
+            True, alias)
+    'git send-email --annotate --to "this-is-me@me.com" --cc-cmd "./patman \
+--cc-cmd cc-fname" cover p1 p2'
+    >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
+            False, alias)
+    'git send-email --annotate --to "f.bloggs@napier.co.nz" --cc \
+"f.bloggs@napier.co.nz" --cc "j.bloggs@napier.co.nz" --cc \
+"m.poppins@cloud.net" --cc-cmd "./patman --cc-cmd cc-fname" cover p1 p2'
+
+    # Restore argv[0] since we clobbered it.
+    >>> sys.argv[0] = _old_argv0
+    """
+    to = BuildEmailList(series.get('to'), '--to', alias, raise_on_error)
+    if not to:
+        git_config_to = command.Output('git', 'config', 'sendemail.to',
+                                       raise_on_error=False)
+        if not git_config_to:
+            print ("No recipient.\n"
+                   "Please add something like this to a commit\n"
+                   "Series-to: Fred Bloggs <f.blogs@napier.co.nz>\n"
+                   "Or do something like this\n"
+                   "git config sendemail.to u-boot@lists.denx.de")
+            return
+    cc = BuildEmailList(list(set(series.get('cc')) - set(series.get('to'))),
+                        '--cc', alias, raise_on_error)
+    if self_only:
+        to = BuildEmailList([os.getenv('USER')], '--to', alias, raise_on_error)
+        cc = []
+    cmd = ['git', 'send-email', '--annotate']
+    if smtp_server:
+        cmd.append('--smtp-server=%s' % smtp_server)
+    if in_reply_to:
+        if type(in_reply_to) != str:
+            in_reply_to = in_reply_to.encode('utf-8')
+        cmd.append('--in-reply-to="%s"' % in_reply_to)
+    if thread:
+        cmd.append('--thread')
+
+    cmd += to
+    cmd += cc
+    cmd += ['--cc-cmd', '"%s --cc-cmd %s"' % (sys.argv[0], cc_fname)]
+    if cover_fname:
+        cmd.append(cover_fname)
+    cmd += args
+    cmdstr = ' '.join(cmd)
+    if not dry_run:
+        os.system(cmdstr)
+    return cmdstr
+
+
+def LookupEmail(lookup_name, alias=None, raise_on_error=True, level=0):
+    """If an email address is an alias, look it up and return the full name
+
+    TODO: Why not just use git's own alias feature?
+
+    Args:
+        lookup_name: Alias or email address to look up
+        alias: Dictionary containing aliases (None to use settings default)
+        raise_on_error: True to raise an error when an alias fails to match,
+                False to just print a message.
+
+    Returns:
+        tuple:
+            list containing a list of email addresses
+
+    Raises:
+        OSError if a recursive alias reference was found
+        ValueError if an alias was not found
+
+    >>> alias = {}
+    >>> alias['fred'] = ['f.bloggs@napier.co.nz']
+    >>> alias['john'] = ['j.bloggs@napier.co.nz']
+    >>> alias['mary'] = ['m.poppins@cloud.net']
+    >>> alias['boys'] = ['fred', ' john', 'f.bloggs@napier.co.nz']
+    >>> alias['all'] = ['fred ', 'john', '   mary   ']
+    >>> alias['loop'] = ['other', 'john', '   mary   ']
+    >>> alias['other'] = ['loop', 'john', '   mary   ']
+    >>> LookupEmail('mary', alias)
+    ['m.poppins@cloud.net']
+    >>> LookupEmail('arthur.wellesley@howe.ro.uk', alias)
+    ['arthur.wellesley@howe.ro.uk']
+    >>> LookupEmail('boys', alias)
+    ['f.bloggs@napier.co.nz', 'j.bloggs@napier.co.nz']
+    >>> LookupEmail('all', alias)
+    ['f.bloggs@napier.co.nz', 'j.bloggs@napier.co.nz', 'm.poppins@cloud.net']
+    >>> LookupEmail('odd', alias)
+    Traceback (most recent call last):
+    ...
+    ValueError: Alias 'odd' not found
+    >>> LookupEmail('loop', alias)
+    Traceback (most recent call last):
+    ...
+    OSError: Recursive email alias at 'other'
+    >>> LookupEmail('odd', alias, raise_on_error=False)
+    Alias 'odd' not found
+    []
+    >>> # In this case the loop part will effectively be ignored.
+    >>> LookupEmail('loop', alias, raise_on_error=False)
+    Recursive email alias at 'other'
+    Recursive email alias at 'john'
+    Recursive email alias at 'mary'
+    ['j.bloggs@napier.co.nz', 'm.poppins@cloud.net']
+    """
+    if not alias:
+        alias = settings.alias
+    lookup_name = lookup_name.strip()
+    if '@' in lookup_name: # Perhaps a real email address
+        return [lookup_name]
+
+    lookup_name = lookup_name.lower()
+    col = terminal.Color()
+
+    out_list = []
+    if level > 10:
+        msg = "Recursive email alias at '%s'" % lookup_name
+        if raise_on_error:
+            raise OSError(msg)
+        else:
+            print(col.Color(col.RED, msg))
+            return out_list
+
+    if lookup_name:
+        if lookup_name not in alias:
+            msg = "Alias '%s' not found" % lookup_name
+            if raise_on_error:
+                raise ValueError(msg)
+            else:
+                print(col.Color(col.RED, msg))
+                return out_list
+        for item in alias[lookup_name]:
+            todo = LookupEmail(item, alias, raise_on_error, level + 1)
+            for new_item in todo:
+                if new_item not in out_list:
+                    out_list.append(new_item)
+
+    #print("No match for alias '%s'" % lookup_name)
+    return out_list
+
+def GetTopLevel():
+    """Return name of top-level directory for this git repo.
+
+    Returns:
+        Full path to git top-level directory
+
+    This test makes sure that we are running tests in the right subdir
+
+    >>> os.path.realpath(os.path.dirname(__file__)) == \
+            os.path.join(GetTopLevel(), 'tools', 'patman')
+    True
+    """
+    return command.OutputOneLine('git', 'rev-parse', '--show-toplevel')
+
+def GetAliasFile():
+    """Gets the name of the git alias file.
+
+    Returns:
+        Filename of git alias file, or None if none
+    """
+    fname = command.OutputOneLine('git', 'config', 'sendemail.aliasesfile',
+            raise_on_error=False)
+    if fname:
+        fname = os.path.join(GetTopLevel(), fname.strip())
+    return fname
+
+def GetDefaultUserName():
+    """Gets the user.name from .gitconfig file.
+
+    Returns:
+        User name found in .gitconfig file, or None if none
+    """
+    uname = command.OutputOneLine('git', 'config', '--global', 'user.name')
+    return uname
+
+def GetDefaultUserEmail():
+    """Gets the user.email from the global .gitconfig file.
+
+    Returns:
+        User's email found in .gitconfig file, or None if none
+    """
+    uemail = command.OutputOneLine('git', 'config', '--global', 'user.email')
+    return uemail
+
+def GetDefaultSubjectPrefix():
+    """Gets the format.subjectprefix from local .git/config file.
+
+    Returns:
+        Subject prefix found in local .git/config file, or None if none
+    """
+    sub_prefix = command.OutputOneLine('git', 'config', 'format.subjectprefix',
+                 raise_on_error=False)
+
+    return sub_prefix
+
+def Setup():
+    """Set up git utils, by reading the alias files."""
+    # Check for a git alias file also
+    global use_no_decorate
+
+    alias_fname = GetAliasFile()
+    if alias_fname:
+        settings.ReadGitAliases(alias_fname)
+    cmd = LogCmd(None, count=0)
+    use_no_decorate = (command.RunPipe([cmd], raise_on_error=False)
+                       .return_code == 0)
+
+def GetHead():
+    """Get the hash of the current HEAD
+
+    Returns:
+        Hash of HEAD
+    """
+    return command.OutputOneLine('git', 'show', '-s', '--pretty=format:%H')
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
diff --git a/tools/u-boot-tools/patman/patchstream.py b/tools/u-boot-tools/patman/patchstream.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6455b0fa3839e3c26ea633d9f268aeeaf78f201
--- /dev/null
+++ b/tools/u-boot-tools/patman/patchstream.py
@@ -0,0 +1,526 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+import math
+import os
+import re
+import shutil
+import tempfile
+
+import command
+import commit
+import gitutil
+from series import Series
+
+# Tags that we detect and remove
+re_remove = re.compile('^BUG=|^TEST=|^BRANCH=|^Change-Id:|^Review URL:'
+    r'|Reviewed-on:|Commit-\w*:')
+
+# Lines which are allowed after a TEST= line
+re_allowed_after_test = re.compile('^Signed-off-by:')
+
+# Signoffs
+re_signoff = re.compile('^Signed-off-by: *(.*)')
+
+# The start of the cover letter
+re_cover = re.compile('^Cover-letter:')
+
+# A cover letter Cc
+re_cover_cc = re.compile('^Cover-letter-cc: *(.*)')
+
+# Patch series tag
+re_series_tag = re.compile('^Series-([a-z-]*): *(.*)')
+
+# Commit series tag
+re_commit_tag = re.compile('^Commit-([a-z-]*): *(.*)')
+
+# Commit tags that we want to collect and keep
+re_tag = re.compile('^(Tested-by|Acked-by|Reviewed-by|Patch-cc): (.*)')
+
+# The start of a new commit in the git log
+re_commit = re.compile('^commit ([0-9a-f]*)$')
+
+# We detect these since checkpatch doesn't always do it
+re_space_before_tab = re.compile('^[+].* \t')
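+# Quick illustration of the tag regexes above: a log line 'Series-to: u-boot'
+# matches re_series_tag with groups ('to', 'u-boot'), and a line such as
+# 'Reviewed-by: Fred <f@example.com>' (hypothetical address) matches re_tag
+# with groups ('Reviewed-by', 'Fred <f@example.com>').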
+
+# States we can be in - can we use range() and still have comments?
+STATE_MSG_HEADER = 0        # Still in the message header
+STATE_PATCH_SUBJECT = 1     # In patch subject (first line of log for a commit)
+STATE_PATCH_HEADER = 2      # In patch header (after the subject)
+STATE_DIFFS = 3             # In the diff part (past --- line)
+
+class PatchStream:
+    """Class for detecting/injecting tags in a patch or series of patches
+
+    We support processing the output of 'git log' to read out the tags we
+    are interested in. We can also process a patch file in order to remove
+    unwanted tags or inject additional ones. These correspond to the two
+    phases of processing.
+    """
+    def __init__(self, series, name=None, is_log=False):
+        self.skip_blank = False          # True to skip a single blank line
+        self.found_test = False          # Found a TEST= line
+        self.lines_after_test = 0        # Number of lines found after TEST=
+        self.warn = []                   # List of warnings we have collected
+        self.linenum = 1                 # Output line number we are up to
+        self.in_section = None           # Name of start...END section we are in
+        self.notes = []                  # Series notes
+        self.section = []                # The current section...END section
+        self.series = series             # Info about the patch series
+        self.is_log = is_log             # True if indent like git log
+        self.in_change = 0               # Non-zero if we are in a change list
+        self.blank_count = 0             # Number of blank lines stored up
+        self.state = STATE_MSG_HEADER    # What state are we in?
+        self.signoff = []                # Contents of signoff line
+        self.commit = None               # Current commit
+
+    def AddToSeries(self, line, name, value):
+        """Add a new Series-xxx tag.
+
+        When a Series-xxx tag is detected, we come here to record it, if we
+        are scanning a 'git log'.
+
+        Args:
+            line: Source line containing tag (useful for debug/error messages)
+            name: Tag name (part after 'Series-')
+            value: Tag value (part after 'Series-xxx: ')
+        """
+        if name == 'notes':
+            self.in_section = name
+            self.skip_blank = False
+        if self.is_log:
+            self.series.AddTag(self.commit, line, name, value)
+
+    def AddToCommit(self, line, name, value):
+        """Add a new Commit-xxx tag.
+
+        When a Commit-xxx tag is detected, we come here to record it.
+
+        Args:
+            line: Source line containing tag (useful for debug/error messages)
+            name: Tag name (part after 'Commit-')
+            value: Tag value (part after 'Commit-xxx: ')
+        """
+        if name == 'notes':
+            self.in_section = 'commit-' + name
+            self.skip_blank = False
+
+    def CloseCommit(self):
+        """Save the current commit into our commit list, and reset our state"""
+        if self.commit and self.is_log:
+            self.series.AddCommit(self.commit)
+            self.commit = None
+        # If 'END' is missing in a 'Cover-letter' section, and that section
+        # happens to show up at the very end of the commit message, this is
+        # the chance for us to fix it up.
+        if self.in_section == 'cover' and self.is_log:
+            self.series.cover = self.section
+            self.in_section = None
+            self.skip_blank = True
+            self.section = []
+
+    def ProcessLine(self, line):
+        """Process a single line of a patch file or commit log
+
+        This process a line and returns a list of lines to output. The list
+        may be empty or may contain multiple output lines.
+
+        This is where all the complicated logic is located. The class's
+        state is used to move between different states and detect things
+        properly.
+
+        We can be in one of two modes:
+            self.is_log == True: This is 'git log' mode, where most output is
+                indented by 4 characters and we are scanning for tags
+
+            self.is_log == False: This is 'patch' mode, where we already have
+                all the tags, and are processing patches to remove junk we
+                don't want, and add things we think are required.
+
+        Args:
+            line: text line to process
+
+        Returns:
+            list of output lines, or [] if nothing should be output
+        """
+        # Initially we have no output. Prepare the input line string
+        out = []
+        line = line.rstrip('\n')
+
+        commit_match = re_commit.match(line) if self.is_log else None
+
+        if self.is_log:
+            if line[:4] == '    ':
+                line = line[4:]
+
+        # Handle state transition and skipping blank lines
+        series_tag_match = re_series_tag.match(line)
+        commit_tag_match = re_commit_tag.match(line)
+        cover_match = re_cover.match(line)
+        cover_cc_match = re_cover_cc.match(line)
+        signoff_match = re_signoff.match(line)
+        tag_match = None
+        if self.state == STATE_PATCH_HEADER:
+            tag_match = re_tag.match(line)
+        is_blank = not line.strip()
+        if is_blank:
+            if (self.state == STATE_MSG_HEADER
+                    or self.state == STATE_PATCH_SUBJECT):
+                self.state += 1
+
+            # We don't have a subject in the text stream of patch files
+            # It has its own line with a Subject: tag
+            if not self.is_log and self.state == STATE_PATCH_SUBJECT:
+                self.state += 1
+        elif commit_match:
+            self.state = STATE_MSG_HEADER
+
+        # If a tag is detected, or a new commit starts
+        if series_tag_match or commit_tag_match or \
+           cover_match or cover_cc_match or signoff_match or \
+           self.state == STATE_MSG_HEADER:
+            # but we are already in a section, this means 'END' is missing
+            # for that section, fix it up.
+            if self.in_section:
+                self.warn.append("Missing 'END' in section '%s'" % self.in_section)
+                if self.in_section == 'cover':
+                    self.series.cover = self.section
+                elif self.in_section == 'notes':
+                    if self.is_log:
+                        self.series.notes += self.section
+                elif self.in_section == 'commit-notes':
+                    if self.is_log:
+                        self.commit.notes += self.section
+                else:
+                    self.warn.append("Unknown section '%s'" % self.in_section)
+                self.in_section = None
+                self.skip_blank = True
+                self.section = []
+            # but we are already in a change list, that means a blank line
+            # is missing, fix it up.
+            if self.in_change:
+                self.warn.append("Missing 'blank line' in section 'Series-changes'")
+                self.in_change = 0
+
+        # If we are in a section, keep collecting lines until we see END
+        if self.in_section:
+            if line == 'END':
+                if self.in_section == 'cover':
+                    self.series.cover = self.section
+                elif self.in_section == 'notes':
+                    if self.is_log:
+                        self.series.notes += self.section
+                elif self.in_section == 'commit-notes':
+                    if self.is_log:
+                        self.commit.notes += self.section
+                else:
+                    self.warn.append("Unknown section '%s'" % self.in_section)
+                self.in_section = None
+                self.skip_blank = True
+                self.section = []
+            else:
+                self.section.append(line)
+
+        # Detect the commit subject
+        elif not is_blank and self.state == STATE_PATCH_SUBJECT:
+            self.commit.subject = line
+
+        # Detect the tags we want to remove, and skip blank lines
+        elif re_remove.match(line) and not commit_tag_match:
+            self.skip_blank = True
+
+            # TEST= should be the last thing in the commit, so remove
+            # everything after it
+            if line.startswith('TEST='):
+                self.found_test = True
+        elif self.skip_blank and is_blank:
+            self.skip_blank = False
+
+        # Detect the start of a cover letter section
+        elif cover_match:
+            self.in_section = 'cover'
+            self.skip_blank = False
+
+        elif cover_cc_match:
+            value = cover_cc_match.group(1)
+            self.AddToSeries(line, 'cover-cc', value)
+
+        # If we are in a change list, keep collecting lines until a blank one
+        elif self.in_change:
+            if is_blank:
+                # Blank line ends this change list
+                self.in_change = 0
+            elif line == '---':
+                self.in_change = 0
+                out = self.ProcessLine(line)
+            else:
+                if self.is_log:
+                    self.series.AddChange(self.in_change, self.commit, line)
+            self.skip_blank = False
+
+        # Detect Series-xxx tags
+        elif series_tag_match:
+            name = series_tag_match.group(1)
+            value = series_tag_match.group(2)
+            if name == 'changes':
+                # value is the version number: e.g. 1, or 2
+                try:
+                    value = int(value)
+                except ValueError:
+                    raise ValueError("%s: Cannot decode version info '%s'" %
+                        (self.commit.hash, line))
+                self.in_change = int(value)
+            else:
+                self.AddToSeries(line, name, value)
+                self.skip_blank = True
+
+        # Detect Commit-xxx tags
+        elif commit_tag_match:
+            name = commit_tag_match.group(1)
+            value = commit_tag_match.group(2)
+            if name == 'notes':
+                self.AddToCommit(line, name, value)
+                self.skip_blank = True
+
+        # Detect the start of a new commit
+        elif commit_match:
+            self.CloseCommit()
+            self.commit = commit.Commit(commit_match.group(1))
+
+        # Detect tags in the commit message
+        elif tag_match:
+            # Remove a Tested-by for ourselves, since few will take much notice
+            if (tag_match.group(1) == 'Tested-by' and
+                    tag_match.group(2).find(os.getenv('USER') + '@') != -1):
+                self.warn.append("Ignoring %s" % line)
+            elif tag_match.group(1) == 'Patch-cc':
+                self.commit.AddCc(tag_match.group(2).split(','))
+            else:
+                out = [line]
+
+        # Suppress duplicate signoffs
+        elif signoff_match:
+            if (self.is_log or not self.commit or
+                self.commit.CheckDuplicateSignoff(signoff_match.group(1))):
+                out = [line]
+
+        # Well that means this is an ordinary line
+        else:
+            # Look for space before tab
+            m = re_space_before_tab.match(line)
+            if m:
+                self.warn.append('Line %d/%d has space before tab' %
+                    (self.linenum, m.start()))
+
+            # OK, we have a valid non-blank line
+            out = [line]
+            self.linenum += 1
+            self.skip_blank = False
+            if self.state == STATE_DIFFS:
+                pass
+
+            # If this is the start of the diffs section, emit our tags and
+            # change log
+            elif line == '---':
+                self.state = STATE_DIFFS
+
+                # Output the tags (signoff first), then change list
+                out = []
+                log = self.series.MakeChangeLog(self.commit)
+                out += [line]
+                if self.commit:
+                    out += self.commit.notes
+                out += [''] + log
+            elif self.found_test:
+                if not re_allowed_after_test.match(line):
+                    self.lines_after_test += 1
+
+        return out
+
+    def Finalize(self):
+        """Close out processing of this patch stream"""
+        self.CloseCommit()
+        if self.lines_after_test:
+            self.warn.append('Found %d lines after TEST=' %
+                    self.lines_after_test)
+
+    def ProcessStream(self, infd, outfd):
+        """Copy a stream from infd to outfd, filtering out unwanted things.
+
+        This is used to process patch files one at a time.
+
+        Args:
+            infd: Input stream file object
+            outfd: Output stream file object
+        """
+        # Extract the filename from each diff, for nice warnings
+        fname = None
+        last_fname = None
+        re_fname = re.compile('diff --git a/(.*) b/.*')
+        while True:
+            line = infd.readline()
+            if not line:
+                break
+            out = self.ProcessLine(line)
+
+            # Try to detect blank lines at EOF
+            for line in out:
+                match = re_fname.match(line)
+                if match:
+                    last_fname = fname
+                    fname = match.group(1)
+                if line == '+':
+                    self.blank_count += 1
+                else:
+                    if self.blank_count and (line == '-- ' or match):
+                        self.warn.append("Found possible blank line(s) at "
+                                "end of file '%s'" % last_fname)
+                    outfd.write('+\n' * self.blank_count)
+                    outfd.write(line + '\n')
+                    self.blank_count = 0
+        self.Finalize()
+
+
+def GetMetaDataForList(commit_range, git_dir=None, count=None,
+                       series=None, allow_overwrite=False):
+    """Reads out patch series metadata from the commits
+
+    This does a 'git log' on the relevant commits and pulls out the tags we
+    are interested in.
+
+    Args:
+        commit_range: Range of commits to count (e.g. 'HEAD..base')
+        git_dir: Path to git repository (None to use default)
+        count: Number of commits to list, or None for no limit
+        series: Series object to add information into. By default a new series
+            is started.
+        allow_overwrite: Allow tags to overwrite an existing tag
+    Returns:
+        A Series object containing information about the commits.
+    """
+    if not series:
+        series = Series()
+    series.allow_overwrite = allow_overwrite
+    params = gitutil.LogCmd(commit_range, reverse=True, count=count,
+                            git_dir=git_dir)
+    stdout = command.RunPipe([params], capture=True).stdout
+    ps = PatchStream(series, is_log=True)
+    for line in stdout.splitlines():
+        ps.ProcessLine(line)
+    ps.Finalize()
+    return series
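+# Sketch of typical use: something like
+#     series = GetMetaDataForList('upstream/master..HEAD')
+# runs 'git log' over that range and returns a Series populated with any
+# Series-xxx / Commit-xxx tags found in the commit messages.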
+
+def GetMetaData(start, count):
+    """Reads out patch series metadata from the commits
+
+    This does a 'git log' on the relevant commits and pulls out the tags we
+    are interested in.
+
+    Args:
+        start: Commit to start from: 0=HEAD, 1=next one, etc.
+        count: Number of commits to list
+    """
+    return GetMetaDataForList('HEAD~%d' % start, None, count)
+
+def GetMetaDataForTest(text):
+    """Process metadata from a file containing a git log. Used for tests
+
+    Args:
+        text: Text of the git log output to scan for tags
+    """
+    series = Series()
+    ps = PatchStream(series, is_log=True)
+    for line in text.splitlines():
+        ps.ProcessLine(line)
+    ps.Finalize()
+    return series
+
+def FixPatch(backup_dir, fname, series, commit):
+    """Fix up a patch file, by adding/removing as required.
+
+    We remove our tags from the patch file, insert changes lists, etc.
+    The patch file is processed in place, and overwritten.
+
+    A backup file is put into backup_dir (if not None).
+
+    Args:
+        backup_dir: Directory to copy the original file into, or None for none
+        fname: Filename of patch file to process
+        series: Series information about this patch set
+        commit: Commit object for this patch file
+    Return:
+        A list of errors, or [] if all ok.
+    """
+    handle, tmpname = tempfile.mkstemp()
+    outfd = os.fdopen(handle, 'w')
+    infd = open(fname, 'r')
+    ps = PatchStream(series)
+    ps.commit = commit
+    ps.ProcessStream(infd, outfd)
+    infd.close()
+    outfd.close()
+
+    # Create a backup file if required
+    if backup_dir:
+        shutil.copy(fname, os.path.join(backup_dir, os.path.basename(fname)))
+    shutil.move(tmpname, fname)
+    return ps.warn
+
+def FixPatches(series, fnames):
+    """Fix up a list of patches identified by filenames
+
+    The patch files are processed in place, and overwritten.
+
+    Args:
+        series: The series object
+        fnames: List of patch files to process
+    """
+    # Current workflow creates patches, so we shouldn't need a backup
+    backup_dir = None  #tempfile.mkdtemp('clean-patch')
+    count = 0
+    for fname in fnames:
+        commit = series.commits[count]
+        commit.patch = fname
+        result = FixPatch(backup_dir, fname, series, commit)
+        if result:
+            print('%d warnings for %s:' % (len(result), fname))
+            for warn in result:
+                print('\t', warn)
+            print
+        count += 1
+    print('Cleaned %d patches' % count)
+
+def InsertCoverLetter(fname, series, count):
+    """Inserts a cover letter with the required info into patch 0
+
+    Args:
+        fname: Input / output filename of the cover letter file
+        series: Series object
+        count: Number of patches in the series
+    """
+    fd = open(fname, 'r')
+    lines = fd.readlines()
+    fd.close()
+
+    fd = open(fname, 'w')
+    text = series.cover
+    prefix = series.GetPatchPrefix()
+    for line in lines:
+        if line.startswith('Subject:'):
+            # if more than 10 or 100 patches, it should say 00/xx, 000/xxx, etc
+            zero_repeat = int(math.log10(count)) + 1
+            zero = '0' * zero_repeat
+            line = 'Subject: [%s %s/%d] %s\n' % (prefix, zero, count, text[0])
+
+        # Insert our cover letter
+        elif line.startswith('*** BLURB HERE ***'):
+            # First the blurb text
+            line = '\n'.join(text[1:]) + '\n'
+            if series.get('notes'):
+                line += '\n'.join(series.notes) + '\n'
+
+            # Now the change list
+            out = series.MakeChangeLog(None)
+            line += '\n' + '\n'.join(out)
+        fd.write(line)
+    fd.close()
diff --git a/tools/u-boot-tools/patman/patman b/tools/u-boot-tools/patman/patman
new file mode 120000
index 0000000000000000000000000000000000000000..6cc3d7a56a54b88c36534e232952b58d2c053e34
--- /dev/null
+++ b/tools/u-boot-tools/patman/patman
@@ -0,0 +1 @@
+patman.py
\ No newline at end of file
diff --git a/tools/u-boot-tools/patman/patman.py b/tools/u-boot-tools/patman/patman.py
new file mode 100755
index 0000000000000000000000000000000000000000..27a2febf70460374ac79ed4d7977b83e7c43f77d
--- /dev/null
+++ b/tools/u-boot-tools/patman/patman.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python2
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+"""See README for more information"""
+
+from optparse import OptionParser
+import os
+import re
+import sys
+import unittest
+
+# Our modules
+try:
+    from patman import checkpatch, command, gitutil, patchstream, \
+        project, settings, terminal, test
+except ImportError:
+    import checkpatch
+    import command
+    import gitutil
+    import patchstream
+    import project
+    import settings
+    import terminal
+    import test
+
+
+parser = OptionParser()
+parser.add_option('-H', '--full-help', action='store_true', dest='full_help',
+       default=False, help='Display the README file')
+parser.add_option('-c', '--count', dest='count', type='int',
+       default=-1, help='Automatically create patches from top n commits')
+parser.add_option('-i', '--ignore-errors', action='store_true',
+       dest='ignore_errors', default=False,
+       help='Send patches email even if patch errors are found')
+parser.add_option('-m', '--no-maintainers', action='store_false',
+       dest='add_maintainers', default=True,
+       help="Don't cc the file maintainers automatically")
+parser.add_option('-l', '--limit-cc', dest='limit', type='int',
+       default=None, help='Limit the cc list to LIMIT entries [default: %default]')
+parser.add_option('-n', '--dry-run', action='store_true', dest='dry_run',
+       default=False, help="Do a dry run (create but don't email patches)")
+parser.add_option('-p', '--project', default=project.DetectProject(),
+                  help="Project name; affects default option values and "
+                  "aliases [default: %default]")
+parser.add_option('-r', '--in-reply-to', type='string', action='store',
+                  help="Message ID that this series is in reply to")
+parser.add_option('-s', '--start', dest='start', type='int',
+       default=0, help='Commit to start creating patches from (0 = HEAD)')
+parser.add_option('-t', '--ignore-bad-tags', action='store_true',
+                  default=False, help='Ignore bad tags / aliases')
+parser.add_option('--test', action='store_true', dest='test',
+                  default=False, help='run tests')
+parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
+       default=False, help='Verbose output of errors and warnings')
+parser.add_option('--cc-cmd', dest='cc_cmd', type='string', action='store',
+       default=None, help='Output cc list for patch file (used by git)')
+parser.add_option('--no-check', action='store_false', dest='check_patch',
+                  default=True,
+                  help="Don't check for patch compliance")
+parser.add_option('--no-tags', action='store_false', dest='process_tags',
+                  default=True, help="Don't process subject tags as aliaes")
+parser.add_option('--smtp-server', type='str',
+                  help="Specify the SMTP server to 'git send-email'")
+parser.add_option('-T', '--thread', action='store_true', dest='thread',
+                  default=False, help='Create patches as a single thread')
+
+parser.usage += """
+
+Create patches from commits in a branch, check them and email them as
+specified by tags you place in the commits. Use -n to do a dry run first."""
+
+
+# Parse options twice: first to get the project and second to handle
+# defaults properly (which depends on project).
+(options, args) = parser.parse_args()
+settings.Setup(parser, options.project, '')
+(options, args) = parser.parse_args()
+
+if __name__ != "__main__":
+    pass
+
+# Run our meagre tests
+elif options.test:
+    import doctest
+    import func_test
+
+    sys.argv = [sys.argv[0]]
+    result = unittest.TestResult()
+    for module in (test.TestPatch, func_test.TestFunctional):
+        suite = unittest.TestLoader().loadTestsFromTestCase(module)
+        suite.run(result)
+
+    for module in ['gitutil', 'settings']:
+        suite = doctest.DocTestSuite(module)
+        suite.run(result)
+
+    # TODO: Surely we can just 'print' result?
+    print(result)
+    for test, err in result.errors:
+        print(err)
+    for test, err in result.failures:
+        print(err)
+
+# Called from git with a patch filename as argument
+# Print out a list of additional CC recipients for this patch
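+# Illustrative usage (assuming the usual setup): gitutil.EmailPatches() passes
+# something like --cc-cmd='patman --cc-cmd <cc_file>' to 'git send-email',
+# which then invokes this mode once per patch file.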
+elif options.cc_cmd:
+    fd = open(options.cc_cmd, 'r')
+    re_line = re.compile(r'(\S*) (.*)')
+    for line in fd.readlines():
+        match = re_line.match(line)
+        if match and match.group(1) == args[0]:
+            for cc in match.group(2).split(', '):
+                cc = cc.strip()
+                if cc:
+                    print(cc)
+    fd.close()
+
+elif options.full_help:
+    pager = os.getenv('PAGER')
+    if not pager:
+        pager = 'more'
+    fname = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
+                         'README')
+    command.Run(pager, fname)
+
+# Process commits, produce patches files, check them, email them
+else:
+    gitutil.Setup()
+
+    if options.count == -1:
+        # Work out how many patches to send if we can
+        options.count = gitutil.CountCommitsToBranch() - options.start
+
+    col = terminal.Color()
+    if not options.count:
+        str = 'No commits found to process - please use -c flag'
+        sys.exit(col.Color(col.RED, str))
+
+    # Read the metadata from the commits
+    if options.count:
+        series = patchstream.GetMetaData(options.start, options.count)
+        cover_fname, args = gitutil.CreatePatches(options.start, options.count,
+                series)
+
+    # Fix up the patch files to our liking, and insert the cover letter
+    patchstream.FixPatches(series, args)
+    if cover_fname and series.get('cover'):
+        patchstream.InsertCoverLetter(cover_fname, series, options.count)
+
+    # Do a few checks on the series
+    series.DoChecks()
+
+    # Check the patches, and run them through 'git am' just to be sure
+    if options.check_patch:
+        ok = checkpatch.CheckPatches(options.verbose, args)
+    else:
+        ok = True
+
+    cc_file = series.MakeCcFile(options.process_tags, cover_fname,
+                                not options.ignore_bad_tags,
+                                options.add_maintainers, options.limit)
+
+    # Email the patches out (giving the user time to check / cancel)
+    cmd = ''
+    its_a_go = ok or options.ignore_errors
+    if its_a_go:
+        cmd = gitutil.EmailPatches(series, cover_fname, args,
+                options.dry_run, not options.ignore_bad_tags, cc_file,
+                in_reply_to=options.in_reply_to, thread=options.thread,
+                smtp_server=options.smtp_server)
+    else:
+        print(col.Color(col.RED, "Not sending emails due to errors/warnings"))
+
+    # For a dry run, just show our actions as a sanity check
+    if options.dry_run:
+        series.ShowActions(args, cmd, options.process_tags)
+        if not its_a_go:
+            print(col.Color(col.RED, "Email would not be sent"))
+
+    os.remove(cc_file)
diff --git a/tools/u-boot-tools/patman/project.py b/tools/u-boot-tools/patman/project.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d9cfc06252b4c83ca6c08d03e4d523485567abc
--- /dev/null
+++ b/tools/u-boot-tools/patman/project.py
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2012 The Chromium OS Authors.
+#
+
+import os.path
+
+import gitutil
+
+def DetectProject():
+    """Autodetect the name of the current project.
+
+    This looks for signature files/directories that are unlikely to exist except
+    in the given project.
+
+    Returns:
+        The name of the project, like "linux" or "u-boot".  Returns "unknown"
+        if we can't detect the project.
+    """
+    top_level = gitutil.GetTopLevel()
+
+    if os.path.exists(os.path.join(top_level, "include", "u-boot")):
+        return "u-boot"
+    elif os.path.exists(os.path.join(top_level, "kernel")):
+        return "linux"
+
+    return "unknown"
diff --git a/tools/u-boot-tools/patman/series.py b/tools/u-boot-tools/patman/series.py
new file mode 100644
index 0000000000000000000000000000000000000000..2735afaf88feab0eb3e4546dac7912f088fe37bc
--- /dev/null
+++ b/tools/u-boot-tools/patman/series.py
@@ -0,0 +1,292 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+from __future__ import print_function
+
+import itertools
+import os
+
+import get_maintainer
+import gitutil
+import settings
+import terminal
+
+# Series-xxx tags that we understand
+valid_series = ['to', 'cc', 'version', 'changes', 'prefix', 'notes', 'name',
+                'cover_cc', 'process_log']
+
+class Series(dict):
+    """Holds information about a patch series, including all tags.
+
+    Vars:
+        cc: List of aliases/emails to Cc all patches to
+        commits: List of Commit objects, one for each patch
+        cover: List of lines in the cover letter
+        notes: List of lines in the notes
+        changes: (dict) List of changes for each version. The key is
+            the integer version number
+        allow_overwrite: Allow tags to overwrite an existing tag
+    """
+    def __init__(self):
+        self.cc = []
+        self.to = []
+        self.cover_cc = []
+        self.commits = []
+        self.cover = None
+        self.notes = []
+        self.changes = {}
+        self.allow_overwrite = False
+
+        # Written in MakeCcFile()
+        #  key: name of patch file
+        #  value: list of email addresses
+        self._generated_cc = {}
+
+    # These make us more like a dictionary
+    def __setattr__(self, name, value):
+        self[name] = value
+
+    def __getattr__(self, name):
+        return self[name]
+
+    def AddTag(self, commit, line, name, value):
+        """Add a new Series-xxx tag along with its value.
+
+        Args:
+            commit: Commit object containing this tag
+            line: Source line containing tag (useful for debug/error messages)
+            name: Tag name (part after 'Series-')
+            value: Tag value (part after 'Series-xxx: ')
+        """
+        # If we already have it, then add to our list
+        name = name.replace('-', '_')
+        if name in self and not self.allow_overwrite:
+            values = value.split(',')
+            values = [str.strip() for str in values]
+            if type(self[name]) != type([]):
+                raise ValueError("In %s: line '%s': Cannot add another value "
+                        "'%s' to series '%s'" %
+                            (commit.hash, line, values, self[name]))
+            self[name] += values
+
+        # Otherwise just set the value
+        elif name in valid_series:
+            if name=="notes":
+                self[name] = [value]
+            else:
+                self[name] = value
+        else:
+            raise ValueError("In %s: line '%s': Unknown 'Series-%s': valid "
+                        "options are %s" % (commit.hash, line, name,
+                            ', '.join(valid_series)))
+
+    def AddCommit(self, commit):
+        """Add a commit into our list of commits
+
+        We create a list of tags in the commit subject also.
+
+        Args:
+            commit: Commit object to add
+        """
+        commit.CheckTags()
+        self.commits.append(commit)
+
+    def ShowActions(self, args, cmd, process_tags):
+        """Show what actions we will/would perform
+
+        Args:
+            args: List of patch files we created
+            cmd: The git command we would have run
+            process_tags: Process tags as if they were aliases
+        """
+        to_set = set(gitutil.BuildEmailList(self.to))
+        cc_set = set(gitutil.BuildEmailList(self.cc))
+
+        col = terminal.Color()
+        print('Dry run, so not doing much. But I would do this:')
+        print()
+        print('Send a total of %d patch%s with %scover letter.' % (
+                len(args), '' if len(args) == 1 else 'es',
+                self.get('cover') and 'a ' or 'no '))
+
+        # TODO: Colour the patches according to whether they passed checks
+        for upto in range(len(args)):
+            commit = self.commits[upto]
+            print(col.Color(col.GREEN, '   %s' % args[upto]))
+            cc_list = list(self._generated_cc[commit.patch])
+            for email in set(cc_list) - to_set - cc_set:
+                if email == None:
+                    email = col.Color(col.YELLOW, "<alias '%s' not found>"
+                            % tag)
+                if email:
+                    print('      Cc: ', email)
+        print()
+        for item in to_set:
+            print('To:\t ', item)
+        for item in cc_set - to_set:
+            print('Cc:\t ', item)
+        print('Version: ', self.get('version'))
+        print('Prefix:\t ', self.get('prefix'))
+        if self.cover:
+            print('Cover: %d lines' % len(self.cover))
+            cover_cc = gitutil.BuildEmailList(self.get('cover_cc', ''))
+            all_ccs = itertools.chain(cover_cc, *self._generated_cc.values())
+            for email in set(all_ccs) - to_set - cc_set:
+                print('      Cc: ', email)
+        if cmd:
+            print('Git command: %s' % cmd)
+
+    def MakeChangeLog(self, commit):
+        """Create a list of changes for each version.
+
+        Args:
+            commit: Commit object to filter changes for (None to include
+                changes from all commits)
+
+        Return:
+            The change log as a list of strings, one per line
+
+            Changes in v4:
+            - Jog the dial back closer to the widget
+
+            Changes in v3: None
+            Changes in v2:
+            - Fix the widget
+            - Jog the dial
+
+            etc.
+        """
+        final = []
+        process_it = self.get('process_log', '').split(',')
+        process_it = [item.strip() for item in process_it]
+        need_blank = False
+        for change in sorted(self.changes, reverse=True):
+            out = []
+            for this_commit, text in self.changes[change]:
+                if commit and this_commit != commit:
+                    continue
+                if 'uniq' not in process_it or text not in out:
+                    out.append(text)
+            line = 'Changes in v%d:' % change
+            have_changes = len(out) > 0
+            if 'sort' in process_it:
+                out = sorted(out)
+            if have_changes:
+                out.insert(0, line)
+            else:
+                out = [line + ' None']
+            if need_blank:
+                out.insert(0, '')
+            final += out
+            need_blank = have_changes
+        if self.changes:
+            final.append('')
+        return final
+
+    def DoChecks(self):
+        """Check that each version has a change log
+
+        Print an error if something is wrong.
+        """
+        col = terminal.Color()
+        if self.get('version'):
+            changes_copy = dict(self.changes)
+            for version in range(1, int(self.version) + 1):
+                if self.changes.get(version):
+                    del changes_copy[version]
+                else:
+                    if version > 1:
+                        str = 'Change log missing for v%d' % version
+                        print(col.Color(col.RED, str))
+            for version in changes_copy:
+                str = 'Change log for unknown version v%d' % version
+                print(col.Color(col.RED, str))
+        elif self.changes:
+            str = 'Change log exists, but no version is set'
+            print(col.Color(col.RED, str))
+
+    def MakeCcFile(self, process_tags, cover_fname, raise_on_error,
+                   add_maintainers, limit):
+        """Make a cc file for us to use for per-commit Cc automation
+
+        Also stores in self._generated_cc to make ShowActions() faster.
+
+        Args:
+            process_tags: Process tags as if they were aliases
+            cover_fname: If non-None the name of the cover letter.
+            raise_on_error: True to raise an error when an alias fails to match,
+                False to just print a message.
+            add_maintainers: Either:
+                True/False to run get_maintainer to Cc the file maintainers
+                List of maintainers to include (for testing)
+            limit: Limit the length of the Cc list
+        Return:
+            Filename of temp file created
+        """
+        col = terminal.Color()
+        # Look for commit tags (of the form 'xxx:' at the start of the subject)
+        fname = '/tmp/patman.%d' % os.getpid()
+        fd = open(fname, 'w')
+        all_ccs = []
+        for commit in self.commits:
+            cc = []
+            if process_tags:
+                cc += gitutil.BuildEmailList(commit.tags,
+                                               raise_on_error=raise_on_error)
+            cc += gitutil.BuildEmailList(commit.cc_list,
+                                           raise_on_error=raise_on_error)
+            if type(add_maintainers) == type(cc):
+                cc += add_maintainers
+            elif add_maintainers:
+                cc += get_maintainer.GetMaintainer(commit.patch)
+            for x in set(cc) & set(settings.bounces):
+                print(col.Color(col.YELLOW, 'Skipping "%s"' % x))
+            cc = set(cc) - set(settings.bounces)
+            cc = [m.encode('utf-8') if type(m) != str else m for m in cc]
+            if limit is not None:
+                cc = cc[:limit]
+            all_ccs += cc
+            print(commit.patch, ', '.join(set(cc)), file=fd)
+            self._generated_cc[commit.patch] = cc
+
+        if cover_fname:
+            cover_cc = gitutil.BuildEmailList(self.get('cover_cc', ''))
+            cover_cc = [m.encode('utf-8') if type(m) != str else m
+                        for m in cover_cc]
+            cc_list = ', '.join([x.decode('utf-8')
+                                 for x in set(cover_cc + all_ccs)])
+            print(cover_fname, cc_list.encode('utf-8'), file=fd)
+
+        fd.close()
+        return fname
+
+    def AddChange(self, version, commit, info):
+        """Add a new change line to a version.
+
+        This will later appear in the change log.
+
+        Args:
+            version: version number to add change list to
+            commit: Commit that this change belongs to
+            info: change line for this version
+        """
+        if not self.changes.get(version):
+            self.changes[version] = []
+        self.changes[version].append([commit, info])
+
+    def GetPatchPrefix(self):
+        """Get the patch version string
+
+        Return:
+            Patch string, like 'RFC PATCH v5' or just 'PATCH'
+        """
+        git_prefix = gitutil.GetDefaultSubjectPrefix()
+        if git_prefix:
+            git_prefix = '%s][' % git_prefix
+        else:
+            git_prefix = ''
+
+        version = ''
+        if self.get('version'):
+            version = ' v%s' % self['version']
+
+        # Get patch name prefix
+        prefix = ''
+        if self.get('prefix'):
+            prefix = '%s ' % self['prefix']
+        return '%s%sPATCH%s' % (git_prefix, prefix, version)
diff --git a/tools/u-boot-tools/patman/settings.py b/tools/u-boot-tools/patman/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea2bc74f759b2aa3a9d931a4444393a33546da5e
--- /dev/null
+++ b/tools/u-boot-tools/patman/settings.py
@@ -0,0 +1,356 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+from __future__ import print_function
+
+try:
+    import configparser as ConfigParser
+except ImportError:
+    import ConfigParser
+
+import os
+import re
+
+import command
+import gitutil
+
+"""Default settings per-project.
+
+These are used by _ProjectConfigParser.  Settings names should match
+the "dest" of the option parser from patman.py.
+"""
+_default_settings = {
+    "u-boot": {},
+    "linux": {
+        "process_tags": "False",
+    }
+}
+
+class _ProjectConfigParser(ConfigParser.SafeConfigParser):
+    """ConfigParser that handles projects.
+
+    There are two main goals of this class:
+    - Load project-specific default settings.
+    - Merge general default settings/aliases with project-specific ones.
+
+    # Sample config used for tests below...
+    >>> try:
+    ...     from StringIO import StringIO
+    ... except ImportError:
+    ...     from io import StringIO
+    >>> sample_config = '''
+    ... [alias]
+    ... me: Peter P. <likesspiders@example.com>
+    ... enemies: Evil <evil@example.com>
+    ...
+    ... [sm_alias]
+    ... enemies: Green G. <ugly@example.com>
+    ...
+    ... [sm2_alias]
+    ... enemies: Doc O. <pus@example.com>
+    ...
+    ... [settings]
+    ... am_hero: True
+    ... '''
+
+    # Check to make sure that bogus project gets general alias.
+    >>> config = _ProjectConfigParser("zzz")
+    >>> config.readfp(StringIO(sample_config))
+    >>> config.get("alias", "enemies")
+    u'Evil <evil@example.com>'
+
+    # Check to make sure that alias gets overridden by project.
+    >>> config = _ProjectConfigParser("sm")
+    >>> config.readfp(StringIO(sample_config))
+    >>> config.get("alias", "enemies")
+    u'Green G. <ugly@example.com>'
+
+    # Check to make sure that settings get merged with project.
+    >>> config = _ProjectConfigParser("linux")
+    >>> config.readfp(StringIO(sample_config))
+    >>> sorted(config.items("settings"))
+    [(u'am_hero', u'True'), (u'process_tags', u'False')]
+
+    # Check to make sure that settings works with unknown project.
+    >>> config = _ProjectConfigParser("unknown")
+    >>> config.readfp(StringIO(sample_config))
+    >>> sorted(config.items("settings"))
+    [(u'am_hero', u'True')]
+    """
+    def __init__(self, project_name):
+        """Construct _ProjectConfigParser.
+
+        In addition to standard SafeConfigParser initialization, this also loads
+        project defaults.
+
+        Args:
+            project_name: The name of the project.
+        """
+        self._project_name = project_name
+        ConfigParser.SafeConfigParser.__init__(self)
+
+        # Update the project settings in the config based on
+        # the _default_settings global.
+        project_settings = "%s_settings" % project_name
+        if not self.has_section(project_settings):
+            self.add_section(project_settings)
+        project_defaults = _default_settings.get(project_name, {})
+        for setting_name, setting_value in project_defaults.items():
+            self.set(project_settings, setting_name, setting_value)
+
+    def _to_unicode(self, val):
+        """Make sure a value is of type 'unicode'
+
+        Args:
+            val: string or unicode object
+
+        Returns:
+            unicode version of val
+        """
+        return val if isinstance(val, unicode) else val.decode('utf-8')
+
+    def get(self, section, option, *args, **kwargs):
+        """Extend SafeConfigParser to try project_section before section.
+
+        Args:
+            See SafeConfigParser.
+        Returns:
+            See SafeConfigParser.
+        """
+        try:
+            val = ConfigParser.SafeConfigParser.get(
+                self, "%s_%s" % (self._project_name, section), option,
+                *args, **kwargs
+            )
+        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+            val = ConfigParser.SafeConfigParser.get(
+                self, section, option, *args, **kwargs
+            )
+        return self._to_unicode(val)
+
+    def items(self, section, *args, **kwargs):
+        """Extend SafeConfigParser to add project_section to section.
+
+        Args:
+            See SafeConfigParser.
+        Returns:
+            See SafeConfigParser.
+        """
+        project_items = []
+        has_project_section = False
+        top_items = []
+
+        # Get items from the project section
+        try:
+            project_items = ConfigParser.SafeConfigParser.items(
+                self, "%s_%s" % (self._project_name, section), *args, **kwargs
+            )
+            has_project_section = True
+        except ConfigParser.NoSectionError:
+            pass
+
+        # Get top-level items
+        try:
+            top_items = ConfigParser.SafeConfigParser.items(
+                self, section, *args, **kwargs
+            )
+        except ConfigParser.NoSectionError:
+            # If neither section exists, re-raise the error
+            if not has_project_section:
+                raise
+
+        item_dict = dict(top_items)
+        item_dict.update(project_items)
+        return {(self._to_unicode(item), self._to_unicode(val))
+                for item, val in item_dict.items()}
+
+def ReadGitAliases(fname):
+    """Read a git alias file. This is in the form used by git:
+
+    alias uboot  u-boot@lists.denx.de
+    alias wd     Wolfgang Denk <wd@denx.de>
+
+    Args:
+        fname: Filename to read
+    """
+    try:
+        fd = open(fname, 'r')
+    except IOError:
+        print("Warning: Cannot find alias file '%s'" % fname)
+        return
+
+    re_line = re.compile(r'alias\s+(\S+)\s+(.*)')
+    for line in fd.readlines():
+        line = line.strip()
+        if not line or line[0] == '#':
+            continue
+
+        m = re_line.match(line)
+        if not m:
+            print("Warning: Alias file line '%s' not understood" % line)
+            continue
+
+        list = alias.get(m.group(1), [])
+        for item in m.group(2).split(','):
+            item = item.strip()
+            if item:
+                list.append(item)
+        alias[m.group(1)] = list
+
+    fd.close()
+
+def CreatePatmanConfigFile(config_fname):
+    """Creates a config file under $(HOME)/.patman if it can't find one.
+
+    Args:
+        config_fname: Default config filename i.e., $(HOME)/.patman
+
+    Returns:
+        None
+    """
+    name = gitutil.GetDefaultUserName()
+    if name == None:
+        name = raw_input("Enter name: ")
+
+    email = gitutil.GetDefaultUserEmail()
+
+    if email == None:
+        email = raw_input("Enter email: ")
+
+    try:
+        f = open(config_fname, 'w')
+    except IOError:
+        print("Couldn't create patman config file\n")
+        raise
+
+    print('''[alias]
+me: %s <%s>
+
+[bounces]
+nxp = Zhikang Zhang <zhikang.zhang@nxp.com>
+''' % (name, email), file=f)
+    f.close()
+
+def _UpdateDefaults(parser, config):
+    """Update the given OptionParser defaults based on config.
+
+    We walk through all of the settings in the config file. For each one
+    that has a matching option in the parser, we update the parser's
+    default value.
+
+    The idea here is that the .patman file should be able to update
+    defaults but that command line flags should still have the final
+    say.
+
+    Args:
+        parser: An instance of an OptionParser whose defaults will be
+            updated.
+        config: An instance of _ProjectConfigParser that we will query
+            for settings.
+    """
+    defaults = parser.get_default_values()
+    for name, val in config.items('settings'):
+        if hasattr(defaults, name):
+            default_val = getattr(defaults, name)
+            if isinstance(default_val, bool):
+                val = config.getboolean('settings', name)
+            elif isinstance(default_val, int):
+                val = config.getint('settings', name)
+            parser.set_default(name, val)
+        else:
+            print("WARNING: Unknown setting %s" % name)
+
+def _ReadAliasFile(fname):
+    """Read in the U-Boot git alias file if it exists.
+
+    Args:
+        fname: Filename to read.
+    """
+    if os.path.exists(fname):
+        bad_line = None
+        with open(fname) as fd:
+            linenum = 0
+            for line in fd:
+                linenum += 1
+                line = line.strip()
+                if not line or line.startswith('#'):
+                    continue
+                words = line.split(None, 2)
+                if len(words) < 3 or words[0] != 'alias':
+                    if not bad_line:
+                        bad_line = "%s:%d:Invalid line '%s'" % (fname, linenum,
+                                                                line)
+                    continue
+                alias[words[1]] = [s.strip() for s in words[2].split(',')]
+        if bad_line:
+            print(bad_line)
+
+def _ReadBouncesFile(fname):
+    """Read in the bounces file if it exists
+
+    Args:
+        fname: Filename to read.
+    """
+    if os.path.exists(fname):
+        with open(fname) as fd:
+            for line in fd:
+                if line.startswith('#'):
+                    continue
+                bounces.add(line.strip())
+
+def GetItems(config, section):
+    """Get the items from a section of the config.
+
+    Args:
+        config: _ProjectConfigParser object containing settings
+        section: name of section to retrieve
+
+    Returns:
+        List of (name, value) tuples for the section
+    """
+    try:
+        return config.items(section)
+    except ConfigParser.NoSectionError:
+        return []
+
+def Setup(parser, project_name, config_fname=''):
+    """Set up the settings module by reading config files.
+
+    Args:
+        parser:         The parser to update
+        project_name:   Name of project that we're working on; we'll look
+            for sections named "<project_name>_<section>" as well.
+        config_fname:   Config filename to read ('' for default)
+    """
+    # First read the git alias file if available
+    _ReadAliasFile('doc/git-mailrc')
+    config = _ProjectConfigParser(project_name)
+    if config_fname == '':
+        config_fname = '%s/.patman' % os.getenv('HOME')
+
+    if not os.path.exists(config_fname):
+        print("No config file found ~/.patman\nCreating one...\n")
+        CreatePatmanConfigFile(config_fname)
+
+    config.read(config_fname)
+
+    for name, value in GetItems(config, 'alias'):
+        alias[name] = value.split(',')
+
+    _ReadBouncesFile('doc/bounces')
+    for name, value in GetItems(config, 'bounces'):
+        bounces.add(value)
+
+    _UpdateDefaults(parser, config)
+
+# These are the aliases we understand, indexed by alias. Each member is a list.
+alias = {}
+bounces = set()
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
diff --git a/tools/u-boot-tools/patman/setup.py b/tools/u-boot-tools/patman/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..43fdc00ce6f43dce600b1f333c71de34562c9ac0
--- /dev/null
+++ b/tools/u-boot-tools/patman/setup.py
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+from distutils.core import setup
+setup(name='patman',
+      version='1.0',
+      license='GPL-2.0+',
+      scripts=['patman'],
+      packages=['patman'],
+      package_dir={'patman': ''},
+      package_data={'patman': ['README']},
+      classifiers=['Environment :: Console',
+                   'Topic :: Software Development'])
diff --git a/tools/u-boot-tools/patman/terminal.py b/tools/u-boot-tools/patman/terminal.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ceab189bf3ccf1fe8797b8708b381d1ac8ee9e8
--- /dev/null
+++ b/tools/u-boot-tools/patman/terminal.py
@@ -0,0 +1,161 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+"""Terminal utilities
+
+This module handles terminal interaction including ANSI color codes.
+"""
+
+from __future__ import print_function
+
+import os
+import sys
+
+# Selection of when we want our output to be colored
+COLOR_IF_TERMINAL, COLOR_ALWAYS, COLOR_NEVER = range(3)
+
+# Initially, we are set up to print to the terminal
+print_test_mode = False
+print_test_list = []
+
+class PrintLine:
+    """A line of text output
+
+    Members:
+        text: Text line that was printed
+        newline: True to output a newline after the text
+        colour: Text colour to use
+    """
+    def __init__(self, text, newline, colour):
+        self.text = text
+        self.newline = newline
+        self.colour = colour
+
+    def __str__(self):
+        return 'newline=%s, colour=%s, text=%s' % (self.newline, self.colour,
+                self.text)
+
+def Print(text='', newline=True, colour=None):
+    """Handle a line of output to the terminal.
+
+    In test mode this is recorded in a list. Otherwise it is output to the
+    terminal.
+
+    Args:
+        text: Text to print
+        newline: True to add a new line at the end of the text
+        colour: Colour to use for the text
+    """
+    if print_test_mode:
+        print_test_list.append(PrintLine(text, newline, colour))
+    else:
+        if colour:
+            col = Color()
+            text = col.Color(colour, text)
+        print(text, end='')
+        if newline:
+            print()
+        else:
+            sys.stdout.flush()
+
+def SetPrintTestMode():
+    """Go into test mode, where all printing is recorded"""
+    global print_test_mode
+
+    print_test_mode = True
+
+def GetPrintTestLines():
+    """Get a list of all lines output through Print()
+
+    Returns:
+        A list of PrintLine objects
+    """
+    global print_test_list
+
+    ret = print_test_list
+    print_test_list = []
+    return ret
+
+def EchoPrintTestLines():
+    """Print out the text lines collected"""
+    for line in print_test_list:
+        if line.colour:
+            col = Color()
+            print(col.Color(line.colour, line.text), end='')
+        else:
+            print(line.text, end='')
+        if line.newline:
+            print()
+
+
+class Color(object):
+    """Conditionally wraps text in ANSI color escape sequences."""
+    BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
+    BOLD = -1
+    BRIGHT_START = '\033[1;%dm'
+    NORMAL_START = '\033[22;%dm'
+    BOLD_START = '\033[1m'
+    RESET = '\033[0m'
+
+    def __init__(self, colored=COLOR_IF_TERMINAL):
+        """Create a new Color object, optionally disabling color output.
+
+        Args:
+          colored: COLOR_ALWAYS to always use colour, COLOR_NEVER to never use
+            it, or COLOR_IF_TERMINAL to use it only when stdout is a terminal.
+        """
+        try:
+            self._enabled = (colored == COLOR_ALWAYS or
+                    (colored == COLOR_IF_TERMINAL and
+                     os.isatty(sys.stdout.fileno())))
+        except Exception:
+            self._enabled = False
+
+    def Start(self, color, bright=True):
+        """Returns a start color code.
+
+        Args:
+          color: Color to use, e.g. BLACK, RED, etc.
+          bright: True (the default) to use the bright variant of the colour
+
+        Returns:
+          If color is enabled, returns an ANSI sequence to start the given
+          color, otherwise returns empty string
+        """
+        if self._enabled:
+            base = self.BRIGHT_START if bright else self.NORMAL_START
+            return base % (color + 30)
+        return ''
+
+    def Stop(self):
+        """Retruns a stop color code.
+
+        Returns:
+          If color is enabled, returns an ANSI color reset sequence,
+          otherwise returns empty string
+        """
+        if self._enabled:
+            return self.RESET
+        return ''
+
+    def Color(self, color, text, bright=True):
+        """Returns text with conditionally added color escape sequences.
+
+        Keyword arguments:
+          color: Text color -- one of the color constants defined in this
+                  class.
+          text: The text to color.
+          bright: True (the default) to use the bright variant of the colour.
+
+        Returns:
+          If self._enabled is False, returns the original text. If it's True,
+          returns text with color escape sequences based on the value of
+          color.
+        """
+        if not self._enabled:
+            return text
+        if color == self.BOLD:
+            start = self.BOLD_START
+        else:
+            base = self.BRIGHT_START if bright else self.NORMAL_START
+            start = base % (color + 30)
+        return start + text + self.RESET
diff --git a/tools/u-boot-tools/patman/test.py b/tools/u-boot-tools/patman/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1b94bd1a7db44c1fff252e546596af251017941
--- /dev/null
+++ b/tools/u-boot-tools/patman/test.py
@@ -0,0 +1,276 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+import os
+import tempfile
+import unittest
+
+import checkpatch
+import gitutil
+import patchstream
+import series
+
+
+class TestPatch(unittest.TestCase):
+    """Test this program
+
+    TODO: Write tests for the rest of the functionality
+    """
+
+    def testBasic(self):
+        """Test basic filter operation"""
+        data='''
+
+From 656c9a8c31fa65859d924cd21da920d6ba537fad Mon Sep 17 00:00:00 2001
+From: Simon Glass <sjg@chromium.org>
+Date: Thu, 28 Apr 2011 09:58:51 -0700
+Subject: [PATCH (resend) 3/7] Tegra2: Add more clock support
+
+This adds functions to enable/disable clocks and reset to on-chip peripherals.
+
+cmd/pci.c:152:11: warning: format ‘%llx’ expects argument of type
+   ‘long long unsigned int’, but argument 3 has type
+   ‘u64 {aka long unsigned int}’ [-Wformat=]
+
+BUG=chromium-os:13875
+TEST=build U-Boot for Seaboard, boot
+
+Change-Id: I80fe1d0c0b7dd10aa58ce5bb1d9290b6664d5413
+
+Review URL: http://codereview.chromium.org/6900006
+
+Signed-off-by: Simon Glass <sjg@chromium.org>
+---
+ arch/arm/cpu/armv7/tegra2/Makefile         |    2 +-
+ arch/arm/cpu/armv7/tegra2/ap20.c           |   57 ++----
+ arch/arm/cpu/armv7/tegra2/clock.c          |  163 +++++++++++++++++
+'''
+        expected='''
+
+From 656c9a8c31fa65859d924cd21da920d6ba537fad Mon Sep 17 00:00:00 2001
+From: Simon Glass <sjg@chromium.org>
+Date: Thu, 28 Apr 2011 09:58:51 -0700
+Subject: [PATCH (resend) 3/7] Tegra2: Add more clock support
+
+This adds functions to enable/disable clocks and reset to on-chip peripherals.
+
+cmd/pci.c:152:11: warning: format ‘%llx’ expects argument of type
+   ‘long long unsigned int’, but argument 3 has type
+   ‘u64 {aka long unsigned int}’ [-Wformat=]
+
+Signed-off-by: Simon Glass <sjg@chromium.org>
+---
+
+ arch/arm/cpu/armv7/tegra2/Makefile         |    2 +-
+ arch/arm/cpu/armv7/tegra2/ap20.c           |   57 ++----
+ arch/arm/cpu/armv7/tegra2/clock.c          |  163 +++++++++++++++++
+'''
+        out = ''
+        inhandle, inname = tempfile.mkstemp()
+        infd = os.fdopen(inhandle, 'w')
+        infd.write(data)
+        infd.close()
+
+        exphandle, expname = tempfile.mkstemp()
+        expfd = os.fdopen(exphandle, 'w')
+        expfd.write(expected)
+        expfd.close()
+
+        patchstream.FixPatch(None, inname, series.Series(), None)
+        rc = os.system('diff -u %s %s' % (inname, expname))
+        self.assertEqual(rc, 0)
+
+        os.remove(inname)
+        os.remove(expname)
+
+    def GetData(self, data_type):
+        data='''From 4924887af52713cabea78420eff03badea8f0035 Mon Sep 17 00:00:00 2001
+From: Simon Glass <sjg@chromium.org>
+Date: Thu, 7 Apr 2011 10:14:41 -0700
+Subject: [PATCH 1/4] Add microsecond boot time measurement
+
+This defines the basics of a new boot time measurement feature. This allows
+logging of very accurate time measurements as the boot proceeds, by using
+an available microsecond counter.
+
+%s
+---
+ README              |   11 ++++++++
+ MAINTAINERS         |    3 ++
+ common/bootstage.c  |   50 ++++++++++++++++++++++++++++++++++++
+ include/bootstage.h |   71 +++++++++++++++++++++++++++++++++++++++++++++++++++
+ include/common.h    |    8 ++++++
+ 5 files changed, 141 insertions(+), 0 deletions(-)
+ create mode 100644 common/bootstage.c
+ create mode 100644 include/bootstage.h
+
+diff --git a/README b/README
+index 6f3748d..f9e4e65 100644
+--- a/README
++++ b/README
+@@ -2026,6 +2026,17 @@ The following options need to be configured:
+ 		example, some LED's) on your board. At the moment,
+ 		the following checkpoints are implemented:
+
++- Time boot progress
++		CONFIG_BOOTSTAGE
++
++		Define this option to enable microsecond boot stage timing
++		on supported platforms. For this to work your platform
++		needs to define a function timer_get_us() which returns the
++		number of microseconds since reset. This would normally
++		be done in your SOC or board timer.c file.
++
++		You can add calls to bootstage_mark() to set time markers.
++
+ - Standalone program support:
+ 		CONFIG_STANDALONE_LOAD_ADDR
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index b167b028ec..beb7dc634f 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -474,3 +474,8 @@ S:	Maintained
+ T:	git git://git.denx.de/u-boot.git
+ F:	*
+ F:	*/
++
++BOOTSTAGE
++M:	Simon Glass <sjg@chromium.org>
++L:	u-boot@lists.denx.de
++F:	common/bootstage.c
+diff --git a/common/bootstage.c b/common/bootstage.c
+new file mode 100644
+index 0000000..2234c87
+--- /dev/null
++++ b/common/bootstage.c
+@@ -0,0 +1,37 @@
++%s
++/*
++ * Copyright (c) 2011, Google Inc. All rights reserved.
++ *
++ */
++
++/*
++ * This module records the progress of boot and arbitrary commands, and
++ * permits accurate timestamping of each. The records can optionally be
++ * passed to kernel in the ATAGs
++ */
++
++#include <common.h>
++
++struct bootstage_record {
++	u32 time_us;
++	const char *name;
++};
++
++static struct bootstage_record record[BOOTSTAGE_COUNT];
++
++u32 bootstage_mark(enum bootstage_id id, const char *name)
++{
++	struct bootstage_record *rec = &record[id];
++
++	/* Only record the first event for each */
++%sif (!rec->name) {
++		rec->time_us = (u32)timer_get_us();
++		rec->name = name;
++	}
++	if (!rec->name &&
++	%ssomething_else) {
++		rec->time_us = (u32)timer_get_us();
++		rec->name = name;
++	}
++%sreturn rec->time_us;
++}
+--
+1.7.3.1
+'''
+        signoff = 'Signed-off-by: Simon Glass <sjg@chromium.org>\n'
+        license = '// SPDX-License-Identifier: GPL-2.0+'
+        tab = '	'
+        indent = '    '
+        if data_type == 'good':
+            pass
+        elif data_type == 'no-signoff':
+            signoff = ''
+        elif data_type == 'no-license':
+            license = ''
+        elif data_type == 'spaces':
+            tab = '   '
+        elif data_type == 'indent':
+            indent = tab
+        else:
+            print('not implemented')
+        return data % (signoff, license, tab, indent, tab)
+
+    def SetupData(self, data_type):
+        inhandle, inname = tempfile.mkstemp()
+        infd = os.fdopen(inhandle, 'w')
+        data = self.GetData(data_type)
+        infd.write(data)
+        infd.close()
+        return inname
+
+    def testGood(self):
+        """Test checkpatch operation"""
+        inf = self.SetupData('good')
+        result = checkpatch.CheckPatch(inf)
+        self.assertEqual(result.ok, True)
+        self.assertEqual(result.problems, [])
+        self.assertEqual(result.errors, 0)
+        self.assertEqual(result.warnings, 0)
+        self.assertEqual(result.checks, 0)
+        self.assertEqual(result.lines, 62)
+        os.remove(inf)
+
+    def testNoSignoff(self):
+        inf = self.SetupData('no-signoff')
+        result = checkpatch.CheckPatch(inf)
+        self.assertEqual(result.ok, False)
+        self.assertEqual(len(result.problems), 1)
+        self.assertEqual(result.errors, 1)
+        self.assertEqual(result.warnings, 0)
+        self.assertEqual(result.checks, 0)
+        self.assertEqual(result.lines, 62)
+        os.remove(inf)
+
+    def testNoLicense(self):
+        inf = self.SetupData('no-license')
+        result = checkpatch.CheckPatch(inf)
+        self.assertEqual(result.ok, False)
+        self.assertEqual(len(result.problems), 1)
+        self.assertEqual(result.errors, 0)
+        self.assertEqual(result.warnings, 1)
+        self.assertEqual(result.checks, 0)
+        self.assertEqual(result.lines, 62)
+        os.remove(inf)
+
+    def testSpaces(self):
+        inf = self.SetupData('spaces')
+        result = checkpatch.CheckPatch(inf)
+        self.assertEqual(result.ok, False)
+        self.assertEqual(len(result.problems), 3)
+        self.assertEqual(result.errors, 0)
+        self.assertEqual(result.warnings, 3)
+        self.assertEqual(result.checks, 0)
+        self.assertEqual(result.lines, 62)
+        os.remove(inf)
+
+    def testIndent(self):
+        inf = self.SetupData('indent')
+        result = checkpatch.CheckPatch(inf)
+        self.assertEqual(result.ok, False)
+        self.assertEqual(len(result.problems), 1)
+        self.assertEqual(result.errors, 0)
+        self.assertEqual(result.warnings, 0)
+        self.assertEqual(result.checks, 1)
+        self.assertEqual(result.lines, 62)
+        os.remove(inf)
+
+
+if __name__ == "__main__":
+    unittest.main()
+    gitutil.RunTests()
diff --git a/tools/u-boot-tools/patman/test/test01.txt b/tools/u-boot-tools/patman/test/test01.txt
new file mode 100644
index 0000000000000000000000000000000000000000..478ea93674d0082275e3d68848d65fedda1fa3d0
--- /dev/null
+++ b/tools/u-boot-tools/patman/test/test01.txt
@@ -0,0 +1,56 @@
+commit b9da5f937bd5ea4931ea17459bf79b2905d9594d
+Author: Simon Glass <sjg@chromium.org>
+Date:   Sat Apr 15 15:39:08 2017 -0600
+
+    pci: Correct cast for sandbox
+    
+    This gives a warning with some native compilers:
+    
+    cmd/pci.c:152:11: warning: format ‘%llx’ expects argument of type
+       ‘long long unsigned int’, but argument 3 has type
+       ‘u64 {aka long unsigned int}’ [-Wformat=]
+    
+    Fix it with a cast.
+    
+    Signed-off-by: Simon Glass <sjg@chromium.org>
+    Series-notes:
+    some notes
+    about some things
+    from the first commit
+    END
+    
+    Commit-notes:
+    Some notes about
+    the first commit
+    END
+
+commit 5ab48490f03051875ab13d288a4bf32b507d76fd
+Author: Simon Glass <sjg@chromium.org>
+Date:   Sat Apr 15 15:39:08 2017 -0600
+
+    fdt: Correct cast for sandbox in fdtdec_setup_mem_size_base()
+    
+    This gives a warning with some native compilers:
+    
+    lib/fdtdec.c:1203:8: warning: format ‘%llx’ expects argument of type
+       ‘long long unsigned int’, but argument 3 has type
+       ‘long unsigned int’ [-Wformat=]
+    
+    Fix it with a cast.
+    
+    Signed-off-by: Simon Glass <sjg@chromium.org>
+    Series-to: u-boot
+    Series-prefix: RFC
+    Series-cc: Stefan Brüns <stefan.bruens@rwth-aachen.de>
+    Cover-letter-cc: Lord Mëlchett <clergy@palace.gov>
+    Series-version: 3
+    Patch-cc: fred
+    Series-changes: 4
+    - Some changes
+    
+    Cover-letter:
+    test: A test patch series
+    This is a test of how the cover
+    leter
+    works
+    END
diff --git a/tools/u-boot-tools/patman/test_util.py b/tools/u-boot-tools/patman/test_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..687d40704abd8aef9e64f8a8995d0f40f0f1de08
--- /dev/null
+++ b/tools/u-boot-tools/patman/test_util.py
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (c) 2016 Google, Inc
+#
+
+from contextlib import contextmanager
+import glob
+import os
+import sys
+
+import command
+
+try:
+    from StringIO import StringIO
+except ImportError:
+    from io import StringIO
+
+
+def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None):
+    """Run tests and check that we get 100% coverage
+
+    Args:
+        prog: Program to run (it will be passed a '-t' argument to run tests)
+        filter_fname: Normally all *.py files in the program's directory will
+            be included. If this is not None, then it is used to filter the
+            list so that only filenames that don't contain filter_fname are
+            included.
+        exclude_list: List of file patterns to exclude from the coverage
+            calculation
+        build_dir: Build directory, used to locate libfdt.py
+        required: List of modules which must be in the coverage report
+
+    Raises:
+        ValueError if the code coverage is not 100%
+    """
+    # This uses the build output from sandbox_spl to get _libfdt.so
+    path = os.path.dirname(prog)
+    if filter_fname:
+        glob_list = glob.glob(os.path.join(path, '*.py'))
+        glob_list = [fname for fname in glob_list if filter_fname in fname]
+    else:
+        glob_list = []
+    glob_list += exclude_list
+    glob_list += ['*libfdt.py', '*site-packages*']
+    cmd = ('PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools python-coverage run '
+           '--omit "%s" %s -P1 -t' % (build_dir, ','.join(glob_list), prog))
+    os.system(cmd)
+    stdout = command.Output('python-coverage', 'report')
+    lines = stdout.splitlines()
+    ok = True
+    if required:
+        # Convert '/path/to/name.py' to just the module name 'name'
+        test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
+                        for line in lines if '/etype/' in line])
+        missing_list = required
+        missing_list.difference_update(test_set)
+        if missing_list:
+            print('Missing tests for %s' % (', '.join(missing_list)))
+            print(stdout)
+            ok = False
+
+    coverage = lines[-1].split(' ')[-1]
+    print(coverage)
+    if coverage != '100%':
+        print(stdout)
+        print("Type 'python-coverage html' to get a report in "
+              'htmlcov/index.html')
+        print('Coverage error: %s, but should be 100%%' % coverage)
+        ok = False
+    if not ok:
+        raise ValueError('Test coverage failure')
+
+
+# Use this to suppress stdout/stderr output:
+# with capture_sys_output() as (stdout, stderr)
+#   ...do something...
+@contextmanager
+def capture_sys_output():
+    capture_out, capture_err = StringIO(), StringIO()
+    old_out, old_err = sys.stdout, sys.stderr
+    try:
+        sys.stdout, sys.stderr = capture_out, capture_err
+        yield capture_out, capture_err
+    finally:
+        sys.stdout, sys.stderr = old_out, old_err
diff --git a/tools/u-boot-tools/patman/tools.py b/tools/u-boot-tools/patman/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf099798e65006fcae290cc0cc95ae0244d78bd3
--- /dev/null
+++ b/tools/u-boot-tools/patman/tools.py
@@ -0,0 +1,241 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (c) 2016 Google, Inc
+#
+
+import command
+import glob
+import os
+import shutil
+import tempfile
+
+import tout
+
+# Output directory (generally this is temporary)
+outdir = None
+
+# True to keep the output directory around after exiting
+preserve_outdir = False
+
+# Path to the Chrome OS chroot, if we know it
+chroot_path = None
+
+# Search paths to use for Filename(), used to find files
+search_paths = []
+
+# Tools and the packages that contain them, on debian
+packages = {
+    'lz4': 'liblz4-tool',
+    }
+
+# List of paths to use when looking for an input file
+indir = []
+
+def PrepareOutputDir(dirname, preserve=False):
+    """Select an output directory, ensuring it exists.
+
+    This either creates a temporary directory or checks that the one supplied
+    by the user is valid. For a temporary directory, it makes a note to
+    remove it later if required.
+
+    Args:
+        dirname: a string, name of the output directory to use to store
+                intermediate and output files. If is None - create a temporary
+                directory.
+        preserve: a Boolean. If dirname above is None and preserve is False, the
+                created temporary directory will be destroyed on exit.
+
+    Raises:
+        OSError: If it cannot create the output directory.
+    """
+    global outdir, preserve_outdir
+
+    preserve_outdir = dirname or preserve
+    if dirname:
+        outdir = dirname
+        if not os.path.isdir(outdir):
+            try:
+                os.makedirs(outdir)
+            except OSError as err:
+                raise CmdError("Cannot make output directory '%s': '%s'" %
+                                (outdir, err.strerror))
+        tout.Debug("Using output directory '%s'" % outdir)
+    else:
+        outdir = tempfile.mkdtemp(prefix='binman.')
+        tout.Debug("Using temporary directory '%s'" % outdir)
+
+def _RemoveOutputDir():
+    global outdir
+
+    shutil.rmtree(outdir)
+    tout.Debug("Deleted temporary directory '%s'" % outdir)
+    outdir = None
+
+def FinaliseOutputDir():
+    """Tidy up: delete output directory if temporary and not preserved."""
+    global outdir, preserve_outdir
+
+    if outdir and not preserve_outdir:
+        _RemoveOutputDir()
+
+def GetOutputFilename(fname):
+    """Return a filename within the output directory.
+
+    Args:
+        fname: Filename to use for new file
+
+    Returns:
+        The full path of the filename, within the output directory
+    """
+    return os.path.join(outdir, fname)
+
+def _FinaliseForTest():
+    """Remove the output directory (for use by tests)"""
+    global outdir
+
+    if outdir:
+        _RemoveOutputDir()
+
+def SetInputDirs(dirname):
+    """Add a list of input directories, where input files are kept.
+
+    Args:
+        dirname: a list of paths to input directories to use for obtaining
+                files needed by binman to place in the image.
+    """
+    global indir
+
+    indir = dirname
+    tout.Debug("Using input directories %s" % indir)
+
+def GetInputFilename(fname):
+    """Return a filename for use as input.
+
+    Args:
+        fname: Filename to use for new file
+
+    Returns:
+        The full path of the filename, within the input directory
+    """
+    if not indir:
+        return fname
+    for dirname in indir:
+        pathname = os.path.join(dirname, fname)
+        if os.path.exists(pathname):
+            return pathname
+
+    raise ValueError("Filename '%s' not found in input path (%s) (cwd='%s')" %
+                     (fname, ','.join(indir), os.getcwd()))
+
+def GetInputFilenameGlob(pattern):
+    """Return a list of filenames for use as input.
+
+    Args:
+        pattern: Filename pattern to search for
+
+    Returns:
+        A list of matching files in all input directories
+    """
+    if not indir:
+        return glob.glob(pattern)
+    files = []
+    for dirname in indir:
+        pathname = os.path.join(dirname, pattern)
+        files += glob.glob(pathname)
+    return sorted(files)
+
+def Align(pos, align):
+    """Round pos up to the next multiple of align (0 or a power of 2)"""
+    if align:
+        mask = align - 1
+        pos = (pos + mask) & ~mask
+    return pos
+
+def NotPowerOfTwo(num):
+    """Return a truthy value if num is not 0 and not a power of 2"""
+    return num and (num & (num - 1))
+
+def PathHasFile(fname):
+    """Check if a given filename is in the PATH
+
+    Args:
+        fname: Filename to check
+
+    Returns:
+        True if found, False if not
+    """
+    for dir in os.environ['PATH'].split(':'):
+        if os.path.exists(os.path.join(dir, fname)):
+            return True
+    return False
+
+def Run(name, *args):
+    """Run a tool with the given arguments, in the output directory
+
+    Args:
+        name: Name of the tool to run
+        *args: Arguments to pass to the tool
+    """
+    try:
+        return command.Run(name, *args, cwd=outdir, capture=True)
+    except:
+        if not PathHasFile(name):
+            msg = "Please install tool '%s'" % name
+            package = packages.get(name)
+            if package:
+                msg += " (e.g. from package '%s')" % package
+            raise ValueError(msg)
+        raise
+
+def Filename(fname):
+    """Resolve a file path to an absolute path.
+
+    If fname starts with ##/ and a chroot is available, the ##/ prefix is
+    replaced with the chroot path. If no chroot is available, the name cannot
+    be resolved and None is returned.
+
+    If fname does not carry that prefix and does not name an existing file,
+    the search_paths directories (if any) are searched for a file with the
+    same basename. If one is found, its path is returned; otherwise the
+    original fname is returned unchanged.
+
+    Args:
+      fname: a string, the path to resolve.
+
+    Returns:
+      The resolved path, or None if a ##/ name cannot be resolved.
+    """
+    if fname.startswith('##/'):
+        if chroot_path:
+            fname = os.path.join(chroot_path, fname[3:])
+        else:
+            return None
+
+    # Search for a pathname that exists, and return it if found
+    if fname and not os.path.exists(fname):
+        for path in search_paths:
+            pathname = os.path.join(path, os.path.basename(fname))
+            if os.path.exists(pathname):
+                return pathname
+
+    # If not found, just return the standard, unchanged path
+    return fname
+
+def ReadFile(fname):
+    """Read and return the contents of a file.
+
+    Args:
+      fname: path to filename to read, where a ##/ prefix signifies the chroot.
+
+    Returns:
+      data read from file, as a string.
+    """
+    with open(Filename(fname), 'rb') as fd:
+        data = fd.read()
+    #self._out.Info("Read file '%s' size %d (%#0x)" %
+                   #(fname, len(data), len(data)))
+    return data
+
+def WriteFile(fname, data):
+    """Write data into a file.
+
+    Args:
+        fname: path to filename to write
+        data: data to write to file, as a string
+    """
+    #self._out.Info("Write file '%s' size %d (%#0x)" %
+                   #(fname, len(data), len(data)))
+    with open(Filename(fname), 'wb') as fd:
+        fd.write(data)
diff --git a/tools/u-boot-tools/patman/tout.py b/tools/u-boot-tools/patman/tout.py
new file mode 100644
index 0000000000000000000000000000000000000000..4957c7ae1df1ed645303e51382769d5efcead970
--- /dev/null
+++ b/tools/u-boot-tools/patman/tout.py
@@ -0,0 +1,171 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+#
+# Terminal output logging.
+#
+
+import sys
+
+import terminal
+
+# Output verbosity levels that we support
+ERROR = 0
+WARNING = 1
+NOTICE = 2
+INFO = 3
+DEBUG = 4
+
+in_progress = False
+
+"""
+This class handles output of progress and other useful information
+to the user. It provides for simple verbosity level control and can
+output nothing but errors at verbosity zero.
+
+The idea is that modules set up an Output object early in their years and pass
+it around to other modules that need it. This keeps the output under control
+of a single class.
+
+Public properties:
+    verbose: Verbosity level: 0=silent, 1=progress, 3=full, 4=debug
+"""
+def __enter__():
+    return
+
+def __exit__(unused1, unused2, unused3):
+    """Clean up and remove any progress message."""
+    ClearProgress()
+    return False
+
+def UserIsPresent():
+    """This returns True if it is likely that a user is present.
+
+    Sometimes we want to prompt the user, but if no one is there then this
+    is a waste of time, and may lock a script which should otherwise fail.
+
+    Returns:
+        True if it thinks the user is there, and False otherwise
+    """
+    return stdout_is_tty and verbose > 0
+
+def ClearProgress():
+    """Clear any active progress message on the terminal."""
+    global in_progress
+    if verbose > 0 and stdout_is_tty and in_progress:
+        _stdout.write('\r%s\r' % (" " * len(_progress)))
+        _stdout.flush()
+        in_progress = False
+
+def Progress(msg, warning=False, trailer='...'):
+    """Display progress information.
+
+    Args:
+        msg: Message to display.
+        warning: True if this is a warning."""
+    global in_progress
+    ClearProgress()
+    if verbose > 0:
+        _progress = msg + trailer
+        if stdout_is_tty:
+            col = _color.YELLOW if warning else _color.GREEN
+            _stdout.write('\r' + _color.Color(col, _progress))
+            _stdout.flush()
+            in_progress = True
+        else:
+            _stdout.write(_progress + '\n')
+
+def _Output(level, msg, color=None):
+    """Output a message to the terminal.
+
+    Args:
+        level: Verbosity level for this message. It will only be displayed if
+                the currently selected level is at least as high as this.
+        msg: Message to display.
+        color: Color to use for the message, or None for no color.
+    """
+    if verbose >= level:
+        ClearProgress()
+        if color:
+            msg = _color.Color(color, msg)
+        _stdout.write(msg + '\n')
+
+def DoOutput(level, msg):
+    """Output a message to the terminal.
+
+    Args:
+        level: Verbosity level for this message. It will only be displayed if
+                the currently selected level is at least as high as this.
+        msg: Message to display.
+    """
+    _Output(level, msg)
+
+def Error(msg):
+    """Display an error message
+
+    Args:
+        msg: Message to display.
+    """
+    _Output(0, msg, _color.RED)
+
+def Warning(msg):
+    """Display a warning message
+
+    Args:
+        msg: Message to display.
+    """
+    _Output(1, msg, _color.YELLOW)
+
+def Notice(msg):
+    """Display an important infomation message
+
+    Args:
+        msg; Message to display.
+    """
+    _Output(2, msg)
+
+def Info(msg):
+    """Display an infomation message
+
+    Args:
+        msg; Message to display.
+    """
+    _Output(3, msg)
+
+def Debug(msg):
+    """Display a debug message
+
+    Args:
+        msg: Message to display.
+    """
+    _Output(4, msg)
+
+def UserOutput(msg):
+    """Display a message regardless of the current output level.
+
+    This is used when the output was specifically requested by the user.
+
+    Args:
+        msg: Message to display.
+    """
+    _Output(0, msg)
+
+def Init(_verbose=WARNING, stdout=sys.stdout):
+    """Initialize a new output object.
+
+    Args:
+        verbose: Verbosity level (0-4).
+        stdout: File to use for stdout.
+    """
+    global verbose, _progress, _color, _stdout, stdout_is_tty
+
+    verbose = _verbose
+    _progress = ''                    # Our last progress message
+    _color = terminal.Color()
+    _stdout = stdout
+
+    # TODO(sjg): Move this into Chromite libraries when we have them
+    stdout_is_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
+
+def Uninit():
+    ClearProgress()
+
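+# Typical use (a minimal sketch): call Init(INFO) once near startup, then use
+# Progress()/Notice()/Info()/Error() as required, and call Uninit() on exit so
+# that any pending progress message is cleared.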
+Init()
diff --git a/tools/u-boot-tools/pbl_crc32.c b/tools/u-boot-tools/pbl_crc32.c
new file mode 100644
index 0000000000000000000000000000000000000000..06da1d92ffdd69a1406e989c78ae1501e774ff07
--- /dev/null
+++ b/tools/u-boot-tools/pbl_crc32.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Cleaned up and refactored by Charles Manning.
+ */
+#include "pblimage.h"
+
+static uint32_t crc_table[256];
+static int crc_table_valid;
+
+static void make_crc_table(void)
+{
+	uint32_t mask;
+	int i, j;
+	uint32_t poly; /* polynomial exclusive-or pattern */
+
+	if (crc_table_valid)
+		return;
+
+	/*
+	 * the polynomial used by PBL is 1 + x1 + x2 + x4 + x5 + x7 + x8 + x10
+	 * + x11 + x12 + x16 + x22 + x23 + x26 + x32.
+	 */
+	poly = 0x04c11db7;
+
+	for (i = 0; i < 256; i++) {
+		mask = i << 24;
+		for (j = 0; j < 8; j++) {
+			if (mask & 0x80000000)
+				mask = (mask << 1) ^ poly;
+			else
+				mask <<= 1;
+		}
+		crc_table[i] = mask;
+	}
+
+	crc_table_valid = 1;
+}
+
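+/*
+ * Compute the CRC used by the PBL: a table-driven, MSB-first CRC-32 with
+ * polynomial 0x04c11db7. The accumulator is seeded with ~in_crc and the
+ * result is returned without a final inversion.
+ */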
+uint32_t pbl_crc32(uint32_t in_crc, const char *buf, uint32_t len)
+{
+	uint32_t crc32_val;
+	int i;
+
+	make_crc_table();
+
+	crc32_val = ~in_crc;
+
+	for (i = 0; i < len; i++)
+		crc32_val = (crc32_val << 8) ^
+			crc_table[(crc32_val >> 24) ^ (*buf++ & 0xff)];
+
+	return crc32_val;
+}
diff --git a/tools/u-boot-tools/pbl_crc32.h b/tools/u-boot-tools/pbl_crc32.h
new file mode 100644
index 0000000000000000000000000000000000000000..4320a47d4c887bdb511491634ffd8f8d0fc39ddd
--- /dev/null
+++ b/tools/u-boot-tools/pbl_crc32.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ */
+
+#ifndef PBLCRC32_H
+#define PBLCRC32_H
+
+#include <stdint.h>
+uint32_t pbl_crc32(uint32_t in_crc, const char *buf, uint32_t len);
+
+#endif
diff --git a/tools/u-boot-tools/pbl_crc32.o b/tools/u-boot-tools/pbl_crc32.o
new file mode 100644
index 0000000000000000000000000000000000000000..9862217786477537f218dcef560e0bebdc1a65bc
Binary files /dev/null and b/tools/u-boot-tools/pbl_crc32.o differ
diff --git a/tools/u-boot-tools/pblimage.c b/tools/u-boot-tools/pblimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..d11f9afe24521cb78c2298c31c4700595b76f397
--- /dev/null
+++ b/tools/u-boot-tools/pblimage.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2012-2014 Freescale Semiconductor, Inc.
+ */
+#include "imagetool.h"
+#include <image.h>
+#include "pblimage.h"
+#include "pbl_crc32.h"
+
+#define roundup(x, y)		((((x) + ((y) - 1)) / (y)) * (y))
+#define PBL_ACS_CONT_CMD	0x81000000
+#define PBL_ADDR_24BIT_MASK	0x00ffffff
+
+/*
+ * Initialize to an invalid value.
+ */
+static uint32_t next_pbl_cmd = 0x82000000;
+/*
+ * need to store all bytes in memory for calculating crc32, then write the
+ * bytes to image file for PBL boot.
+ */
+static unsigned char mem_buf[1000000];
+static unsigned char *pmem_buf = mem_buf;
+static int pbl_size;
+static char *fname = "Unknown";
+static int lineno = -1;
+static struct pbl_header pblimage_header;
+static int uboot_size;
+static int arch_flag;
+
+static uint32_t pbl_cmd_initaddr;
+static uint32_t pbi_crc_cmd1;
+static uint32_t pbi_crc_cmd2;
+static uint32_t pbl_end_cmd[4];
+
+static union
+{
+	char c[4];
+	unsigned char l;
+} endian_test = { {'l', '?', '?', 'b'} };
+
+#define ENDIANNESS ((char)endian_test.l)
+
+/*
+ * The PBL can load up to 64 bytes at a time, so we split the U-Boot
+ * image into 64 byte chunks. PBL needs a command for each piece, of
+ * the form "81xxxxxx", where "xxxxxx" is the offset. Calculate the
+ * start offset by subtracting the size of the u-boot image from the
+ * top of the allowable 24-bit range.
+ */
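+/*
+ * For illustration (using the PowerPC initaddr of 0x82000000 set up in
+ * pblimage_check_params): a hypothetical 0x100-byte padded U-Boot gives a
+ * first command of 0x81ffff00, followed by 0x81ffff40, 0x81ffff80 and
+ * 0x81ffffc0, each preceding a 64-byte chunk of the image.
+ */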
+static void generate_pbl_cmd(void)
+{
+	uint32_t val = next_pbl_cmd;
+	next_pbl_cmd += 0x40;
+	int i;
+
+	for (i = 3; i >= 0; i--) {
+		*pmem_buf++ = (val >> (i * 8)) & 0xff;
+		pbl_size++;
+	}
+}
+
+static void pbl_fget(size_t size, FILE *stream)
+{
+	unsigned char c = 0xff;
+	int c_temp;
+
+	while (size) {
+		c_temp = fgetc(stream);
+		if (c_temp != EOF)
+			c = (unsigned char)c_temp;
+		else if ((c_temp == EOF) && (arch_flag == IH_ARCH_ARM))
+			c = 0xff;
+		*pmem_buf++ = c;
+		pbl_size++;
+		size--;
+	}
+}
+
+/* load split u-boot with PBI command 81xxxxxx. */
+static void load_uboot(FILE *fp_uboot)
+{
+	next_pbl_cmd = pbl_cmd_initaddr - uboot_size;
+	while (next_pbl_cmd < pbl_cmd_initaddr) {
+		generate_pbl_cmd();
+		pbl_fget(64, fp_uboot);
+	}
+}
+
+static void check_get_hexval(char *token)
+{
+	uint32_t hexval;
+	int i;
+
+	if (!sscanf(token, "%x", &hexval)) {
+		printf("Error:%s[%d] - Invalid hex data(%s)\n", fname,
+			lineno, token);
+		exit(EXIT_FAILURE);
+	}
+	for (i = 3; i >= 0; i--) {
+		*pmem_buf++ = (hexval >> (i * 8)) & 0xff;
+		pbl_size++;
+	}
+}
+
+static void pbl_parser(char *name)
+{
+	FILE *fd = NULL;
+	char *line = NULL;
+	char *token, *saveptr1, *saveptr2;
+	size_t len = 0;
+
+	fname = name;
+	fd = fopen(name, "r");
+	if (fd == NULL) {
+		printf("Error:%s - Can't open\n", fname);
+		exit(EXIT_FAILURE);
+	}
+
+	while ((getline(&line, &len, fd)) > 0) {
+		lineno++;
+		token = strtok_r(line, "\r\n", &saveptr1);
+		/* drop all lines with zero tokens (= empty lines) */
+		if (token == NULL)
+			continue;
+		for (line = token;; line = NULL) {
+			token = strtok_r(line, " \t", &saveptr2);
+			if (token == NULL)
+				break;
+			/* Drop all text starting with '#' as comments */
+			if (token[0] == '#')
+				break;
+			check_get_hexval(token);
+		}
+	}
+	if (line)
+		free(line);
+	fclose(fd);
+}
+
+static uint32_t reverse_byte(uint32_t val)
+{
+	uint32_t temp;
+	unsigned char *p1;
+	int j;
+
+	temp = val;
+	p1 = (unsigned char *)&temp;
+	for (j = 3; j >= 0; j--)
+		*p1++ = (val >> (j * 8)) & 0xff;
+	return temp;
+}
+
+/* write end command and crc command to memory. */
+static void add_end_cmd(void)
+{
+	uint32_t crc32_pbl;
+	int i;
+	unsigned char *p = (unsigned char *)&pbl_end_cmd;
+
+	if (ENDIANNESS == 'l') {
+		for (i = 0; i < 4; i++)
+			pbl_end_cmd[i] = reverse_byte(pbl_end_cmd[i]);
+	}
+
+	for (i = 0; i < 16; i++) {
+		*pmem_buf++ = *p++;
+		pbl_size++;
+	}
+
+	/* Add PBI CRC command. */
+	*pmem_buf++ = 0x08;
+	*pmem_buf++ = pbi_crc_cmd1;
+	*pmem_buf++ = pbi_crc_cmd2;
+	*pmem_buf++ = 0x40;
+	pbl_size += 4;
+
+	/* calculated CRC32 and write it to memory. */
+	crc32_pbl = pbl_crc32(0, (const char *)mem_buf, pbl_size);
+	*pmem_buf++ = (crc32_pbl >> 24) & 0xff;
+	*pmem_buf++ = (crc32_pbl >> 16) & 0xff;
+	*pmem_buf++ = (crc32_pbl >> 8) & 0xff;
+	*pmem_buf++ = (crc32_pbl) & 0xff;
+	pbl_size += 4;
+}
+
+void pbl_load_uboot(int ifd, struct image_tool_params *params)
+{
+	FILE *fp_uboot;
+	int size;
+
+	/* parse the rcw.cfg file. */
+	pbl_parser(params->imagename);
+
+	/* parse the pbi.cfg file. */
+	if (params->imagename2[0] != '\0')
+		pbl_parser(params->imagename2);
+
+	if (params->datafile) {
+		fp_uboot = fopen(params->datafile, "r");
+		if (fp_uboot == NULL) {
+			printf("Error: %s open failed\n", params->datafile);
+			exit(EXIT_FAILURE);
+		}
+
+		load_uboot(fp_uboot);
+		fclose(fp_uboot);
+	}
+	add_end_cmd();
+	lseek(ifd, 0, SEEK_SET);
+
+	size = pbl_size;
+	if (write(ifd, (const void *)&mem_buf, size) != size) {
+		fprintf(stderr, "Write error on %s: %s\n",
+			params->imagefile, strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+}
+
+static int pblimage_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_PBLIMAGE)
+		return EXIT_SUCCESS;
+	else
+		return EXIT_FAILURE;
+}
+
+static int pblimage_verify_header(unsigned char *ptr, int image_size,
+			struct image_tool_params *params)
+{
+	struct pbl_header *pbl_hdr = (struct pbl_header *) ptr;
+
+	/* Only a few checks can be done: search for magic numbers */
+	if (ENDIANNESS == 'l') {
+		if (pbl_hdr->preamble != reverse_byte(RCW_PREAMBLE))
+			return -FDT_ERR_BADSTRUCTURE;
+
+		if (pbl_hdr->rcwheader != reverse_byte(RCW_HEADER))
+			return -FDT_ERR_BADSTRUCTURE;
+	} else {
+		if (pbl_hdr->preamble != RCW_PREAMBLE)
+			return -FDT_ERR_BADSTRUCTURE;
+
+		if (pbl_hdr->rcwheader != RCW_HEADER)
+			return -FDT_ERR_BADSTRUCTURE;
+	}
+	return 0;
+}
+
+static void pblimage_print_header(const void *ptr)
+{
+	printf("Image Type:   Freescale PBL Boot Image\n");
+}
+
+static void pblimage_set_header(void *ptr, struct stat *sbuf, int ifd,
+				struct image_tool_params *params)
+{
+	/* Nothing to do here; pbl_load_uboot() takes care of the whole file. */
+}
+
+int pblimage_check_params(struct image_tool_params *params)
+{
+	FILE *fp_uboot;
+	int fd;
+	struct stat st;
+
+	if (!params)
+		return EXIT_FAILURE;
+
+	if (params->datafile) {
+		fp_uboot = fopen(params->datafile, "r");
+		if (fp_uboot == NULL) {
+			printf("Error: %s open failed\n", params->datafile);
+			exit(EXIT_FAILURE);
+		}
+		fd = fileno(fp_uboot);
+
+		if (fstat(fd, &st) == -1) {
+			printf("Error: Could not determine u-boot image size. %s\n",
+			       strerror(errno));
+			exit(EXIT_FAILURE);
+		}
+
+		/* For the variable size, pad it to 64 byte boundary */
+		uboot_size = roundup(st.st_size, 64);
+		fclose(fp_uboot);
+	}
+
+	if (params->arch == IH_ARCH_ARM) {
+		arch_flag = IH_ARCH_ARM;
+		pbi_crc_cmd1 = 0x61;
+		pbi_crc_cmd2 = 0;
+		pbl_cmd_initaddr = params->addr & PBL_ADDR_24BIT_MASK;
+		pbl_cmd_initaddr |= PBL_ACS_CONT_CMD;
+		pbl_cmd_initaddr += uboot_size;
+		pbl_end_cmd[0] = 0x09610000;
+		pbl_end_cmd[1] = 0x00000000;
+		pbl_end_cmd[2] = 0x096100c0;
+		pbl_end_cmd[3] = 0x00000000;
+	} else if (params->arch == IH_ARCH_PPC) {
+		arch_flag = IH_ARCH_PPC;
+		pbi_crc_cmd1 = 0x13;
+		pbi_crc_cmd2 = 0x80;
+		pbl_cmd_initaddr = 0x82000000;
+		pbl_end_cmd[0] = 0x091380c0;
+		pbl_end_cmd[1] = 0x00000000;
+		pbl_end_cmd[2] = 0x091380c0;
+		pbl_end_cmd[3] = 0x00000000;
+	}
+
+	next_pbl_cmd = pbl_cmd_initaddr;
+	return 0;
+}
+
+/* pblimage parameters */
+U_BOOT_IMAGE_TYPE(
+	pblimage,
+	"Freescale PBL Boot Image support",
+	sizeof(struct pbl_header),
+	(void *)&pblimage_header,
+	pblimage_check_params,
+	pblimage_verify_header,
+	pblimage_print_header,
+	pblimage_set_header,
+	NULL,
+	pblimage_check_image_types,
+	NULL,
+	NULL
+);
diff --git a/tools/u-boot-tools/pblimage.h b/tools/u-boot-tools/pblimage.h
new file mode 100644
index 0000000000000000000000000000000000000000..81c5492926bde4b846392b7adc3a43257f07efe4
--- /dev/null
+++ b/tools/u-boot-tools/pblimage.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ */
+
+#ifndef PBLIMAGE_H
+#define PBLIMAGE_H
+
+#define RCW_BYTES	64
+#define RCW_PREAMBLE	0xaa55aa55
+#define RCW_HEADER	0x010e0100
+
+struct pbl_header {
+	uint32_t preamble;
+	uint32_t rcwheader;
+	uint8_t rcw_data[RCW_BYTES];
+};
+
+#endif /* PBLIMAGE_H */
diff --git a/tools/u-boot-tools/pblimage.o b/tools/u-boot-tools/pblimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..c22d3bbc95ff09e4ecdc46ce15ecd7caa51bb65d
Binary files /dev/null and b/tools/u-boot-tools/pblimage.o differ
diff --git a/tools/u-boot-tools/prelink-riscv.c b/tools/u-boot-tools/prelink-riscv.c
new file mode 100644
index 0000000000000000000000000000000000000000..52eb78e9d061298cf8fd0253c3ee2dcc6f6da3db
--- /dev/null
+++ b/tools/u-boot-tools/prelink-riscv.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Andes Technology
+ * Chih-Mao Chen <cmchen@andestech.com>
+ *
+ * Statically process runtime relocations on RISC-V ELF images
+ * so that it can be directly executed when loaded at LMA
+ * without fixup. Both RV32 and RV64 are supported.
+ */
+
+#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
+#error "Only little-endian host is supported"
+#endif
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <elf.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#ifndef EM_RISCV
+#define EM_RISCV 243
+#endif
+
+#ifndef R_RISCV_32
+#define R_RISCV_32 1
+#endif
+
+#ifndef R_RISCV_64
+#define R_RISCV_64 2
+#endif
+
+#ifndef R_RISCV_RELATIVE
+#define R_RISCV_RELATIVE 3
+#endif
+
+const char *argv0;
+
+#define die(fmt, ...) \
+	do { \
+		fprintf(stderr, "%s: " fmt "\n", argv0, ## __VA_ARGS__); \
+		exit(EXIT_FAILURE); \
+	} while (0)
+
+#define PRELINK_INC_BITS 32
+#include "prelink-riscv.inc"
+#undef PRELINK_INC_BITS
+
+#define PRELINK_INC_BITS 64
+#include "prelink-riscv.inc"
+#undef PRELINK_INC_BITS
+
+int main(int argc, const char *const *argv)
+{
+	argv0 = argv[0];
+
+	if (argc < 2) {
+		fprintf(stderr, "Usage: %s <u-boot>\n", argv0);
+		exit(EXIT_FAILURE);
+	}
+
+	int fd = open(argv[1], O_RDWR, 0);
+
+	if (fd < 0)
+		die("Cannot open %s: %s", argv[1], strerror(errno));
+
+	struct stat st;
+
+	if (fstat(fd, &st) < 0)
+		die("Cannot stat %s: %s", argv[1], strerror(errno));
+
+	void *data =
+		mmap(0, st.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+
+	if (data == MAP_FAILED)
+		die("Cannot mmap %s: %s", argv[1], strerror(errno));
+
+	close(fd);
+
+	unsigned char *e_ident = (unsigned char *)data;
+
+	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0)
+		die("Invalid ELF file %s", argv[1]);
+
+	bool is64 = e_ident[EI_CLASS] == ELFCLASS64;
+
+	if (is64)
+		prelink64(data);
+	else
+		prelink32(data);
+
+	return 0;
+}
diff --git a/tools/u-boot-tools/prelink-riscv.inc b/tools/u-boot-tools/prelink-riscv.inc
new file mode 100644
index 0000000000000000000000000000000000000000..d49258707da4b7b1c32eae05c68b4c042b9fd200
--- /dev/null
+++ b/tools/u-boot-tools/prelink-riscv.inc
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Andes Technology
+ * Chih-Mao Chen <cmchen@andestech.com>
+ *
+ * Statically process runtime relocations on RISC-V ELF images
+ * so that it can be directly executed when loaded at LMA
+ * without fixup. Both RV32 and RV64 are supported.
+ */
+
+#define CONCAT_IMPL(x, y) x##y
+#define CONCAT(x, y) CONCAT_IMPL(x, y)
+#define CONCAT3(x, y, z) CONCAT(CONCAT(x, y), z)
+
+#define prelink_nn      CONCAT(prelink, PRELINK_INC_BITS)
+#define uintnn_t        CONCAT3(uint, PRELINK_INC_BITS, _t)
+#define get_offset_nn   CONCAT(get_offset_, PRELINK_INC_BITS)
+#define Elf_Ehdr        CONCAT3(Elf, PRELINK_INC_BITS, _Ehdr)
+#define Elf_Phdr        CONCAT3(Elf, PRELINK_INC_BITS, _Phdr)
+#define Elf_Rela        CONCAT3(Elf, PRELINK_INC_BITS, _Rela)
+#define Elf_Sym         CONCAT3(Elf, PRELINK_INC_BITS, _Sym)
+#define Elf_Dyn         CONCAT3(Elf, PRELINK_INC_BITS, _Dyn)
+#define Elf_Addr        CONCAT3(Elf, PRELINK_INC_BITS, _Addr)
+#define ELF_R_TYPE      CONCAT3(ELF, PRELINK_INC_BITS, _R_TYPE)
+#define ELF_R_SYM       CONCAT3(ELF, PRELINK_INC_BITS, _R_SYM)
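+
+/*
+ * For example, with PRELINK_INC_BITS defined as 64, prelink_nn expands to
+ * prelink64, uintnn_t to uint64_t, Elf_Ehdr to Elf64_Ehdr and ELF_R_TYPE to
+ * ELF64_R_TYPE, so including this file twice generates both the RV32 and the
+ * RV64 handlers.
+ */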
+
+static void* get_offset_nn (void* data, Elf_Phdr* phdrs, size_t phnum, Elf_Addr addr)
+{
+	Elf_Phdr *p;
+
+	for (p = phdrs; p < phdrs + phnum; ++p)
+		if (p->p_vaddr <= addr && p->p_vaddr + p->p_memsz > addr)
+			return data + p->p_offset + (addr - p->p_vaddr);
+
+	return NULL;
+}
+
+static void prelink_nn(void *data)
+{
+	Elf_Ehdr *ehdr = data;
+	Elf_Phdr *p;
+	Elf_Dyn *dyn;
+	Elf_Rela *r;
+
+	if (ehdr->e_machine != EM_RISCV)
+		die("Machine type is not RISC-V");
+
+	Elf_Phdr *phdrs = data + ehdr->e_phoff;
+
+	Elf_Dyn *dyns = NULL;
+	for (p = phdrs; p < phdrs + ehdr->e_phnum; ++p) {
+		if (p->p_type == PT_DYNAMIC) {
+			dyns = data + p->p_offset;
+			break;
+		}
+	}
+
+	if (dyns == NULL)
+		die("No dynamic section found");
+
+	Elf_Rela *rela_dyn = NULL;
+	size_t rela_count = 0;
+	Elf_Sym *dynsym = NULL;
+	for (dyn = dyns;; ++dyn) {
+		if (dyn->d_tag == DT_NULL)
+			break;
+		else if (dyn->d_tag == DT_RELA)
+			rela_dyn = get_offset_nn(data, phdrs, ehdr->e_phnum,
+						 dyn->d_un.d_ptr);
+		else if (dyn->d_tag == DT_RELASZ)
+			rela_count = dyn->d_un.d_val / sizeof(Elf_Rela);
+		else if (dyn->d_tag == DT_SYMTAB)
+			dynsym = get_offset_nn(data, phdrs, ehdr->e_phnum,
+					       dyn->d_un.d_ptr);
+	}
+
+	if (rela_dyn == NULL)
+		die("No .rela.dyn found");
+
+	if (dynsym == NULL)
+		die("No .dynsym found");
+
+	for (r = rela_dyn; r < rela_dyn + rela_count; ++r) {
+		void* buf = get_offset_nn(data, phdrs, ehdr->e_phnum, r->r_offset);
+
+		if (buf == NULL)
+			continue;
+
+		if (ELF_R_TYPE(r->r_info) == R_RISCV_RELATIVE)
+			*((uintnn_t*) buf) = r->r_addend;
+		else if (ELF_R_TYPE(r->r_info) == R_RISCV_32)
+			*((uint32_t*) buf) = dynsym[ELF_R_SYM(r->r_info)].st_value;
+		else if (ELF_R_TYPE(r->r_info) == R_RISCV_64)
+			*((uint64_t*) buf) = dynsym[ELF_R_SYM(r->r_info)].st_value;
+	}
+}
+
+#undef prelink_nn
+#undef uintnn_t
+#undef get_offset_nn
+#undef Elf_Ehdr
+#undef Elf_Phdr
+#undef Elf_Rela
+#undef Elf_Sym
+#undef Elf_Dyn
+#undef Elf_Addr
+#undef ELF_R_TYPE
+#undef ELF_R_SYM
+
+#undef CONCAT_IMPL
+#undef CONCAT
+#undef CONCAT3
diff --git a/tools/u-boot-tools/proftool b/tools/u-boot-tools/proftool
new file mode 100755
index 0000000000000000000000000000000000000000..729ea29df237dc367205374bafeab8d2799c38f8
Binary files /dev/null and b/tools/u-boot-tools/proftool differ
diff --git a/tools/u-boot-tools/proftool.c b/tools/u-boot-tools/proftool.c
new file mode 100644
index 0000000000000000000000000000000000000000..c1803fa78a761b7770af7a031442f1e057c4001d
--- /dev/null
+++ b/tools/u-boot-tools/proftool.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2013 Google, Inc
+ */
+
+/* Decode and dump U-Boot profiling information */
+
+#include <assert.h>
+#include <ctype.h>
+#include <limits.h>
+#include <regex.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/param.h>
+#include <sys/types.h>
+
+#include <compiler.h>
+#include <trace.h>
+
+#define MAX_LINE_LEN 500
+
+enum {
+	FUNCF_TRACE	= 1 << 0,	/* Include this function in trace */
+};
+
+struct func_info {
+	unsigned long offset;
+	const char *name;
+	unsigned long code_size;
+	unsigned long call_count;
+	unsigned flags;
+	/* the section this function is in */
+	struct objsection_info *objsection;
+};
+
+enum trace_line_type {
+	TRACE_LINE_INCLUDE,
+	TRACE_LINE_EXCLUDE,
+};
+
+struct trace_configline_info {
+	struct trace_configline_info *next;
+	enum trace_line_type type;
+	const char *name;	/* identifier name / wildcard */
+	regex_t regex;		/* Regex to use if name starts with / */
+};
+
+/* The contents of the trace config file */
+struct trace_configline_info *trace_config_head;
+
+struct func_info *func_list;
+int func_count;
+struct trace_call *call_list;
+int call_count;
+int verbose;	/* Verbosity level 0=none, 1=warn, 2=notice, 3=info, 4=debug */
+unsigned long text_offset;		/* text address of first function */
+
+static void outf(int level, const char *fmt, ...)
+		__attribute__ ((format (__printf__, 2, 3)));
+#define error(fmt, b...) outf(0, fmt, ##b)
+#define warn(fmt, b...) outf(1, fmt, ##b)
+#define notice(fmt, b...) outf(2, fmt, ##b)
+#define info(fmt, b...) outf(3, fmt, ##b)
+#define debug(fmt, b...) outf(4, fmt, ##b)
+
+
+static void outf(int level, const char *fmt, ...)
+{
+	if (verbose >= level) {
+		va_list args;
+
+		va_start(args, fmt);
+		vfprintf(stderr, fmt, args);
+		va_end(args);
+	}
+}
+
+static void usage(void)
+{
+	fprintf(stderr,
+		"Usage: proftool [-m <map>] [-p <profile>] [-t <trace-config>] [-v <0-4>] <cmd>\n"
+		"\n"
+		"Commands:\n"
+		"   dump-ftrace\t\tDump out textual data in ftrace format\n"
+		"\n"
+		"Options:\n"
+		"   -m <map>\tSpecify System.map file\n"
+		"   -p <profile>\tSpecify profile data file (from U-Boot)\n"
+		"   -t <trace-config>\tSpecify trace config file\n"
+		"   -v <0-4>\tSpecify verbosity\n");
+	exit(EXIT_FAILURE);
+}
+
+static int h_cmp_offset(const void *v1, const void *v2)
+{
+	const struct func_info *f1 = v1, *f2 = v2;
+
+	return (f1->offset / FUNC_SITE_SIZE) - (f2->offset / FUNC_SITE_SIZE);
+}
+
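+/*
+ * Parse a System.map-style file. Each line has the form
+ * "<hex address> <type> <name>", e.g. "40004000 T _start" (illustrative);
+ * only 't'/'w' (text/weak) symbols are kept, and offsets are stored relative
+ * to the first text symbol found.
+ */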
+static int read_system_map(FILE *fin)
+{
+	unsigned long offset, start = 0;
+	struct func_info *func;
+	char buff[MAX_LINE_LEN];
+	char symtype;
+	char symname[MAX_LINE_LEN + 1];
+	int linenum;
+	int alloced;
+
+	for (linenum = 1, alloced = func_count = 0;; linenum++) {
+		int fields = 0;
+
+		if (fgets(buff, sizeof(buff), fin))
+			fields = sscanf(buff, "%lx %c %100s\n", &offset,
+				&symtype, symname);
+		if (fields == 2) {
+			continue;
+		} else if (feof(fin)) {
+			break;
+		} else if (fields < 2) {
+			error("Map file line %d: invalid format\n", linenum);
+			return 1;
+		}
+
+		/* Must be a text symbol */
+		symtype = tolower(symtype);
+		if (symtype != 't' && symtype != 'w')
+			continue;
+
+		if (func_count == alloced) {
+			alloced += 256;
+			func_list = realloc(func_list,
+					sizeof(struct func_info) * alloced);
+			assert(func_list);
+		}
+		if (!func_count)
+			start = offset;
+
+		func = &func_list[func_count++];
+		memset(func, '\0', sizeof(*func));
+		func->offset = offset - start;
+		func->name = strdup(symname);
+		func->flags = FUNCF_TRACE;	/* trace by default */
+
+		/* Update previous function's code size */
+		if (func_count > 1)
+			func[-1].code_size = func->offset - func[-1].offset;
+	}
+	notice("%d functions found in map file\n", func_count);
+	text_offset = start;
+	return 0;
+}
+
+static int read_data(FILE *fin, void *buff, int size)
+{
+	int err;
+
+	err = fread(buff, 1, size, fin);
+	if (!err)
+		return 1;
+	if (err != size) {
+		error("Cannot read profile file at pos %ld\n", ftell(fin));
+		return -1;
+	}
+	return 0;
+}
+
+static struct func_info *find_func_by_offset(uint32_t offset)
+{
+	struct func_info key, *found;
+
+	key.offset = offset;
+	found = bsearch(&key, func_list, func_count, sizeof(struct func_info),
+			h_cmp_offset);
+
+	return found;
+}
+
+/* This finds the function which contains the given offset */
+static struct func_info *find_caller_by_offset(uint32_t offset)
+{
+	int low;	/* lowest index that could be a match */
+	int high;	/* highest index that could be a match */
+	struct func_info key;
+
+	low = 0;
+	high = func_count - 1;
+	key.offset = offset;
+	while (high > low + 1) {
+		int mid = (low + high) / 2;
+		int result;
+
+		result = h_cmp_offset(&key, &func_list[mid]);
+		if (result > 0)
+			low = mid;
+		else if (result < 0)
+			high = mid;
+		else
+			return &func_list[mid];
+	}
+
+	return low >= 0 ? &func_list[low] : NULL;
+}
+
+static int read_calls(FILE *fin, int count)
+{
+	struct trace_call *call_data;
+	int i;
+
+	notice("call count: %d\n", count);
+	call_list = (struct trace_call *)calloc(count, sizeof(*call_data));
+	if (!call_list) {
+		error("Cannot allocate call_list\n");
+		return -1;
+	}
+	call_count = count;
+
+	call_data = call_list;
+	for (i = 0; i < count; i++, call_data++) {
+		if (read_data(fin, call_data, sizeof(*call_data)))
+			return 1;
+	}
+	return 0;
+}
+
+static int read_profile(FILE *fin, int *not_found)
+{
+	struct trace_output_hdr hdr;
+
+	*not_found = 0;
+	while (!feof(fin)) {
+		int err;
+
+		err = read_data(fin, &hdr, sizeof(hdr));
+		if (err == 1)
+			break; /* EOF */
+		else if (err)
+			return 1;
+
+		switch (hdr.type) {
+		case TRACE_CHUNK_FUNCS:
+			/* Ignored at present */
+			break;
+
+		case TRACE_CHUNK_CALLS:
+			if (read_calls(fin, hdr.rec_count))
+				return 1;
+			break;
+		}
+	}
+	return 0;
+}
+
+static int read_map_file(const char *fname)
+{
+	FILE *fmap;
+	int err = 0;
+
+	fmap = fopen(fname, "r");
+	if (!fmap) {
+		error("Cannot open map file '%s'\n", fname);
+		return 1;
+	}
+	if (fmap) {
+		err = read_system_map(fmap);
+		fclose(fmap);
+	}
+	return err;
+}
+
+static int read_profile_file(const char *fname)
+{
+	int not_found = INT_MAX;
+	FILE *fprof;
+	int err;
+
+	fprof = fopen(fname, "rb");
+	if (!fprof) {
+		error("Cannot open profile data file '%s'\n",
+		      fname);
+		return 1;
+	} else {
+		err = read_profile(fprof, &not_found);
+		fclose(fprof);
+		if (err)
+			return err;
+
+		if (not_found) {
+			warn("%d profile functions could not be found in the map file - are you sure that your profile data and map file correspond?\n",
+			     not_found);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static int regex_report_error(regex_t *regex, int err, const char *op,
+			      const char *name)
+{
+	char buf[200];
+
+	regerror(err, regex, buf, sizeof(buf));
+	error("Regex error '%s' in %s '%s'\n", buf, op, name);
+	return -1;
+}
+
+static void check_trace_config_line(struct trace_configline_info *item)
+{
+	struct func_info *func, *end;
+	int err;
+
+	debug("Checking trace config line '%s'\n", item->name);
+	for (func = func_list, end = func + func_count; func < end; func++) {
+		err = regexec(&item->regex, func->name, 0, NULL, 0);
+		debug("   - regex '%s', string '%s': %d\n", item->name,
+		      func->name, err);
+		if (err == REG_NOMATCH)
+			continue;
+
+		if (err) {
+			regex_report_error(&item->regex, err, "match",
+					   item->name);
+			break;
+		}
+
+		/* It matches, so perform the action */
+		switch (item->type) {
+		case TRACE_LINE_INCLUDE:
+			info("      include %s at %lx\n", func->name,
+			     text_offset + func->offset);
+			func->flags |= FUNCF_TRACE;
+			break;
+
+		case TRACE_LINE_EXCLUDE:
+			info("      exclude %s at %lx\n", func->name,
+			     text_offset + func->offset);
+			func->flags &= ~FUNCF_TRACE;
+			break;
+		}
+	}
+}
+
+static void check_trace_config(void)
+{
+	struct trace_configline_info *line;
+
+	for (line = trace_config_head; line; line = line->next)
+		check_trace_config_line(line);
+}
+
+/**
+ * Check the functions to see if they each have an objsection. If not, then
+ * the linker must have eliminated them.
+ */
+static void check_functions(void)
+{
+	struct func_info *func, *end;
+	unsigned long removed_code_size = 0;
+	int not_found = 0;
+
+	/* Look for missing functions */
+	for (func = func_list, end = func + func_count; func < end; func++) {
+		if (!func->objsection) {
+			removed_code_size += func->code_size;
+			not_found++;
+		}
+	}
+
+	/* Figure out what functions we want to trace */
+	check_trace_config();
+
+	warn("%d functions removed by linker, %ld code size\n",
+	     not_found, removed_code_size);
+}
+
+static int read_trace_config(FILE *fin)
+{
+	char buff[200];
+	int linenum = 0;
+	struct trace_configline_info **tailp = &trace_config_head;
+
+	while (fgets(buff, sizeof(buff), fin)) {
+		int len = strlen(buff);
+		struct trace_configline_info *line;
+		char *saveptr;
+		char *s, *tok;
+		int err;
+
+		linenum++;
+		if (len && buff[len - 1] == '\n')
+			buff[len - 1] = '\0';
+
+		/* skip blank lines and comments */
+		for (s = buff; *s == ' ' || *s == '\t'; s++)
+			;
+		if (!*s || *s == '#')
+			continue;
+
+		line = (struct trace_configline_info *)calloc(1,
+							      sizeof(*line));
+		if (!line) {
+			error("Cannot allocate config line\n");
+			return -1;
+		}
+
+		tok = strtok_r(s, " \t", &saveptr);
+		if (!tok) {
+			error("Invalid trace config data on line %d\n",
+			      linenum);
+			return -1;
+		}
+		if (0 == strcmp(tok, "include-func")) {
+			line->type = TRACE_LINE_INCLUDE;
+		} else if (0 == strcmp(tok, "exclude-func")) {
+			line->type = TRACE_LINE_EXCLUDE;
+		} else {
+			error("Unknown command in trace config data line %d\n",
+			      linenum);
+			return -1;
+		}
+
+		tok = strtok_r(NULL, " \t", &saveptr);
+		if (!tok) {
+			error("Missing pattern in trace config data line %d\n",
+			      linenum);
+			return -1;
+		}
+
+		err = regcomp(&line->regex, tok, REG_NOSUB);
+		if (err) {
+			int r = regex_report_error(&line->regex, err,
+						   "compile", tok);
+			free(line);
+			return r;
+		}
+
+		/* link this new one to the end of the list */
+		line->name = strdup(tok);
+		line->next = NULL;
+		*tailp = line;
+		tailp = &line->next;
+	}
+
+	if (!feof(fin)) {
+		error("Cannot read from trace config file at position %ld\n",
+		      ftell(fin));
+		return -1;
+	}
+	return 0;
+}
+
+static int read_trace_config_file(const char *fname)
+{
+	FILE *fin;
+	int err;
+
+	fin = fopen(fname, "r");
+	if (!fin) {
+		error("Cannot open trace_config file '%s'\n", fname);
+		return -1;
+	}
+	err = read_trace_config(fin);
+	fclose(fin);
+	return err;
+}
+
+static void out_func(ulong func_offset, int is_caller, const char *suffix)
+{
+	struct func_info *func;
+
+	func = (is_caller ? find_caller_by_offset : find_func_by_offset)
+		(func_offset);
+
+	if (func)
+		printf("%s%s", func->name, suffix);
+	else
+		printf("%lx%s", func_offset, suffix);
+}
+
+/*
+ * # tracer: function
+ * #
+ * #           TASK-PID   CPU#    TIMESTAMP  FUNCTION
+ * #              | |      |          |         |
+ * #           bash-4251  [01] 10152.583854: path_put <-path_walk
+ * #           bash-4251  [01] 10152.583855: dput <-path_put
+ * #           bash-4251  [01] 10152.583855: _atomic_dec_and_lock <-dput
+ */
+static int make_ftrace(void)
+{
+	struct trace_call *call;
+	int missing_count = 0, skip_count = 0;
+	int i;
+
+	printf("# tracer: ftrace\n"
+		"#\n"
+		"#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n"
+		"#              | |      |          |         |\n");
+	for (i = 0, call = call_list; i < call_count; i++, call++) {
+		struct func_info *func = find_func_by_offset(call->func);
+		ulong time = call->flags & FUNCF_TIMESTAMP_MASK;
+
+		if (TRACE_CALL_TYPE(call) != FUNCF_ENTRY &&
+		    TRACE_CALL_TYPE(call) != FUNCF_EXIT)
+			continue;
+		if (!func) {
+			warn("Cannot find function at %lx\n",
+			     text_offset + call->func);
+			missing_count++;
+			continue;
+		}
+
+		if (!(func->flags & FUNCF_TRACE)) {
+			debug("Function '%s' is excluded from trace\n",
+			      func->name);
+			skip_count++;
+			continue;
+		}
+
+		printf("%16s-%-5d [01] %lu.%06lu: ", "uboot", 1,
+		       time / 1000000, time % 1000000);
+
+		out_func(call->func, 0, " <- ");
+		out_func(call->caller, 1, "\n");
+	}
+	info("ftrace: %d functions not found, %d excluded\n", missing_count,
+	     skip_count);
+
+	return 0;
+}
+
+static int prof_tool(int argc, char * const argv[],
+		     const char *prof_fname, const char *map_fname,
+		     const char *trace_config_fname)
+{
+	int err = 0;
+
+	if (read_map_file(map_fname))
+		return -1;
+	if (prof_fname && read_profile_file(prof_fname))
+		return -1;
+	if (trace_config_fname && read_trace_config_file(trace_config_fname))
+		return -1;
+
+	check_functions();
+
+	for (; argc; argc--, argv++) {
+		const char *cmd = *argv;
+
+		if (0 == strcmp(cmd, "dump-ftrace"))
+			err = make_ftrace();
+		else
+			warn("Unknown command '%s'\n", cmd);
+	}
+
+	return err;
+}
+
+int main(int argc, char *argv[])
+{
+	const char *map_fname = "System.map";
+	const char *prof_fname = NULL;
+	const char *trace_config_fname = NULL;
+	int opt;
+
+	verbose = 2;
+	while ((opt = getopt(argc, argv, "m:p:t:v:")) != -1) {
+		switch (opt) {
+		case 'm':
+			map_fname = optarg;
+			break;
+
+		case 'p':
+			prof_fname = optarg;
+			break;
+
+		case 't':
+			trace_config_fname = optarg;
+			break;
+
+		case 'v':
+			verbose = atoi(optarg);
+			break;
+
+		default:
+			usage();
+		}
+	}
+	argc -= optind; argv += optind;
+	if (argc < 1)
+		usage();
+
+	debug("Debug enabled\n");
+	return prof_tool(argc, argv, prof_fname, map_fname,
+			 trace_config_fname);
+}
diff --git a/tools/u-boot-tools/relocate-rela.c b/tools/u-boot-tools/relocate-rela.c
new file mode 100644
index 0000000000000000000000000000000000000000..6a524014b73c9e94fb6edfe20afe8b22c3e7149c
--- /dev/null
+++ b/tools/u-boot-tools/relocate-rela.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-2-Clause
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * 64-bit and little-endian target only until we need to support a different
+ * arch that needs this.
+ */
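+
+/*
+ * Example invocation (hypothetical addresses):
+ *
+ *   relocate-rela u-boot.bin 0x80000000 0x800a0000 0x800a0300
+ *
+ * This applies the R_AARCH64_RELATIVE entries found between the two .rela
+ * addresses directly to the binary, which is assumed to be laid out at the
+ * given text base.
+ */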
+
+#include <elf.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "compiler.h"
+
+#ifndef R_AARCH64_RELATIVE
+#define R_AARCH64_RELATIVE	1027
+#endif
+
+static const bool debug_en;
+
+static void debug(const char *fmt, ...)
+{
+	va_list args;
+
+	if (debug_en) {
+		va_start(args, fmt);
+		vprintf(fmt, args);
+		va_end(args);
+	}
+}
+
+static bool supported_rela(Elf64_Rela *rela)
+{
+	uint64_t mask = 0xffffffffULL; /* would be different on 32-bit */
+	uint32_t type = rela->r_info & mask;
+
+	switch (type) {
+#ifdef R_AARCH64_RELATIVE
+	case R_AARCH64_RELATIVE:
+		return true;
+#endif
+	default:
+		fprintf(stderr, "warning: unsupported relocation type %"
+				PRIu32 " at %" PRIx64 "\n",
+			type, rela->r_offset);
+
+		return false;
+	}
+}
+
+static bool read_num(const char *str, uint64_t *num)
+{
+	char *endptr;
+	*num = strtoull(str, &endptr, 16);
+	return str[0] && !endptr[0];
+}
+
+int main(int argc, char **argv)
+{
+	FILE *f;
+	int i, num;
+	uint64_t rela_start, rela_end, text_base;
+
+	if (argc != 5) {
+		fprintf(stderr, "Statically apply ELF rela relocations\n");
+		fprintf(stderr, "Usage: %s <bin file> <text base> " \
+				"<rela start> <rela end>\n", argv[0]);
+		fprintf(stderr, "All numbers in hex.\n");
+		return 1;
+	}
+
+	f = fopen(argv[1], "r+b");
+	if (!f) {
+		fprintf(stderr, "%s: Cannot open %s: %s\n",
+			argv[0], argv[1], strerror(errno));
+		return 2;
+	}
+
+	if (!read_num(argv[2], &text_base) ||
+	    !read_num(argv[3], &rela_start) ||
+	    !read_num(argv[4], &rela_end)) {
+		fprintf(stderr, "%s: bad number\n", argv[0]);
+		return 3;
+	}
+
+	if (rela_start > rela_end || rela_start < text_base ||
+	    (rela_end - rela_start) % sizeof(Elf64_Rela)) {
+		fprintf(stderr, "%s: bad rela bounds\n", argv[0]);
+		return 3;
+	}
+
+	rela_start -= text_base;
+	rela_end -= text_base;
+
+	num = (rela_end - rela_start) / sizeof(Elf64_Rela);
+
+	for (i = 0; i < num; i++) {
+		Elf64_Rela rela, swrela;
+		uint64_t pos = rela_start + sizeof(Elf64_Rela) * i;
+		uint64_t addr;
+
+		if (fseek(f, pos, SEEK_SET) < 0) {
+			fprintf(stderr, "%s: %s: seek to %" PRIx64
+					" failed: %s\n",
+				argv[0], argv[1], pos, strerror(errno));
+		}
+
+		if (fread(&rela, sizeof(rela), 1, f) != 1) {
+			fprintf(stderr, "%s: %s: read rela failed at %"
+					PRIx64 "\n",
+				argv[0], argv[1], pos);
+			return 4;
+		}
+
+		swrela.r_offset = cpu_to_le64(rela.r_offset);
+		swrela.r_info = cpu_to_le64(rela.r_info);
+		swrela.r_addend = cpu_to_le64(rela.r_addend);
+
+		if (!supported_rela(&swrela))
+			continue;
+
+		debug("Rela %" PRIx64 " %" PRIu64 " %" PRIx64 "\n",
+		      swrela.r_offset, swrela.r_info, swrela.r_addend);
+
+		if (swrela.r_offset < text_base) {
+			fprintf(stderr, "%s: %s: bad rela at %" PRIx64 "\n",
+				argv[0], argv[1], pos);
+			return 4;
+		}
+
+		addr = swrela.r_offset - text_base;
+
+		if (fseek(f, addr, SEEK_SET) < 0) {
+			fprintf(stderr, "%s: %s: seek to %"
+					PRIx64 " failed: %s\n",
+				argv[0], argv[1], addr, strerror(errno));
+		}
+
+		if (fwrite(&rela.r_addend, sizeof(rela.r_addend), 1, f) != 1) {
+			fprintf(stderr, "%s: %s: write failed at %" PRIx64 "\n",
+				argv[0], argv[1], addr);
+			return 4;
+		}
+	}
+
+	if (fclose(f) < 0) {
+		fprintf(stderr, "%s: %s: close failed: %s\n",
+			argv[0], argv[1], strerror(errno));
+		return 4;
+	}
+
+	return 0;
+}
diff --git a/tools/u-boot-tools/rkcommon.c b/tools/u-boot-tools/rkcommon.c
new file mode 100644
index 0000000000000000000000000000000000000000..831c2ad8207ac2ca2513cccb69f879769854d518
--- /dev/null
+++ b/tools/u-boot-tools/rkcommon.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2015 Google,  Inc
+ * Written by Simon Glass <sjg@chromium.org>
+ *
+ * (C) 2017 Theobroma Systems Design und Consulting GmbH
+ *
+ * Helper functions for Rockchip images
+ */
+
+#include "imagetool.h"
+#include <image.h>
+#include <rc4.h>
+#include "mkimage.h"
+#include "rkcommon.h"
+
+#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
+
+enum {
+	RK_SIGNATURE		= 0x0ff0aa55,
+};
+
+/**
+ * struct header0_info - header block for boot ROM
+ *
+ * This is stored at SD card block 64 (where each block is 512 bytes), or at
+ * the start of SPI flash. It is encoded with RC4.
+ *
+ * @signature:		Signature (must be RK_SIGNATURE)
+ * @disable_rc4:	0 to use RC4 for the boot image, 1 to use a plain binary
+ * @init_offset:	Offset in blocks of the SPL code from this header
+ *			block. E.g. 4 means 2KB after the start of this header.
+ * Other fields are not used by U-Boot
+ */
+struct header0_info {
+	uint32_t signature;
+	uint8_t reserved[4];
+	uint32_t disable_rc4;
+	uint16_t init_offset;
+	uint8_t reserved1[492];
+	uint16_t init_size;
+	uint16_t init_boot_size;
+	uint8_t reserved2[2];
+};
+
+/**
+ * struct header1_info
+ */
+struct header1_info {
+	uint32_t magic;
+};
+
+/**
+ * struct spl_info - spl info for each chip
+ *
+ * @imagename:		Image name (as passed via "mkimage -n")
+ * @spl_hdr:		4-byte SPL header required by the boot ROM
+ * @spl_size:		SPL size (including the extra 4-byte SPL header)
+ * @spl_rc4:		RC4-encode the SPL binary (same key as the header)
+ */
+
+struct spl_info {
+	const char *imagename;
+	const char *spl_hdr;
+	const uint32_t spl_size;
+	const bool spl_rc4;
+};
+
+static struct spl_info spl_infos[] = {
+	{ "rk3036", "RK30", 0x1000, false },
+	{ "rk3128", "RK31", 0x1800, false },
+	{ "rk3188", "RK31", 0x8000 - 0x800, true },
+	{ "rk322x", "RK32", 0x8000 - 0x1000, false },
+	{ "rk3288", "RK32", 0x8000, false },
+	{ "rk3328", "RK32", 0x8000 - 0x1000, false },
+	{ "rk3368", "RK33", 0x8000 - 0x1000, false },
+	{ "rk3399", "RK33", 0x30000 - 0x2000, false },
+	{ "rv1108", "RK11", 0x1800, false },
+};
+
+static unsigned char rc4_key[16] = {
+	124, 78, 3, 4, 85, 5, 9, 7,
+	45, 44, 123, 56, 23, 13, 23, 17
+};
+
+static struct spl_info *rkcommon_get_spl_info(char *imagename)
+{
+	int i;
+
+	if (!imagename)
+		return NULL;
+
+	for (i = 0; i < ARRAY_SIZE(spl_infos); i++)
+		if (!strncmp(imagename, spl_infos[i].imagename, 6))
+			return spl_infos + i;
+
+	return NULL;
+}
+
+int rkcommon_check_params(struct image_tool_params *params)
+{
+	int i;
+
+	if (rkcommon_get_spl_info(params->imagename) != NULL)
+		return EXIT_SUCCESS;
+
+	/*
+	 * If this is a list or extract operation, then we don't require
+	 * imagename to be set.
+	 */
+	if (params->lflag || params->iflag)
+		return EXIT_SUCCESS;
+
+	fprintf(stderr, "ERROR: imagename (%s) is not supported!\n",
+		params->imagename ? params->imagename : "NULL");
+
+	fprintf(stderr, "Available imagename:");
+	for (i = 0; i < ARRAY_SIZE(spl_infos); i++)
+		fprintf(stderr, "\t%s", spl_infos[i].imagename);
+	fprintf(stderr, "\n");
+
+	return EXIT_FAILURE;
+}
+
+const char *rkcommon_get_spl_hdr(struct image_tool_params *params)
+{
+	struct spl_info *info = rkcommon_get_spl_info(params->imagename);
+
+	/*
+	 * info will not be NULL, because we checked params earlier.
+	 */
+	return info->spl_hdr;
+}
+
+
+int rkcommon_get_spl_size(struct image_tool_params *params)
+{
+	struct spl_info *info = rkcommon_get_spl_info(params->imagename);
+
+	/*
+	 * info will not be NULL, because we checked params earlier.
+	 */
+	return info->spl_size;
+}
+
+bool rkcommon_need_rc4_spl(struct image_tool_params *params)
+{
+	struct spl_info *info = rkcommon_get_spl_info(params->imagename);
+
+	/*
+	 * info will not be NULL, because we checked params earlier.
+	 */
+	return info->spl_rc4;
+}
+
+static void rkcommon_set_header0(void *buf, uint file_size,
+				 struct image_tool_params *params)
+{
+	struct header0_info *hdr = buf;
+
+	memset(buf, '\0', RK_INIT_OFFSET * RK_BLK_SIZE);
+	hdr->signature = RK_SIGNATURE;
+	hdr->disable_rc4 = !rkcommon_need_rc4_spl(params);
+	hdr->init_offset = RK_INIT_OFFSET;
+
+	hdr->init_size = DIV_ROUND_UP(file_size, RK_BLK_SIZE);
+	/*
+	 * The init_size has to be a multiple of 4 blocks (i.e. of 2K)
+	 * or the BootROM will not boot the image.
+	 *
+	 * Note: To verify that this is not a legacy constraint, we
+	 *       rechecked this against the RK3399 BootROM.
+	 */
+	hdr->init_size = ROUND(hdr->init_size, 4);
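+	/*
+	 * Illustrative (hypothetical) numbers: a 70000-byte SPL payload gives
+	 * DIV_ROUND_UP(70000, 512) = 137 blocks, rounded up to 140 blocks
+	 * (70 KiB) here.
+	 */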
+	/*
+	 * init_boot_size needs to be set, as it is read by the BootROM
+	 * to determine the size of the next-stage bootloader (e.g. U-Boot
+	 * proper), when used with the back-to-bootrom functionality.
+	 *
+	 * see https://lists.denx.de/pipermail/u-boot/2017-May/293267.html
+	 * for a more detailed explanation by Andy Yan
+	 */
+	hdr->init_boot_size = hdr->init_size + RK_MAX_BOOT_SIZE / RK_BLK_SIZE;
+
+	rc4_encode(buf, RK_BLK_SIZE, rc4_key);
+}
+
+int rkcommon_set_header(void *buf, uint file_size,
+			struct image_tool_params *params)
+{
+	struct header1_info *hdr = buf + RK_SPL_HDR_START;
+
+	if (file_size > rkcommon_get_spl_size(params))
+		return -ENOSPC;
+
+	rkcommon_set_header0(buf, file_size, params);
+
+	/* Set up the SPL name (i.e. copy spl_hdr over) */
+	memcpy(&hdr->magic, rkcommon_get_spl_hdr(params), RK_SPL_HDR_SIZE);
+
+	if (rkcommon_need_rc4_spl(params))
+		rkcommon_rc4_encode_spl(buf, RK_SPL_HDR_START,
+					params->file_size - RK_SPL_HDR_START);
+
+	return 0;
+}
+
+static inline unsigned rkcommon_offset_to_spi(unsigned offset)
+{
+	/*
+	 * While SD/MMC images use a flat addressing, SPI images are padded
+	 * to use the first 2K of every 4K sector only.
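+	 * For example, flat offset 0x7ff maps to SPI offset 0x7ff, 0x800 maps
+	 * to 0x1000 and 0x900 maps to 0x1100.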
+	 */
+	return ((offset & ~0x7ff) << 1) + (offset & 0x7ff);
+}
+
+static int rkcommon_parse_header(const void *buf, struct header0_info *header0,
+				 struct spl_info **spl_info)
+{
+	unsigned hdr1_offset;
+	struct header1_info *hdr1_sdmmc, *hdr1_spi;
+	int i;
+
+	if (spl_info)
+		*spl_info = NULL;
+
+	/*
+	 * The first header (hdr0) is always RC4 encoded, so try to decrypt
+	 * with the well-known key.
+	 */
+	memcpy((void *)header0, buf, sizeof(struct header0_info));
+	rc4_encode((void *)header0, sizeof(struct header0_info), rc4_key);
+
+	if (header0->signature != RK_SIGNATURE)
+		return -EPROTO;
+
+	/* We don't support RC4 encoded image payloads here, yet... */
+	if (header0->disable_rc4 == 0)
+		return -ENOSYS;
+
+	hdr1_offset = header0->init_offset * RK_BLK_SIZE;
+	hdr1_sdmmc = (struct header1_info *)(buf + hdr1_offset);
+	hdr1_spi = (struct header1_info *)(buf +
+					   rkcommon_offset_to_spi(hdr1_offset));
+
+	for (i = 0; i < ARRAY_SIZE(spl_infos); i++) {
+		if (!memcmp(&hdr1_sdmmc->magic, spl_infos[i].spl_hdr, 4)) {
+			if (spl_info)
+				*spl_info = &spl_infos[i];
+			return IH_TYPE_RKSD;
+		} else if (!memcmp(&hdr1_spi->magic, spl_infos[i].spl_hdr, 4)) {
+			if (spl_info)
+				*spl_info = &spl_infos[i];
+			return IH_TYPE_RKSPI;
+		}
+	}
+
+	return -1;
+}
+
+int rkcommon_verify_header(unsigned char *buf, int size,
+			   struct image_tool_params *params)
+{
+	struct header0_info header0;
+	struct spl_info *img_spl_info, *spl_info;
+	int ret;
+
+	ret = rkcommon_parse_header(buf, &header0, &img_spl_info);
+
+	/* If this is the (unimplemented) RC4 case, then rewrite the result */
+	if (ret == -ENOSYS)
+		return 0;
+
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * If no 'imagename' is specified via the commandline (e.g. if this is
+	 * 'dumpimage -l' w/o any further constraints), we accept any spl_info.
+	 */
+	if (params->imagename == NULL)
+		return 0;
+
+	/* Match the 'imagename' against the 'spl_hdr' found */
+	spl_info = rkcommon_get_spl_info(params->imagename);
+	if (spl_info && img_spl_info)
+		return strcmp(spl_info->spl_hdr, img_spl_info->spl_hdr);
+
+	return -ENOENT;
+}
+
+void rkcommon_print_header(const void *buf)
+{
+	struct header0_info header0;
+	struct spl_info *spl_info;
+	uint8_t image_type;
+	int ret;
+
+	ret = rkcommon_parse_header(buf, &header0, &spl_info);
+
+	/* If this is the (unimplemented) RC4 case, then fail silently */
+	if (ret == -ENOSYS)
+		return;
+
+	if (ret < 0) {
+		fprintf(stderr, "Error: image verification failed\n");
+		return;
+	}
+
+	image_type = ret;
+
+	printf("Image Type:   Rockchip %s (%s) boot image\n",
+	       spl_info->spl_hdr,
+	       (image_type == IH_TYPE_RKSD) ? "SD/MMC" : "SPI");
+	printf("Data Size:    %d bytes\n", header0.init_size * RK_BLK_SIZE);
+}
+
+void rkcommon_rc4_encode_spl(void *buf, unsigned int offset, unsigned int size)
+{
+	unsigned int remaining = size;
+
+	while (remaining > 0) {
+		int step = (remaining > RK_BLK_SIZE) ? RK_BLK_SIZE : remaining;
+
+		rc4_encode(buf + offset, step, rc4_key);
+		offset += RK_BLK_SIZE;
+		remaining -= step;
+	}
+}
+
+int rkcommon_vrec_header(struct image_tool_params *params,
+			 struct image_type_params *tparams,
+			 unsigned int alignment)
+{
+	unsigned int  unpadded_size;
+	unsigned int  padded_size;
+
+	/*
+	 * The SPL image looks as follows:
+	 *
+	 * 0x0    header0 (see rkcommon.c)
+	 * 0x800  spl_name ('RK30', ..., 'RK33')
+	 *        (start of the payload for AArch64 payloads: we expect the
+	 *        first 4 bytes to be available for overwriting with our
+	 *        spl_name)
+	 * 0x804  first instruction to be executed
+	 *        (start of the image/payload for 32bit payloads)
+	 *
+	 * For AArch64 (ARMv8) payloads, natural alignment (8-bytes) is
+	 * required for its sections (so the image we receive needs to
+	 * have the first 4 bytes reserved for the spl_name).  Reserving
+	 * these 4 bytes is done using the BOOT0_HOOK infrastructure.
+	 *
+	 * The header is always at 0x800 (as we now use a payload
+	 * prepadded using the boot0 hook for all targets): the first
+	 * 4 bytes of these images can safely be overwritten using the
+	 * boot magic.
+	 */
+	tparams->header_size = RK_SPL_HDR_START;
+
+	/* Allocate, clear and install the header */
+	tparams->hdr = malloc(tparams->header_size);
+	if (!tparams->hdr)
+		return -ENOMEM;
+	memset(tparams->hdr, 0, tparams->header_size);
+
+	/*
+	 * If someone passed in 0 for the alignment, we'd better handle
+	 * it correctly...
+	 */
+	if (!alignment)
+		alignment = 1;
+
+	unpadded_size = tparams->header_size + params->file_size;
+	padded_size = ROUND(unpadded_size, alignment);
+
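+	/*
+	 * Illustrative (hypothetical) numbers: with the 0x800-byte header and
+	 * a 100-byte payload, unpadded_size is 2148; with an alignment of 512
+	 * this rounds up to 2560, so 412 bytes of padding are reported.
+	 */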
+	return padded_size - unpadded_size;
+}
diff --git a/tools/u-boot-tools/rkcommon.h b/tools/u-boot-tools/rkcommon.h
new file mode 100644
index 0000000000000000000000000000000000000000..47f47a52aaf2208cca1cbf91bc1b547cd6f85af8
--- /dev/null
+++ b/tools/u-boot-tools/rkcommon.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2015 Google,  Inc
+ * Written by Simon Glass <sjg@chromium.org>
+ */
+
+#ifndef _RKCOMMON_H
+#define _RKCOMMON_H
+
+enum {
+	RK_BLK_SIZE		= 512,
+	RK_INIT_SIZE_ALIGN      = 2048,
+	RK_INIT_OFFSET		= 4,
+	RK_MAX_BOOT_SIZE	= 512 << 10,
+	RK_SPL_HDR_START	= RK_INIT_OFFSET * RK_BLK_SIZE,
+	RK_SPL_HDR_SIZE		= 4,
+	RK_SPL_START		= RK_SPL_HDR_START + RK_SPL_HDR_SIZE,
+	RK_IMAGE_HEADER_LEN	= RK_SPL_START,
+};
+
+/**
+ * rkcommon_check_params() - check params
+ *
+ * @return 0 if OK, -1 if ERROR.
+ */
+int rkcommon_check_params(struct image_tool_params *params);
+
+/**
+ * rkcommon_get_spl_hdr() - get 4-bytes spl hdr for a Rockchip boot image
+ *
+ * Rockchip's boot ROM requires the SPL loader to start with a 4-byte
+ * header. The content of this header depends on the chip type.
+ */
+const char *rkcommon_get_spl_hdr(struct image_tool_params *params);
+
+/**
+ * rkcommon_get_spl_size() - get spl size for a Rockchip boot image
+ *
+ * Different chips may have different SRAM sizes, and if we want to jump
+ * back to the bootrom after SPL we may need to reserve some SRAM space
+ * for the bootrom.
+ * The SPL loader size is therefore the SRAM size minus any reserved size.
+ */
+int rkcommon_get_spl_size(struct image_tool_params *params);
+
+/**
+ * rkcommon_set_header() - set up the header for a Rockchip boot image
+ *
+ * This sets up a 2KB header which can be interpreted by the Rockchip boot ROM.
+ *
+ * @buf:	Pointer to the header buffer (must be at least 2KB in size)
+ * @file_size:	Size of the file we want the boot ROM to load, in bytes
+ * @return 0 if OK, -ENOSPC if too large
+ */
+int rkcommon_set_header(void *buf, uint file_size,
+			struct image_tool_params *params);
+
+/**
+ * rkcommon_verify_header() - verify the header for a Rockchip boot image
+ *
+ * @buf:	Pointer to the image file
+ * @size:	Size of the entire bootable image file (incl. all padding)
+ * @return 0 if OK
+ */
+int rkcommon_verify_header(unsigned char *buf, int size,
+			   struct image_tool_params *params);
+
+/**
+ * rkcommon_print_header() - print the header for a Rockchip boot image
+ *
+ * This prints the header, spl_name and whether this is a SD/MMC or SPI image.
+ *
+ * @buf:	Pointer to the image (can be a read-only file-mapping)
+ */
+void rkcommon_print_header(const void *buf);
+
+/**
+ * rkcommon_need_rc4_spl() - check if rc4 encoded spl is required
+ *
+ * RC4 encryption of the SPL binary is normally disabled, but some SoCs
+ * cannot handle unencrypted binaries and therefore require it.
+ * @return true if RC4 encoding is required, false otherwise.
+ */
+bool rkcommon_need_rc4_spl(struct image_tool_params *params);
+
+/**
+ * rkcommon_rc4_encode_spl() - encode the spl binary
+ *
+ * Encrypts the SPL binary using the generic rc4 key as required
+ * by some socs.
+ *
+ * @buf:	Pointer to the SPL data (header and SPL binary)
+ * @offset:	offset inside buf to start at
+ * @size:	number of bytes to encode
+ */
+void rkcommon_rc4_encode_spl(void *buf, unsigned int offset, unsigned int size);
+
+/**
+ * rkcommon_vrec_header() - allocate memory for the header
+ *
+ * @params:     Pointer to the tool params structure
+ * @tparams:    Pointer to the image type structure (for setting
+ *              the header and header_size)
+ * @alignment:  Alignment (a power of two) that the image should be
+ *              padded to (e.g. 512 if we want to align with SD/MMC
+ *              blocksizes or 2048 for the SPI format)
+ *
+ * @return bytes of padding required/added (does not include the header_size)
+ */
+int rkcommon_vrec_header(struct image_tool_params *params,
+			 struct image_type_params *tparams,
+			 unsigned int alignment);
+
+#endif
diff --git a/tools/u-boot-tools/rkcommon.o b/tools/u-boot-tools/rkcommon.o
new file mode 100644
index 0000000000000000000000000000000000000000..592ab723c6af71b831bd01bbbb9488ccb1f2f7c7
Binary files /dev/null and b/tools/u-boot-tools/rkcommon.o differ
diff --git a/tools/u-boot-tools/rkimage.c b/tools/u-boot-tools/rkimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..ae50de55c93b1fe32a3072d7ef1403ea5c966bda
--- /dev/null
+++ b/tools/u-boot-tools/rkimage.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2015 Google, Inc
+ * Written by Simon Glass <sjg@chromium.org>
+ *
+ * See README.rockchip for details of the rkimage format
+ */
+
+#include "imagetool.h"
+#include <image.h>
+#include "rkcommon.h"
+
+static uint32_t header;
+
+static void rkimage_set_header(void *buf, struct stat *sbuf, int ifd,
+			       struct image_tool_params *params)
+{
+	memcpy(buf, rkcommon_get_spl_hdr(params), RK_SPL_HDR_SIZE);
+
+	if (rkcommon_need_rc4_spl(params))
+		rkcommon_rc4_encode_spl(buf, 4, params->file_size);
+}
+
+static int rkimage_check_image_type(uint8_t type)
+{
+	if (type == IH_TYPE_RKIMAGE)
+		return EXIT_SUCCESS;
+	else
+		return EXIT_FAILURE;
+}
+
+/*
+ * rk_image parameters
+ */
+U_BOOT_IMAGE_TYPE(
+	rkimage,
+	"Rockchip Boot Image support",
+	0,
+	&header,
+	rkcommon_check_params,
+	NULL,
+	NULL,
+	rkimage_set_header,
+	NULL,
+	rkimage_check_image_type,
+	NULL,
+	NULL
+);
diff --git a/tools/u-boot-tools/rkimage.o b/tools/u-boot-tools/rkimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..e4b5491911133ff2d1f9c030c00d360c4d08bce4
Binary files /dev/null and b/tools/u-boot-tools/rkimage.o differ
diff --git a/tools/u-boot-tools/rkmux.py b/tools/u-boot-tools/rkmux.py
new file mode 100755
index 0000000000000000000000000000000000000000..11c192a07375d8cf3409ec2e15ec692d35f48e43
--- /dev/null
+++ b/tools/u-boot-tools/rkmux.py
@@ -0,0 +1,218 @@
+#!/usr/bin/env python2
+
+# Script to create enums from datasheet register tables
+#
+# Usage:
+#
+# First, create a text file from the datasheet:
+#    pdftotext -layout /path/to/rockchip-3288-trm.pdf /tmp/asc
+#
+# Then use this script to output the #defines for a particular register:
+#    ./tools/rkmux.py GRF_GPIO4C_IOMUX
+#
+# It will create output suitable for putting in a header file, with SHIFT and
+# MASK values for each bitfield in the register.
+#
+# Note: this tool is not perfect and you may need to edit the resulting code.
+# But it should speed up the process.
+
+import csv
+import re
+import sys
+
+tab_to_col = 3
+
+class RegField:
+    def __init__(self, cols=None):
+        if cols:
+            self.bits, self.attr, self.reset_val, self.desc = (
+                [x.strip() for x in cols])
+            self.desc = [self.desc]
+        else:
+            self.bits = ''
+            self.attr = ''
+            self.reset_val = ''
+            self.desc = []
+
+    def Setup(self, cols):
+        self.bits, self.attr, self.reset_val = cols[0:3]
+        if len(cols) > 3:
+            self.desc.append(cols[3])
+
+    def AddDesc(self, desc):
+        self.desc.append(desc)
+
+    def Show(self):
+        print self
+        print
+        self.__init__()
+
+    def __str__(self):
+        return '%s,%s,%s,%s' % (self.bits, self.attr, self.reset_val,
+                                '\n'.join(self.desc))
+
+class Printer:
+    def __init__(self, name):
+        self.first = True
+        self.name = name
+        self.re_sel = re.compile("[1-9]'b([01]+): (.*)")
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        if not self.first:
+            self.output_footer()
+
+    def output_header(self):
+        print '/* %s */' % self.name
+        print 'enum {'
+
+    def output_footer(self):
+        print '};';
+
+    def output_regfield(self, regfield):
+        lines = regfield.desc
+        field = lines[0]
+        #print 'field:', field
+        if field in ['reserved', 'reserve', 'write_enable', 'write_mask']:
+            return
+        if field.endswith('_sel') or field.endswith('_con'):
+            field = field[:-4]
+        elif field.endswith(' iomux'):
+            field = field[:-6]
+        elif field.endswith('_mode') or field.endswith('_mask'):
+            field = field[:-5]
+        #else:
+            #print 'bad field %s' % field
+            #return
+        field = field.upper()
+        if ':' in regfield.bits:
+            bit_high, bit_low = [int(x) for x in regfield.bits.split(':')]
+        else:
+            bit_high = bit_low = int(regfield.bits)
+        bit_width = bit_high - bit_low + 1
+        mask = (1 << bit_width) - 1
+        if self.first:
+            self.first = False
+            self.output_header()
+        else:
+            print
+        out_enum(field, 'shift', bit_low)
+        out_enum(field, 'mask', mask)
+        next_val = -1
+        #print 'lines: %s', lines
+        for line in lines:
+            m = self.re_sel.match(line)
+            if m:
+                val, enum = int(m.group(1), 2), m.group(2)
+                if enum not in ['reserved', 'reserve']:
+                    out_enum(field, enum, val, val == next_val)
+                    next_val = val + 1
+
+
+def process_file(name, fd):
+    field = RegField()
+    reg = ''
+
+    fields = []
+
+    def add_it(field):
+        if field.bits:
+            if reg == name:
+                fields.append(field)
+            field = RegField()
+        return field
+
+    def is_field_start(line):
+        if '=' in line or '+' in line:
+            return False
+        if (line.startswith('gpio') or line.startswith('peri_') or
+                line.endswith('_sel') or line.endswith('_con')):
+            return True
+        if ' ' not in line: # and '_' in line:
+            return True
+        return False
+
+    for line in fd:
+        line = line.rstrip()
+        if line[:4] in ['GRF_', 'PMU_', 'CRU_']:
+            field = add_it(field)
+            reg = line
+            do_this = name == reg
+        elif not line or not line.startswith(' '):
+            continue
+        line = line.replace('\xe2\x80\x99', "'")
+        leading = len(line) - len(line.lstrip())
+        line = line.lstrip()
+        cols = re.split(' *', line, 3)
+        if leading > 15 or (len(cols) > 3 and is_field_start(cols[3])):
+            if is_field_start(line):
+                field = add_it(field)
+            field.AddDesc(line)
+        else:
+            if cols[0] == 'Bit' or len(cols) < 3:
+                continue
+            #print
+            #print field
+            field = add_it(field)
+            field.Setup(cols)
+    field = add_it(field)
+
+    with Printer(name) as printer:
+        for field in fields:
+            #print field
+            printer.output_regfield(field)
+            #print
+
+def out_enum(field, suffix, value, skip_val=False):
+    str = '%s_%s' % (field.upper(), suffix.upper())
+    if not skip_val:
+        tabs = tab_to_col - len(str) / 8
+        if value > 9:
+            val_str = '%#x' % value
+        else:
+            val_str = '%d' % value
+
+        str += '%s= %s' % ('\t' * tabs, val_str)
+    print '\t%s,' % str
+
+# Process a CSV file, e.g. from tabula
+def process_csv(name, fd):
+    reader = csv.reader(fd)
+
+    rows = []
+
+    field = RegField()
+    for row in reader:
+        #print field.desc
+        if not row[0]:
+            field.desc.append(row[3])
+            continue
+        if field.bits:
+            if field.bits != 'Bit':
+                rows.append(field)
+        #print row
+        field = RegField(row)
+
+    with Printer(name) as printer:
+        for row in rows:
+            #print field
+            printer.output_regfield(row)
+            #print
+
+fname = sys.argv[1]
+name = sys.argv[2]
+
+# Read output from pdftotext -layout
+if 1:
+    with open(fname, 'r') as fd:
+        process_file(name, fd)
+
+# Use tabula
+# It seems to be better at outputting text for an entire cell in one cell.
+# But it does not always work. E.g. GRF_GPIO7CH_IOMUX.
+# So there is no point in using it.
+if 0:
+    with open(fname, 'r') as fd:
+        process_csv(name, fd)
diff --git a/tools/u-boot-tools/rksd.c b/tools/u-boot-tools/rksd.c
new file mode 100644
index 0000000000000000000000000000000000000000..24411d863a0aa7ec161bf12d19d1d0374567f344
--- /dev/null
+++ b/tools/u-boot-tools/rksd.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2015 Google,  Inc
+ * Written by Simon Glass <sjg@chromium.org>
+ *
+ * See README.rockchip for details of the rksd format
+ */
+
+#include "imagetool.h"
+#include <image.h>
+#include <rc4.h>
+#include "mkimage.h"
+#include "rkcommon.h"
+
+static void rksd_set_header(void *buf,  struct stat *sbuf,  int ifd,
+			    struct image_tool_params *params)
+{
+	unsigned int size;
+	int ret;
+
+	/*
+	 * We need to calculate this using 'RK_SPL_HDR_START' and not using
+	 * 'tparams->header_size', as the additional byte inserted when
+	 * 'is_boot0' is true counts towards the payload (and not towards the
+	 * header).
+	 */
+	size = params->file_size - RK_SPL_HDR_START;
+	ret = rkcommon_set_header(buf, size, params);
+	if (ret) {
+		/* TODO(sjg@chromium.org): This method should return an error */
+		printf("Warning: SPL image is too large (size %#x) and will "
+		       "not boot\n", size);
+	}
+}
+
+static int rksd_check_image_type(uint8_t type)
+{
+	if (type == IH_TYPE_RKSD)
+		return EXIT_SUCCESS;
+	else
+		return EXIT_FAILURE;
+}
+
+static int rksd_vrec_header(struct image_tool_params *params,
+			    struct image_type_params *tparams)
+{
+	/*
+	 * Pad to a 2KB alignment, as required for init_size by the ROM
+	 * (see https://lists.denx.de/pipermail/u-boot/2017-May/293268.html)
+	 */
+	return rkcommon_vrec_header(params, tparams, RK_INIT_SIZE_ALIGN);
+}
+
+/*
+ * rk_sd parameters
+ */
+U_BOOT_IMAGE_TYPE(
+	rksd,
+	"Rockchip SD Boot Image support",
+	0,
+	NULL,
+	rkcommon_check_params,
+	rkcommon_verify_header,
+	rkcommon_print_header,
+	rksd_set_header,
+	NULL,
+	rksd_check_image_type,
+	NULL,
+	rksd_vrec_header
+);
diff --git a/tools/u-boot-tools/rksd.o b/tools/u-boot-tools/rksd.o
new file mode 100644
index 0000000000000000000000000000000000000000..516932dabc2e55e2513ce68235e2bb8b5fd3af4f
Binary files /dev/null and b/tools/u-boot-tools/rksd.o differ
diff --git a/tools/u-boot-tools/rkspi.c b/tools/u-boot-tools/rkspi.c
new file mode 100644
index 0000000000000000000000000000000000000000..faa18fcd183c3de3fc464b9cfc205c6124d80bea
--- /dev/null
+++ b/tools/u-boot-tools/rkspi.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2015 Google,  Inc
+ * Written by Simon Glass <sjg@chromium.org>
+ *
+ * See README.rockchip for details of the rkspi format
+ */
+
+#include "imagetool.h"
+#include <image.h>
+#include <rc4.h>
+#include "mkimage.h"
+#include "rkcommon.h"
+
+enum {
+	RKSPI_SECT_LEN		= RK_BLK_SIZE * 4,
+};
+
+static void rkspi_set_header(void *buf, struct stat *sbuf, int ifd,
+			     struct image_tool_params *params)
+{
+	int sector;
+	unsigned int size;
+	int ret;
+
+	size = params->orig_file_size;
+	ret = rkcommon_set_header(buf, size, params);
+	debug("size %x\n", size);
+	if (ret) {
+		/* TODO(sjg@chromium.org): This method should return an error */
+		printf("Warning: SPL image is too large (size %#x) and will "
+		       "not boot\n", size);
+	}
+
+	/*
+	 * Spread the image out so we only use the first 2KB of each 4KB
+	 * region. This is a feature of the SPI format required by the Rockchip
+	 * boot ROM. Its rationale is unknown.
+	 */
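+	/*
+	 * Illustration (RKSPI_SECT_LEN is four RK_BLK_SIZE blocks, i.e. one
+	 * 2KB half-sector when RK_BLK_SIZE is 512 bytes): an input made of
+	 * 2KB chunks |A|B|C|D| becomes |A|0|B|0|C|0|D|0|.  Working from the
+	 * last chunk backwards expands the image in place without
+	 * overwriting chunks that have not been moved yet.
+	 */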
+	for (sector = size / RKSPI_SECT_LEN - 1; sector >= 0; sector--) {
+		debug("sector %u\n", sector);
+		memmove(buf + sector * RKSPI_SECT_LEN * 2,
+			buf + sector * RKSPI_SECT_LEN,
+			RKSPI_SECT_LEN);
+		memset(buf + sector * RKSPI_SECT_LEN * 2 + RKSPI_SECT_LEN,
+		       '\0', RKSPI_SECT_LEN);
+	}
+}
+
+static int rkspi_check_image_type(uint8_t type)
+{
+	if (type == IH_TYPE_RKSPI)
+		return EXIT_SUCCESS;
+	else
+		return EXIT_FAILURE;
+}
+
+/*
+ * The SPI payload needs to be padded out to make space for the odd
+ * half-sector layout used in flash (i.e. only the first 2K of each 4K
+ * sector is used).
+ */
+static int rkspi_vrec_header(struct image_tool_params *params,
+			     struct image_type_params *tparams)
+{
+	int padding = rkcommon_vrec_header(params, tparams, RK_INIT_SIZE_ALIGN);
+	/*
+	 * The file size has not been adjusted at this point (our caller will
+	 * eventually add the header/padding to the file_size), so we need to
+	 * add up the header_size, file_size and padding ourselves.
+	 */
+	int padded_size = tparams->header_size + params->file_size + padding;
+
+	/*
+	 * We need to store the image size before the SPI spreading (i.e.
+	 * header + payload + padding), as imagetool does not set this
+	 * during its adjustment of file_size.
+	 */
+	params->orig_file_size = padded_size;
+
+	/*
+	 * Converting to the SPI format (i.e. splitting each 4K page into two
+	 * 2K subpages and then padding these 2K pages up to take a complete
+	 * 4K sector again) will double the image size.
+	 *
+	 * Thus we return the padded_size as an additional padding requirement
+	 * (be sure to add this to the padding returned from the common code).
+	 */
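+	/*
+	 * Worked example with illustrative numbers (assuming a 2048-byte
+	 * header and 2KB alignment): a 30000-byte SPL pads up to a
+	 * padded_size of 32768 bytes, and doubling for the 2KB-in-4KB
+	 * layout yields a 65536-byte image.
+	 */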
+	return padded_size + padding;
+}
+
+/*
+ * rk_spi parameters
+ */
+U_BOOT_IMAGE_TYPE(
+	rkspi,
+	"Rockchip SPI Boot Image support",
+	0,
+	NULL,
+	rkcommon_check_params,
+	rkcommon_verify_header,
+	rkcommon_print_header,
+	rkspi_set_header,
+	NULL,
+	rkspi_check_image_type,
+	NULL,
+	rkspi_vrec_header
+);
diff --git a/tools/u-boot-tools/rkspi.o b/tools/u-boot-tools/rkspi.o
new file mode 100644
index 0000000000000000000000000000000000000000..2ba893f8f3c1e89bebaed12cfd870904c0dda609
Binary files /dev/null and b/tools/u-boot-tools/rkspi.o differ
diff --git a/tools/u-boot-tools/scripts/define2mk.sed b/tools/u-boot-tools/scripts/define2mk.sed
new file mode 100644
index 0000000000000000000000000000000000000000..0f00285f367e4c1d429aee1f8e8b069a054c2280
--- /dev/null
+++ b/tools/u-boot-tools/scripts/define2mk.sed
@@ -0,0 +1,37 @@
+#
+# Sed script to parse CPP macros and generate output usable by make
+#
+# It is expected that this script is fed the output of 'gpp -dM'
+# which preprocesses the common.h header files and outputs the final
+# list of CPP macros (and whitespace is sanitized)
+#
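+# Example (illustrative): the input line
+#   #define CONFIG_SYS_BAUDRATE_TABLE { 9600, 115200 }
+# becomes
+#   CONFIG_SYS_BAUDRATE_TABLE="{ 9600, 115200 }"
+# while '#define CONFIG_CMD_NET 1' becomes 'CONFIG_CMD_NET=y'.
+#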
+
+# Only process values prefixed with #define CONFIG_
+/^#define CONFIG_[A-Za-z0-9_][A-Za-z0-9_]*/ {
+	# Strip the #define prefix
+	s/#define *//;
+	# Change to form CONFIG_*=VALUE
+	s/  */=/;
+	# Drop trailing spaces
+	s/ *$//;
+	# drop quotes around string values
+	s/="\(.*\)"$/=\1/;
+	# Concatenate string values
+	s/" *"//g;
+	# Assume strings as default - add quotes around values
+	s/=\(..*\)/="\1"/;
+	# but remove again from decimal numbers
+	s/="\([0-9][0-9]*\)"/=\1/;
+	# ... and from negative decimal numbers
+	s/="\(-[1-9][0-9]*\)"/=\1/;
+	# ... and from hex numbers
+	s/="\(0[Xx][0-9a-fA-F][0-9a-fA-F]*\)"/=\1/;
+	# ... and from configs defined from other configs
+	s/="\(CONFIG_[A-Za-z0-9_][A-Za-z0-9_]*\)"/=$(\1)/;
+	# Change '1' and empty values to "y" (not perfect, but
+	# supports conditional compilation in the makefiles)
+	s/=$/=y/;
+	s/=1$/=y/;
+	# print the line
+	p
+}
diff --git a/tools/u-boot-tools/socfpgaimage.c b/tools/u-boot-tools/socfpgaimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..72d8b96f54140c535276040890d5dfc2d05997cd
--- /dev/null
+++ b/tools/u-boot-tools/socfpgaimage.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2014 Charles Manning <cdhmanning@gmail.com>
+ *
+ * Reference documents:
+ *   Cyclone V SoC: https://www.altera.com/content/dam/altera-www/global/en_US/pdfs/literature/hb/cyclone-v/cv_5400a.pdf
+ *   Arria V SoC:   https://www.altera.com/content/dam/altera-www/global/en_US/pdfs/literature/hb/arria-v/av_5400a.pdf
+ *   Arria 10 SoC:  https://www.altera.com/content/dam/altera-www/global/en_US/pdfs/literature/hb/arria-10/a10_5400a.pdf
+ *
+ * Bootable SoCFPGA image requires a structure of the following format
+ * positioned at offset 0x40 of the bootable image. Endian is LSB.
+ *
+ * There are two versions of the SoCFPGA header format, v0 and v1.
+ * The version 0 is used by Cyclone V SoC and Arria V SoC, while
+ * the version 1 is used by the Arria 10 SoC.
+ *
+ * Version 0:
+ * Offset   Length   Usage
+ * -----------------------
+ *   0x40        4   Validation word (0x31305341)
+ *   0x44        1   Version (0x0)
+ *   0x45        1   Flags (unused, zero is fine)
+ *   0x46        2   Length (in units of u32, including the end checksum).
+ *   0x48        2   Zero (0x0)
+ *   0x4A        2   Checksum over the header. NB Not CRC32
+ *
+ * Version 1:
+ * Offset   Length   Usage
+ * -----------------------
+ *   0x40        4   Validation word (0x31305341)
+ *   0x44        1   Version (0x1)
+ *   0x45        1   Flags (unused, zero is fine)
+ *   0x46        2   Header length (in units of u8).
+ *   0x48        4   Length (in units of u8).
+ *   0x4C        4   Image entry offset from start of header
+ *   0x50        2   Zero (0x0)
+ *   0x52        2   Checksum over the header. NB Not CRC32
+ *
+ * At the end of the code we have a 32-bit CRC checksum over whole binary
+ * excluding the CRC.
+ *
+ * Note that the CRC used here is **not** the zlib/Adler crc32. It is the
+ * CRC-32 used in bzip2, ethernet and elsewhere.
+ *
+ * The Image entry offset in a version 1 image is relative to the start of
+ * the header, 0x40, and must not be a negative number. Therefore, it is
+ * only possible to make the SoCFPGA jump forward. The U-Boot bootloader
+ * places a trampoline instruction at offset 0x5c, 0x14 bytes from the
+ * start of the SoCFPGA header, which jumps to the reset vector.
+ *
+ * The image is padded out to 64 KiB (v0) or 256 KiB (v1), because that
+ * is the size typically used to write the image to the boot medium.
+ */
+
+#include "pbl_crc32.h"
+#include "imagetool.h"
+#include "mkimage.h"
+
+#include <image.h>
+
+#define HEADER_OFFSET	0x40
+#define VALIDATION_WORD	0x31305341
+
+static uint8_t buffer_v0[0x10000];
+static uint8_t buffer_v1[0x40000];
+
+struct socfpga_header_v0 {
+	uint32_t	validation;
+	uint8_t		version;
+	uint8_t		flags;
+	uint16_t	length_u32;
+	uint16_t	zero;
+	uint16_t	checksum;
+};
+
+struct socfpga_header_v1 {
+	uint32_t	validation;
+	uint8_t		version;
+	uint8_t		flags;
+	uint16_t	header_u8;
+	uint32_t	length_u8;
+	uint32_t	entry_offset;
+	uint16_t	zero;
+	uint16_t	checksum;
+};
+
+static unsigned int sfp_hdr_size(uint8_t ver)
+{
+	if (ver == 0)
+		return sizeof(struct socfpga_header_v0);
+	if (ver == 1)
+		return sizeof(struct socfpga_header_v1);
+	return 0;
+}
+
+static unsigned int sfp_pad_size(uint8_t ver)
+{
+	if (ver == 0)
+		return sizeof(buffer_v0);
+	if (ver == 1)
+		return sizeof(buffer_v1);
+	return 0;
+}
+
+/*
+ * The header checksum is just a very simple checksum over
+ * the header area.
+ * There is still a crc32 over the whole lot.
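+ * Note that the pre-decrement in the loop below means the last byte
+ * before the checksum field is not included in the sum; since image
+ * creation and verification both use this routine, the result stays
+ * self-consistent.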
+ */
+static uint16_t sfp_hdr_checksum(uint8_t *buf, unsigned char ver)
+{
+	uint16_t ret = 0;
+	int len = sfp_hdr_size(ver) - sizeof(ret);
+
+	while (--len)
+		ret += *buf++;
+
+	return ret;
+}
+
+static void sfp_build_header(uint8_t *buf, uint8_t ver, uint8_t flags,
+			     uint32_t length_bytes)
+{
+	struct socfpga_header_v0 header_v0 = {
+		.validation	= cpu_to_le32(VALIDATION_WORD),
+		.version	= 0,
+		.flags		= flags,
+		.length_u32	= cpu_to_le16(length_bytes / 4),
+		.zero		= 0,
+	};
+
+	struct socfpga_header_v1 header_v1 = {
+		.validation	= cpu_to_le32(VALIDATION_WORD),
+		.version	= 1,
+		.flags		= flags,
+		.header_u8	= cpu_to_le16(sizeof(header_v1)),
+		.length_u8	= cpu_to_le32(length_bytes),
+		.entry_offset	= cpu_to_le32(0x14),	/* Trampoline offset */
+		.zero		= 0,
+	};
+
+	uint16_t csum;
+
+	if (ver == 0) {
+		csum = sfp_hdr_checksum((uint8_t *)&header_v0, 0);
+		header_v0.checksum = cpu_to_le16(csum);
+		memcpy(buf, &header_v0, sizeof(header_v0));
+	} else {
+		csum = sfp_hdr_checksum((uint8_t *)&header_v1, 1);
+		header_v1.checksum = cpu_to_le16(csum);
+		memcpy(buf, &header_v1, sizeof(header_v1));
+	}
+}
+
+/*
+ * Perform a rudimentary verification of header and return
+ * size of image.
+ */
+static int sfp_verify_header(const uint8_t *buf, uint8_t *ver)
+{
+	struct socfpga_header_v0 header_v0;
+	struct socfpga_header_v1 header_v1;
+	uint16_t hdr_csum, sfp_csum;
+	uint32_t img_len;
+
+	/*
+	 * Header v0 is always smaller than Header v1, and the validation
+	 * word and version fields are in the same place, so use Header v0
+	 * to check the version during verification and upgrade to Header
+	 * v1 if needed.
+	 */
+	memcpy(&header_v0, buf, sizeof(header_v0));
+
+	if (le32_to_cpu(header_v0.validation) != VALIDATION_WORD)
+		return -1;
+
+	if (header_v0.version == 0) {
+		hdr_csum = le16_to_cpu(header_v0.checksum);
+		sfp_csum = sfp_hdr_checksum((uint8_t *)&header_v0, 0);
+		img_len = le16_to_cpu(header_v0.length_u32) * 4;
+	} else if (header_v0.version == 1) {
+		memcpy(&header_v1, buf, sizeof(header_v1));
+		hdr_csum = le16_to_cpu(header_v1.checksum);
+		sfp_csum = sfp_hdr_checksum((uint8_t *)&header_v1, 1);
+		img_len = le32_to_cpu(header_v1.length_u8);
+	} else {	/* Invalid version */
+		return -EINVAL;
+	}
+
+	/* Verify checksum */
+	if (hdr_csum != sfp_csum)
+		return -EINVAL;
+
+	*ver = header_v0.version;
+	return img_len;
+}
+
+/* Sign the buffer and return the signed buffer size */
+static int sfp_sign_buffer(uint8_t *buf, uint8_t ver, uint8_t flags,
+			   int len, int pad_64k)
+{
+	uint32_t calc_crc;
+
+	/* Align the length up */
+	len = (len + 3) & ~3;
+
+	/* Build header, adding 4 bytes to length to hold the CRC32. */
+	sfp_build_header(buf + HEADER_OFFSET, ver, flags, len + 4);
+
+	/* Calculate and apply the CRC */
+	calc_crc = ~pbl_crc32(0, (char *)buf, len);
+
+	*((uint32_t *)(buf + len)) = cpu_to_le32(calc_crc);
+
+	if (!pad_64k)
+		return len + 4;
+
+	return sfp_pad_size(ver);
+}
+
+/* Verify that the buffer looks sane */
+static int sfp_verify_buffer(const uint8_t *buf)
+{
+	int len; /* Including 32bit CRC */
+	uint32_t calc_crc;
+	uint32_t buf_crc;
+	uint8_t ver = 0;
+
+	len = sfp_verify_header(buf + HEADER_OFFSET, &ver);
+	if (len < 0) {
+		debug("Invalid header\n");
+		return -1;
+	}
+
+	if (len < HEADER_OFFSET || len > sfp_pad_size(ver)) {
+		debug("Invalid header length (%i)\n", len);
+		return -1;
+	}
+
+	/*
+	 * Adjust length to the base of the CRC.
+	 * Check the CRC.
+	 */
+	len -= 4;
+
+	calc_crc = ~pbl_crc32(0, (const char *)buf, len);
+
+	buf_crc = le32_to_cpu(*((uint32_t *)(buf + len)));
+
+	if (buf_crc != calc_crc) {
+		fprintf(stderr, "CRC32 does not match (%08x != %08x)\n",
+			buf_crc, calc_crc);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* mkimage glue functions */
+static int socfpgaimage_verify_header(unsigned char *ptr, int image_size,
+				      struct image_tool_params *params)
+{
+	if (image_size < 0x80)
+		return -1;
+
+	return sfp_verify_buffer(ptr);
+}
+
+static void socfpgaimage_print_header(const void *ptr)
+{
+	if (sfp_verify_buffer(ptr) == 0)
+		printf("Looks like a sane SOCFPGA preloader\n");
+	else
+		printf("Not a sane SOCFPGA preloader\n");
+}
+
+static int socfpgaimage_check_params(struct image_tool_params *params)
+{
+	/* Not sure if we should be accepting fflags */
+	return	(params->dflag && (params->fflag || params->lflag)) ||
+		(params->fflag && (params->dflag || params->lflag)) ||
+		(params->lflag && (params->dflag || params->fflag));
+}
+
+static int socfpgaimage_check_image_types_v0(uint8_t type)
+{
+	if (type == IH_TYPE_SOCFPGAIMAGE)
+		return EXIT_SUCCESS;
+	return EXIT_FAILURE;
+}
+
+static int socfpgaimage_check_image_types_v1(uint8_t type)
+{
+	if (type == IH_TYPE_SOCFPGAIMAGE_V1)
+		return EXIT_SUCCESS;
+	return EXIT_FAILURE;
+}
+
+/*
+ * To fit in with the mkimage framework, we do some ugly stuff...
+ *
+ * First, socfpgaimage_vrec_header() is called.
+ * We prepend a fake header big enough to make the file sfp_pad_size().
+ * This gives us enough space to do what we want later.
+ *
+ * Next, socfpgaimage_set_header() is called.
+ * We fix up the buffer by moving the image to the start of the buffer.
+ * We now have some room to do what we need (add CRC and padding).
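+ *
+ * A rough sketch of the buffer at each stage (v0 sizes shown, with a
+ * payload of data_size bytes; this is an illustration only):
+ *
+ *   after vrec_header():  [ fake header: 64 KiB - data_size ][ payload ]
+ *   after set_header():   [ payload ][ zero padding .................. ]
+ *                         with the boot header rewritten at offset 0x40
+ *                         and the CRC32 stored after the aligned payload.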
+ */
+
+static int data_size;
+
+static int sfp_fake_header_size(unsigned int size, uint8_t ver)
+{
+	return sfp_pad_size(ver) - size;
+}
+
+static int sfp_vrec_header(struct image_tool_params *params,
+			   struct image_type_params *tparams, uint8_t ver)
+{
+	struct stat sbuf;
+
+	if (params->datafile &&
+	    stat(params->datafile, &sbuf) == 0 &&
+	    sbuf.st_size <= (sfp_pad_size(ver) - sizeof(uint32_t))) {
+		data_size = sbuf.st_size;
+		tparams->header_size = sfp_fake_header_size(data_size, ver);
+	}
+	return 0;
+}
+
+static int socfpgaimage_vrec_header_v0(struct image_tool_params *params,
+				       struct image_type_params *tparams)
+{
+	return sfp_vrec_header(params, tparams, 0);
+}
+
+static int socfpgaimage_vrec_header_v1(struct image_tool_params *params,
+				       struct image_type_params *tparams)
+{
+	return sfp_vrec_header(params, tparams, 1);
+}
+
+static void sfp_set_header(void *ptr, unsigned char ver)
+{
+	uint8_t *buf = (uint8_t *)ptr;
+
+	/*
+	 * This function is called after vrec_header() has been called.
+	 * At this stage we have the sfp_fake_header_size() dummy bytes
+	 * followed by data_size image bytes. Total = sfp_pad_size().
+	 * We need to fix the buffer by moving the image bytes back to
+	 * the beginning of the buffer, then actually do the signing stuff...
+	 */
+	memmove(buf, buf + sfp_fake_header_size(data_size, ver), data_size);
+	memset(buf + data_size, 0, sfp_fake_header_size(data_size, ver));
+
+	sfp_sign_buffer(buf, ver, 0, data_size, 0);
+}
+
+static void socfpgaimage_set_header_v0(void *ptr, struct stat *sbuf, int ifd,
+				       struct image_tool_params *params)
+{
+	sfp_set_header(ptr, 0);
+}
+
+static void socfpgaimage_set_header_v1(void *ptr, struct stat *sbuf, int ifd,
+				       struct image_tool_params *params)
+{
+	sfp_set_header(ptr, 1);
+}
+
+U_BOOT_IMAGE_TYPE(
+	socfpgaimage,
+	"Altera SoCFPGA Cyclone V / Arria V image support",
+	0, /* This will be modified by vrec_header() */
+	(void *)buffer_v0,
+	socfpgaimage_check_params,
+	socfpgaimage_verify_header,
+	socfpgaimage_print_header,
+	socfpgaimage_set_header_v0,
+	NULL,
+	socfpgaimage_check_image_types_v0,
+	NULL,
+	socfpgaimage_vrec_header_v0
+);
+
+U_BOOT_IMAGE_TYPE(
+	socfpgaimage_v1,
+	"Altera SoCFPGA Arria10 image support",
+	0, /* This will be modified by vrec_header() */
+	(void *)buffer_v1,
+	socfpgaimage_check_params,
+	socfpgaimage_verify_header,
+	socfpgaimage_print_header,
+	socfpgaimage_set_header_v1,
+	NULL,
+	socfpgaimage_check_image_types_v1,
+	NULL,
+	socfpgaimage_vrec_header_v1
+);
diff --git a/tools/u-boot-tools/socfpgaimage.o b/tools/u-boot-tools/socfpgaimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..5880dee28208c92b6cc257bcb79e793f398f6caa
Binary files /dev/null and b/tools/u-boot-tools/socfpgaimage.o differ
diff --git a/tools/u-boot-tools/stm32image.c b/tools/u-boot-tools/stm32image.c
new file mode 100644
index 0000000000000000000000000000000000000000..08b32ba87dd512a1f5377d24d679dd97731427cd
--- /dev/null
+++ b/tools/u-boot-tools/stm32image.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (C) 2018, STMicroelectronics - All Rights Reserved
+ */
+
+#include <image.h>
+#include "imagetool.h"
+
+/* magic ='S' 'T' 'M' 0x32 */
+#define HEADER_MAGIC be32_to_cpu(0x53544D32)
+#define VER_MAJOR_IDX	2
+#define VER_MINOR_IDX	1
+#define VER_VARIANT_IDX	0
+#define HEADER_VERSION_V1	0x1
+/* default option : bit0 => no signature */
+#define HEADER_DEFAULT_OPTION	(cpu_to_le32(0x00000001))
+
+struct stm32_header {
+	uint32_t magic_number;
+	uint32_t image_signature[64 / 4];
+	uint32_t image_checksum;
+	uint8_t  header_version[4];
+	uint32_t image_length;
+	uint32_t image_entry_point;
+	uint32_t reserved1;
+	uint32_t load_address;
+	uint32_t reserved2;
+	uint32_t version_number;
+	uint32_t option_flags;
+	uint32_t ecdsa_algorithm;
+	uint32_t ecdsa_public_key[64 / 4];
+	uint32_t padding[84 / 4];
+};
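+
+/*
+ * The fields above add up to a 256-byte header; stm32image_checksum()
+ * below is a plain byte-wise sum over everything that follows these
+ * 256 bytes.
+ */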
+
+static struct stm32_header stm32image_header;
+
+static void stm32image_default_header(struct stm32_header *ptr)
+{
+	if (!ptr)
+		return;
+
+	ptr->magic_number = HEADER_MAGIC;
+	ptr->header_version[VER_MAJOR_IDX] = HEADER_VERSION_V1;
+	ptr->option_flags = HEADER_DEFAULT_OPTION;
+	ptr->ecdsa_algorithm = 1;
+}
+
+static uint32_t stm32image_checksum(void *start, uint32_t len)
+{
+	uint32_t csum = 0;
+	uint32_t hdr_len = sizeof(struct stm32_header);
+	uint8_t *p;
+
+	if (len < hdr_len)
+		return 0;
+
+	p = start + hdr_len;
+	len -= hdr_len;
+
+	while (len > 0) {
+		csum += *p;
+		p++;
+		len--;
+	}
+
+	return csum;
+}
+
+static int stm32image_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_STM32IMAGE)
+		return EXIT_SUCCESS;
+	return EXIT_FAILURE;
+}
+
+static int stm32image_verify_header(unsigned char *ptr, int image_size,
+				    struct image_tool_params *params)
+{
+	struct stm32_header *stm32hdr = (struct stm32_header *)ptr;
+	int i;
+
+	if (image_size < sizeof(struct stm32_header))
+		return -1;
+	if (stm32hdr->magic_number != HEADER_MAGIC)
+		return -1;
+	if (stm32hdr->header_version[VER_MAJOR_IDX] != HEADER_VERSION_V1)
+		return -1;
+	if (stm32hdr->reserved1 || stm32hdr->reserved2)
+		return -1;
+	for (i = 0; i < (sizeof(stm32hdr->padding) / 4); i++) {
+		if (stm32hdr->padding[i] != 0)
+			return -1;
+	}
+
+	return 0;
+}
+
+static void stm32image_print_header(const void *ptr)
+{
+	struct stm32_header *stm32hdr = (struct stm32_header *)ptr;
+
+	printf("Image Type   : STMicroelectronics STM32 V%d.%d\n",
+	       stm32hdr->header_version[VER_MAJOR_IDX],
+	       stm32hdr->header_version[VER_MINOR_IDX]);
+	printf("Image Size   : %lu bytes\n",
+	       (unsigned long)le32_to_cpu(stm32hdr->image_length));
+	printf("Image Load   : 0x%08x\n",
+	       le32_to_cpu(stm32hdr->load_address));
+	printf("Entry Point  : 0x%08x\n",
+	       le32_to_cpu(stm32hdr->image_entry_point));
+	printf("Checksum     : 0x%08x\n",
+	       le32_to_cpu(stm32hdr->image_checksum));
+	printf("Option       : 0x%08x\n",
+	       le32_to_cpu(stm32hdr->option_flags));
+}
+
+static void stm32image_set_header(void *ptr, struct stat *sbuf, int ifd,
+				  struct image_tool_params *params)
+{
+	struct stm32_header *stm32hdr = (struct stm32_header *)ptr;
+
+	stm32image_default_header(stm32hdr);
+
+	stm32hdr->load_address = cpu_to_le32(params->addr);
+	stm32hdr->image_entry_point = cpu_to_le32(params->ep);
+	stm32hdr->image_length = cpu_to_le32((uint32_t)sbuf->st_size -
+					     sizeof(struct stm32_header));
+	stm32hdr->image_checksum = stm32image_checksum(ptr, sbuf->st_size);
+}
+
+/*
+ * stm32image parameters
+ */
+U_BOOT_IMAGE_TYPE(
+	stm32image,
+	"STMicroelectronics STM32MP Image support",
+	sizeof(struct stm32_header),
+	(void *)&stm32image_header,
+	NULL,
+	stm32image_verify_header,
+	stm32image_print_header,
+	stm32image_set_header,
+	NULL,
+	stm32image_check_image_types,
+	NULL,
+	NULL
+);
diff --git a/tools/u-boot-tools/stm32image.o b/tools/u-boot-tools/stm32image.o
new file mode 100644
index 0000000000000000000000000000000000000000..161756c496f554575475eb4d525b7ac03abfcd9d
Binary files /dev/null and b/tools/u-boot-tools/stm32image.o differ
diff --git a/tools/u-boot-tools/sunxi-spl-image-builder.c b/tools/u-boot-tools/sunxi-spl-image-builder.c
new file mode 100644
index 0000000000000000000000000000000000000000..a367f117740386c3d37028ad93f2b725caa9420a
--- /dev/null
+++ b/tools/u-boot-tools/sunxi-spl-image-builder.c
@@ -0,0 +1,484 @@
+/*
+ * Allwinner NAND randomizer and image builder implementation:
+ *
+ * Copyright © 2016 NextThing Co.
+ * Copyright © 2016 Free Electrons
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ */
+
+#include <linux/bch.h>
+
+#include <getopt.h>
+#include <version.h>
+
+#define BCH_PRIMITIVE_POLY	0x5803
+
+#define ARRAY_SIZE(arr)		(sizeof(arr) / sizeof((arr)[0]))
+#define DIV_ROUND_UP(n,d)	(((n) + (d) - 1) / (d))
+
+struct image_info {
+	int ecc_strength;
+	int ecc_step_size;
+	int page_size;
+	int oob_size;
+	int usable_page_size;
+	int eraseblock_size;
+	int scramble;
+	int boot0;
+	off_t offset;
+	const char *source;
+	const char *dest;
+};
+
+static void swap_bits(uint8_t *buf, int len)
+{
+	int i, j;
+
+	for (j = 0; j < len; j++) {
+		uint8_t byte = buf[j];
+
+		buf[j] = 0;
+		for (i = 0; i < 8; i++) {
+			if (byte & (1 << i))
+				buf[j] |= (1 << (7 - i));
+		}
+	}
+}
+
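+/*
+ * Advance the 15-bit LFSR used for data scrambling by 'count' steps:
+ * each step XORs the two least-significant state bits and shifts the
+ * result in as the new bit 14.
+ */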
+static uint16_t lfsr_step(uint16_t state, int count)
+{
+	state &= 0x7fff;
+	while (count--)
+		state = ((state >> 1) |
+			 ((((state >> 0) ^ (state >> 1)) & 1) << 14)) & 0x7fff;
+
+	return state;
+}
+
+static uint16_t default_scrambler_seeds[] = {
+	0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
+	0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
+	0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
+	0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
+	0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
+	0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
+	0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
+	0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
+	0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
+	0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
+	0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
+	0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
+	0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
+	0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
+	0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
+	0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
+};
+
+static uint16_t brom_scrambler_seeds[] = { 0x4a80 };
+
+static void scramble(const struct image_info *info,
+		     int page, uint8_t *data, int datalen)
+{
+	uint16_t state;
+	int i;
+
+	/* Boot0 is always scrambled no matter the command line option. */
+	if (info->boot0) {
+		state = brom_scrambler_seeds[0];
+	} else {
+		unsigned seedmod = info->eraseblock_size / info->page_size;
+
+		/* Bail out earlier if the user didn't ask for scrambling. */
+		if (!info->scramble)
+			return;
+
+		if (seedmod > ARRAY_SIZE(default_scrambler_seeds))
+			seedmod = ARRAY_SIZE(default_scrambler_seeds);
+
+		state = default_scrambler_seeds[page % seedmod];
+	}
+
+	/* Prepare the initial state... */
+	state = lfsr_step(state, 15);
+
+	/* and start scrambling data. */
+	for (i = 0; i < datalen; i++) {
+		data[i] ^= state;
+		state = lfsr_step(state, 8);
+	}
+}
+
+static int write_page(const struct image_info *info, uint8_t *buffer,
+		      FILE *src, FILE *rnd, FILE *dst,
+		      struct bch_control *bch, int page)
+{
+	int steps = info->usable_page_size / info->ecc_step_size;
+	int eccbytes = DIV_ROUND_UP(info->ecc_strength * 14, 8);
+	off_t pos = ftell(dst);
+	size_t pad, cnt;
+	int i;
+
+	if (eccbytes % 2)
+		eccbytes++;
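+	/*
+	 * Each ECC step protects ecc_step_size data bytes plus 4 spare
+	 * bytes and needs 14 parity bits per bit of correction strength,
+	 * rounded up to an even number of bytes (e.g. a strength of 40
+	 * needs 40 * 14 = 560 bits = 70 bytes of ECC).
+	 */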
+
+	memset(buffer, 0xff, info->page_size + info->oob_size);
+	cnt = fread(buffer, 1, info->usable_page_size, src);
+	if (!cnt) {
+		if (!feof(src)) {
+			fprintf(stderr,
+				"Failed to read data from the source\n");
+			return -1;
+		} else {
+			return 0;
+		}
+	}
+
+	fwrite(buffer, info->page_size + info->oob_size, 1, dst);
+
+	for (i = 0; i < info->usable_page_size; i++) {
+		if (buffer[i] !=  0xff)
+			break;
+	}
+
+	/* We leave empty pages at 0xff. */
+	if (i == info->usable_page_size)
+		return 0;
+
+	/* Restore the source pointer to read it again. */
+	fseek(src, -cnt, SEEK_CUR);
+
+	/* Randomize unused space if scrambling is required. */
+	if (info->scramble) {
+		int offs;
+
+		if (info->boot0) {
+			size_t ret;
+
+			offs = steps * (info->ecc_step_size + eccbytes + 4);
+			cnt = info->page_size + info->oob_size - offs;
+			ret = fread(buffer + offs, 1, cnt, rnd);
+			if (!ret && !feof(rnd)) {
+				fprintf(stderr,
+					"Failed to read random data\n");
+				return -1;
+			}
+		} else {
+			offs = info->page_size + (steps * (eccbytes + 4));
+			cnt = info->page_size + info->oob_size - offs;
+			memset(buffer + offs, 0xff, cnt);
+			scramble(info, page, buffer + offs, cnt);
+		}
+		fseek(dst, pos + offs, SEEK_SET);
+		fwrite(buffer + offs, cnt, 1, dst);
+	}
+
+	for (i = 0; i < steps; i++) {
+		int ecc_offs, data_offs;
+		uint8_t *ecc;
+
+		memset(buffer, 0xff, info->ecc_step_size + eccbytes + 4);
+		ecc = buffer + info->ecc_step_size + 4;
+		if (info->boot0) {
+			data_offs = i * (info->ecc_step_size + eccbytes + 4);
+			ecc_offs = data_offs + info->ecc_step_size + 4;
+		} else {
+			data_offs = i * info->ecc_step_size;
+			ecc_offs = info->page_size + 4 + (i * (eccbytes + 4));
+		}
+
+		cnt = fread(buffer, 1, info->ecc_step_size, src);
+		if (!cnt && !feof(src)) {
+			fprintf(stderr,
+				"Failed to read data from the source\n");
+			return -1;
+		}
+
+		pad = info->ecc_step_size - cnt;
+		if (pad) {
+			if (info->scramble && info->boot0) {
+				size_t ret;
+
+				ret = fread(buffer + cnt, 1, pad, rnd);
+				if (!ret && !feof(rnd)) {
+					fprintf(stderr,
+						"Failed to read random data\n");
+					return -1;
+				}
+			} else {
+				memset(buffer + cnt, 0xff, pad);
+			}
+		}
+
+		memset(ecc, 0, eccbytes);
+		swap_bits(buffer, info->ecc_step_size + 4);
+		encode_bch(bch, buffer, info->ecc_step_size + 4, ecc);
+		swap_bits(buffer, info->ecc_step_size + 4);
+		swap_bits(ecc, eccbytes);
+		scramble(info, page, buffer, info->ecc_step_size + 4 + eccbytes);
+
+		fseek(dst, pos + data_offs, SEEK_SET);
+		fwrite(buffer, info->ecc_step_size, 1, dst);
+		fseek(dst, pos + ecc_offs - 4, SEEK_SET);
+		fwrite(ecc - 4, eccbytes + 4, 1, dst);
+	}
+
+	/* Fix BBM. */
+	fseek(dst, pos + info->page_size, SEEK_SET);
+	memset(buffer, 0xff, 2);
+	fwrite(buffer, 2, 1, dst);
+
+	/* Make dst pointer point to the next page. */
+	fseek(dst, pos + info->page_size + info->oob_size, SEEK_SET);
+
+	return 0;
+}
+
+static int create_image(const struct image_info *info)
+{
+	off_t page = info->offset / info->page_size;
+	struct bch_control *bch;
+	FILE *src, *dst, *rnd;
+	uint8_t *buffer;
+
+	bch = init_bch(14, info->ecc_strength, BCH_PRIMITIVE_POLY);
+	if (!bch) {
+		fprintf(stderr, "Failed to init the BCH engine\n");
+		return -1;
+	}
+
+	buffer = malloc(info->page_size + info->oob_size);
+	if (!buffer) {
+		fprintf(stderr, "Failed to allocate the NAND page buffer\n");
+		return -1;
+	}
+
+	memset(buffer, 0xff, info->page_size + info->oob_size);
+
+	src = fopen(info->source, "r");
+	if (!src) {
+		fprintf(stderr, "Failed to open source file (%s)\n",
+			info->source);
+		return -1;
+	}
+
+	dst = fopen(info->dest, "w");
+	if (!dst) {
+		fprintf(stderr, "Failed to open dest file (%s)\n", info->dest);
+		return -1;
+	}
+
+	rnd = fopen("/dev/urandom", "r");
+	if (!rnd) {
+		fprintf(stderr, "Failed to open /dev/urandom\n");
+		return -1;
+	}
+
+	while (!feof(src)) {
+		int ret;
+
+		ret = write_page(info, buffer, src, rnd, dst, bch, page++);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void display_help(int status)
+{
+	fprintf(status == EXIT_SUCCESS ? stdout : stderr,
+		"sunxi-nand-image-builder %s\n"
+		"\n"
+		"Usage: sunxi-nand-image-builder [OPTIONS] source-image output-image\n"
+		"\n"
+		"Creates a raw NAND image that can be read by the sunxi NAND controller.\n"
+		"\n"
+		"-h               --help               Display this help and exit\n"
+		"-c <str>/<step>  --ecc=<str>/<step>   ECC config (strength/step-size)\n"
+		"-p <size>        --page=<size>        Page size\n"
+		"-o <size>        --oob=<size>         OOB size\n"
+		"-u <size>        --usable=<size>      Usable page size\n"
+		"-e <size>        --eraseblock=<size>  Erase block size\n"
+		"-b               --boot0              Build a boot0 image.\n"
+		"-s               --scramble           Scramble data\n"
+		"-a <offset>      --address=<offset>   Where the image will be programmed.\n"
+		"\n"
+		"Notes:\n"
+		"All the information you need to pass to this tool should be part of\n"
+		"the NAND datasheet.\n"
+		"\n"
+		"The NAND controller only supports the following ECC configs\n"
+		"  Valid ECC strengths: 16, 24, 28, 32, 40, 48, 56, 60 and 64\n"
+		"  Valid ECC step size: 512 and 1024\n"
+		"\n"
+		"If you are building a boot0 image, you'll have to specify extra options.\n"
+		"These options should be chosen based on the layouts described here:\n"
+		"  http://linux-sunxi.org/NAND#More_information_on_BROM_NAND\n"
+		"\n"
+		"  --usable should be assigned the 'Hardware page' value\n"
+		"  --ecc should be assigned the 'ECC capacity'/'ECC page' values\n"
+		"  --usable should be smaller than --page\n"
+		"\n"
+		"The --address option is only required for non-boot0 images that are\n"
+		"meant to be programmed at a non-eraseblock-aligned offset.\n"
+		"\n"
+		"Examples:\n"
+		"  The H27UCG8T2BTR-BC NAND exposes\n"
+		"  * 16k pages\n"
+		"  * 1280 OOB bytes per page\n"
+		"  * 4M eraseblocks\n"
+		"  * requires data scrambling\n"
+		"  * expects a minimum ECC of 40bits/1024bytes\n"
+		"\n"
+		"  A normal image can be generated with\n"
+		"    sunxi-nand-image-builder -p 16384 -o 1280 -e 0x400000 -s -c 40/1024\n"
+		"  A boot0 image can be generated with\n"
+		"    sunxi-nand-image-builder -p 16384 -o 1280 -e 0x400000 -s -b -u 4096 -c 64/1024\n",
+		PLAIN_VERSION);
+	exit(status);
+}
+
+static int check_image_info(struct image_info *info)
+{
+	static int valid_ecc_strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
+	int eccbytes, eccsteps;
+	unsigned i;
+
+	if (!info->page_size) {
+		fprintf(stderr, "--page is missing\n");
+		return -EINVAL;
+	}
+
+	if (!info->oob_size) {
+		fprintf(stderr, "--oob is missing\n");
+		return -EINVAL;
+	}
+
+	if (!info->eraseblock_size) {
+		fprintf(stderr, "--eraseblock is missing\n");
+		return -EINVAL;
+	}
+
+	if (info->ecc_step_size != 512 && info->ecc_step_size != 1024) {
+		fprintf(stderr, "Invalid ECC step argument: %d\n",
+			info->ecc_step_size);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(valid_ecc_strengths); i++) {
+		if (valid_ecc_strengths[i] == info->ecc_strength)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(valid_ecc_strengths)) {
+		fprintf(stderr, "Invalid ECC strength argument: %d\n",
+			info->ecc_strength);
+		return -EINVAL;
+	}
+
+	eccbytes = DIV_ROUND_UP(info->ecc_strength * 14, 8);
+	if (eccbytes % 2)
+		eccbytes++;
+	eccbytes += 4;
+
+	eccsteps = info->usable_page_size / info->ecc_step_size;
+
+	if (info->page_size + info->oob_size <
+	    info->usable_page_size + (eccsteps * eccbytes)) {
+		fprintf(stderr,
+			"ECC bytes do not fit in the NAND page, choose a weaker ECC\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int main(int argc, char **argv)
+{
+	struct image_info info;
+
+	memset(&info, 0, sizeof(info));
+	/*
+	 * Process user arguments
+	 */
+	for (;;) {
+		int option_index = 0;
+		char *endptr = NULL;
+		static const struct option long_options[] = {
+			{"help", no_argument, 0, 'h'},
+			{"ecc", required_argument, 0, 'c'},
+			{"page", required_argument, 0, 'p'},
+			{"oob", required_argument, 0, 'o'},
+			{"usable", required_argument, 0, 'u'},
+			{"eraseblock", required_argument, 0, 'e'},
+			{"boot0", no_argument, 0, 'b'},
+			{"scramble", no_argument, 0, 's'},
+			{"address", required_argument, 0, 'a'},
+			{0, 0, 0, 0},
+		};
+
+		int c = getopt_long(argc, argv, "c:p:o:u:e:ba:sh",
+				long_options, &option_index);
+		if (c == EOF)
+			break;
+
+		switch (c) {
+		case 'h':
+			display_help(0);
+			break;
+		case 's':
+			info.scramble = 1;
+			break;
+		case 'c':
+			info.ecc_strength = strtol(optarg, &endptr, 0);
+			if (*endptr == '/')
+				info.ecc_step_size = strtol(endptr + 1, NULL, 0);
+			break;
+		case 'p':
+			info.page_size = strtol(optarg, NULL, 0);
+			break;
+		case 'o':
+			info.oob_size = strtol(optarg, NULL, 0);
+			break;
+		case 'u':
+			info.usable_page_size = strtol(optarg, NULL, 0);
+			break;
+		case 'e':
+			info.eraseblock_size = strtol(optarg, NULL, 0);
+			break;
+		case 'b':
+			info.boot0 = 1;
+			break;
+		case 'a':
+			info.offset = strtoull(optarg, NULL, 0);
+			break;
+		case '?':
+			display_help(-1);
+			break;
+		}
+	}
+
+	if ((argc - optind) != 2)
+		display_help(-1);
+
+	info.source = argv[optind];
+	info.dest = argv[optind + 1];
+
+	if (!info.boot0) {
+		info.usable_page_size = info.page_size;
+	} else if (!info.usable_page_size) {
+		if (info.page_size > 8192)
+			info.usable_page_size = 8192;
+		else if (info.page_size > 4096)
+			info.usable_page_size = 4096;
+		else
+			info.usable_page_size = 1024;
+	}
+
+	if (check_image_info(&info))
+		display_help(-1);
+
+	return create_image(&info);
+}
diff --git a/tools/u-boot-tools/ublimage.c b/tools/u-boot-tools/ublimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..1d2e897f6b35f72aadb72946230563fe78e78ce0
--- /dev/null
+++ b/tools/u-boot-tools/ublimage.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2011
+ * Heiko Schocher, DENX Software Engineering, hs@denx.de.
+ *
+ * Based on:
+ * (C) Copyright 2009
+ * Stefano Babic, DENX Software Engineering, sbabic@denx.de.
+ *
+ * (C) Copyright 2008
+ * Marvell Semiconductor <www.marvell.com>
+ * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
+ */
+
+#include "imagetool.h"
+#include <image.h>
+#include "ublimage.h"
+
+/*
+ * Supported commands for configuration file
+ */
+static table_entry_t ublimage_cmds[] = {
+	{CMD_BOOT_MODE,	"MODE",		"UBL special modes", },
+	{CMD_ENTRY,	"ENTRY",	"Entry point addr for bootloader", },
+	{CMD_PAGE,	"PAGES",
+		"number of pages (size of bootloader)", },
+	{CMD_ST_BLOCK,	"START_BLOCK",
+		"block number where bootloader is present", },
+	{CMD_ST_PAGE,	"START_PAGE",
+		"page number where bootloader is present", },
+	{CMD_LD_ADDR,	"LD_ADDR",
+		"load addr", },
+	{-1,		"",		"", },
+};
+
+/*
+ * Supported Boot options for configuration file
+ * this is needed to set the correct flash offset
+ */
+static table_entry_t ublimage_bootops[] = {
+	{UBL_MAGIC_SAFE,	"safe",	"Safe boot mode",	},
+	{-1,			"",	"Invalid",		},
+};
+
+static struct ubl_header ublimage_header;
+
+static uint32_t get_cfg_value(char *token, char *name,  int linenr)
+{
+	char *endptr;
+	uint32_t value;
+
+	errno = 0;
+	value = strtoul(token, &endptr, 16);
+	if (errno || (token == endptr)) {
+		fprintf(stderr, "Error: %s[%d] - Invalid hex data(%s)\n",
+			name,  linenr, token);
+		exit(EXIT_FAILURE);
+	}
+	return value;
+}
+
+static void print_hdr(struct ubl_header *ubl_hdr)
+{
+	printf("Image Type : Davinci UBL Boot Image\n");
+	printf("UBL magic  : %08x\n", ubl_hdr->magic);
+	printf("Entry Point: %08x\n", ubl_hdr->entry);
+	printf("nr of pages: %08x\n", ubl_hdr->pages);
+	printf("start block: %08x\n", ubl_hdr->block);
+	printf("start page : %08x\n", ubl_hdr->page);
+}
+
+static void parse_cfg_cmd(struct ubl_header *ublhdr, int32_t cmd, char *token,
+				char *name, int lineno, int fld, int dcd_len)
+{
+	static int cmd_ver_first = ~0;
+
+	switch (cmd) {
+	case CMD_BOOT_MODE:
+		ublhdr->magic = get_table_entry_id(ublimage_bootops,
+					"ublimage special boot mode", token);
+		if (ublhdr->magic == -1) {
+			fprintf(stderr, "Error: %s[%d] - Invalid boot mode"
+				" (%s)\n", name, lineno, token);
+			exit(EXIT_FAILURE);
+		}
+		ublhdr->magic += UBL_MAGIC_BASE;
+		if (unlikely(cmd_ver_first != 1))
+			cmd_ver_first = 0;
+		break;
+	case CMD_ENTRY:
+		ublhdr->entry = get_cfg_value(token, name, lineno);
+		break;
+	case CMD_PAGE:
+		ublhdr->pages = get_cfg_value(token, name, lineno);
+		break;
+	case CMD_ST_BLOCK:
+		ublhdr->block = get_cfg_value(token, name, lineno);
+		break;
+	case CMD_ST_PAGE:
+		ublhdr->page = get_cfg_value(token, name, lineno);
+		break;
+	case CMD_LD_ADDR:
+		ublhdr->pll_m = get_cfg_value(token, name, lineno);
+		break;
+	}
+}
+
+static void parse_cfg_fld(struct ubl_header *ublhdr, int32_t *cmd,
+		char *token, char *name, int lineno, int fld, int *dcd_len)
+{
+
+	switch (fld) {
+	case CFG_COMMAND:
+		*cmd = get_table_entry_id(ublimage_cmds,
+			"ublimage commands", token);
+		if (*cmd < 0) {
+			fprintf(stderr, "Error: %s[%d] - Invalid command"
+				" (%s)\n", name, lineno, token);
+			exit(EXIT_FAILURE);
+		}
+		break;
+	case CFG_REG_VALUE:
+		parse_cfg_cmd(ublhdr, *cmd, token, name, lineno, fld, *dcd_len);
+		break;
+	default:
+		break;
+	}
+}
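+
+/*
+ * Example configuration file (values are illustrative):
+ *
+ *   # UBL configuration for NAND boot
+ *   MODE        safe
+ *   ENTRY       0x00000100
+ *   PAGES       0x1b
+ *   START_BLOCK 0x1
+ *   START_PAGE  0x0
+ */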
+static uint32_t parse_cfg_file(struct ubl_header *ublhdr, char *name)
+{
+	FILE *fd = NULL;
+	char *line = NULL;
+	char *token, *saveptr1, *saveptr2;
+	int lineno = 0;
+	int	i;
+	char *ptr = (char *)ublhdr;
+	int fld;
+	size_t len;
+	int dcd_len = 0;
+	int32_t cmd;
+	int ublhdrlen = sizeof(struct ubl_header);
+
+	fd = fopen(name, "r");
+	if (fd == 0) {
+		fprintf(stderr, "Error: %s - Can't open DCD file\n", name);
+		exit(EXIT_FAILURE);
+	}
+
+	/* Fill header with 0xff */
+	for (i = 0; i < ublhdrlen; i++) {
+		*ptr = 0xff;
+		ptr++;
+	}
+
+	/*
+	 * Very simple parsing, line starting with # are comments
+	 * and are dropped
+	 */
+	while ((getline(&line, &len, fd)) > 0) {
+		lineno++;
+
+		token = strtok_r(line, "\r\n", &saveptr1);
+		if (token == NULL)
+			continue;
+
+		/* Check inside the single line */
+		for (fld = CFG_COMMAND, cmd = CMD_INVALID,
+				line = token; ; line = NULL, fld++) {
+			token = strtok_r(line, " \t", &saveptr2);
+			if (token == NULL)
+				break;
+
+			/* Drop all text starting with '#' as comments */
+			if (token[0] == '#')
+				break;
+
+			parse_cfg_fld(ublhdr, &cmd, token, name,
+					lineno, fld, &dcd_len);
+		}
+	}
+	fclose(fd);
+
+	return dcd_len;
+}
+
+static int ublimage_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_UBLIMAGE)
+		return EXIT_SUCCESS;
+	else
+		return EXIT_FAILURE;
+}
+
+static int ublimage_verify_header(unsigned char *ptr, int image_size,
+			struct image_tool_params *params)
+{
+	struct ubl_header *ubl_hdr = (struct ubl_header *)ptr;
+
+	if ((ubl_hdr->magic & 0xFFFFFF00) != UBL_MAGIC_BASE)
+		return -1;
+
+	return 0;
+}
+
+static void ublimage_print_header(const void *ptr)
+{
+	struct ubl_header *ubl_hdr = (struct ubl_header *) ptr;
+
+	print_hdr(ubl_hdr);
+}
+
+static void ublimage_set_header(void *ptr, struct stat *sbuf, int ifd,
+				struct image_tool_params *params)
+{
+	struct ubl_header *ublhdr = (struct ubl_header *)ptr;
+
+	/* Parse configuration file */
+	parse_cfg_file(ublhdr, params->imagename);
+}
+
+int ublimage_check_params(struct image_tool_params *params)
+{
+	if (!params)
+		return CFG_INVALID;
+	if (!strlen(params->imagename)) {
+		fprintf(stderr, "Error: %s - Configuration file not "
+			"specified, it is needed for ublimage generation\n",
+			params->cmdname);
+		return CFG_INVALID;
+	}
+	/*
+	 * Check parameters:
+	 * XIP is not allowed and verify that incompatible
+	 * parameters are not sent at the same time
+	 * For example, if list is required a data image must not be provided
+	 */
+	return	(params->dflag && (params->fflag || params->lflag)) ||
+		(params->fflag && (params->dflag || params->lflag)) ||
+		(params->lflag && (params->dflag || params->fflag)) ||
+		(params->xflag) || !(strlen(params->imagename));
+}
+
+/*
+ * ublimage parameters
+ */
+U_BOOT_IMAGE_TYPE(
+	ublimage,
+	"Davinci UBL boot support",
+	sizeof(struct ubl_header),
+	(void *)&ublimage_header,
+	ublimage_check_params,
+	ublimage_verify_header,
+	ublimage_print_header,
+	ublimage_set_header,
+	NULL,
+	ublimage_check_image_types,
+	NULL,
+	NULL
+);
diff --git a/tools/u-boot-tools/ublimage.h b/tools/u-boot-tools/ublimage.h
new file mode 100644
index 0000000000000000000000000000000000000000..65d6aa1ff07bc2bdb11f0bad3a18e65084b31198
--- /dev/null
+++ b/tools/u-boot-tools/ublimage.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2011
+ * Heiko Schocher, DENX Software Engineering, hs@denx.de.
+ *
+ * Based on:
+ * (C) Copyright 2009
+ * Stefano Babic, DENX Software Engineering, sbabic@denx.de.
+ */
+
+#ifndef _UBLIMAGE_H_
+#define _UBLIMAGE_H_
+
+enum ublimage_cmd {
+	CMD_INVALID,
+	CMD_BOOT_MODE,
+	CMD_ENTRY,
+	CMD_PAGE,
+	CMD_ST_BLOCK,
+	CMD_ST_PAGE,
+	CMD_LD_ADDR
+};
+
+enum ublimage_fld_types {
+	CFG_INVALID = -1,
+	CFG_COMMAND,
+	CFG_REG_VALUE
+};
+
+/*
+ * from sprufg5a.pdf Table 110
+ * Used by RBL when doing NAND boot
+ */
+#define UBL_MAGIC_BASE              (0xA1ACED00)
+/* Safe boot mode */
+#define UBL_MAGIC_SAFE              (0x00)
+/* DMA boot mode */
+#define UBL_MAGIC_DMA               (0x11)
+/* I Cache boot mode */
+#define UBL_MAGIC_IC                (0x22)
+/* Fast EMIF boot mode */
+#define UBL_MAGIC_FAST              (0x33)
+/* DMA + ICache boot mode */
+#define UBL_MAGIC_DMA_IC            (0x44)
+/* DMA + ICache + Fast EMIF boot mode */
+#define UBL_MAGIC_DMA_IC_FAST       (0x55)
+
+/* Define max UBL image size */
+#define UBL_IMAGE_SIZE              (0x00003800u)
+
+/* one NAND block */
+#define UBL_BLOCK_SIZE 2048
+
+/* from sprufg5a.pdf Table 109 */
+struct ubl_header {
+	uint32_t	magic;	/* Magic Number, see UBL_* defines */
+	uint32_t	entry;	/* entry point address for bootloader */
+	uint32_t	pages;	/* number of pages (size of bootloader) */
+	uint32_t	block;	/*
+				 * blocknumber where user bootloader is
+				 * present
+				 */
+	uint32_t	page;	/*
+				 * page number where user bootloader is
+				 * present.
+				 */
+	uint32_t	pll_m;	/*
+				 * PLL setting -Multiplier (only valid if
+				 * Magic Number indicates PLL enable).
+				 */
+	uint32_t	pll_n;	/*
+				 * PLL setting -Divider (only valid if
+				 * Magic Number indicates PLL enable).
+				 */
+	uint32_t	emif;	/*
+				 * fast EMIF setting (only valid if
+				 * Magic Number indicates fast EMIF boot).
+				 */
+	/* to fit in one nand block */
+	unsigned char	res[UBL_BLOCK_SIZE - 8 * 4];
+};
+
+#endif /* _UBLIMAGE_H_ */
diff --git a/tools/u-boot-tools/ublimage.o b/tools/u-boot-tools/ublimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..ecc367cfdb6d37122253f83530e5429ee934f949
Binary files /dev/null and b/tools/u-boot-tools/ublimage.o differ
diff --git a/tools/u-boot-tools/ubsha1.c b/tools/u-boot-tools/ubsha1.c
new file mode 100644
index 0000000000000000000000000000000000000000..90a6f3f59d2da956b6342284ed66bffa9e8671f9
--- /dev/null
+++ b/tools/u-boot-tools/ubsha1.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2007
+ * Heiko Schocher, DENX Software Engineering, <hs@denx.de>
+ */
+
+#include "os_support.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <u-boot/sha1.h>
+
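+/*
+ * Compute the SHA1 sum over a U-Boot image and patch it into the image
+ * file.  The stored sum lives SHA1_SUM_POS bytes from the end of the
+ * file; it is zeroed in a temporary copy before hashing, so the recorded
+ * digest covers the image with the sum field cleared.
+ */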
+int main (int argc, char **argv)
+{
+	unsigned char output[20];
+	int i, len;
+
+	char	*imagefile;
+	char	*cmdname = *argv;
+	unsigned char	*ptr;
+	unsigned char	*data;
+	struct stat sbuf;
+	unsigned char	*ptroff;
+	int	ifd;
+	int	off;
+
+	if (argc > 1) {
+		imagefile = argv[1];
+		ifd = open (imagefile, O_RDWR|O_BINARY);
+		if (ifd < 0) {
+			fprintf (stderr, "%s: Can't open %s: %s\n",
+				cmdname, imagefile, strerror(errno));
+			exit (EXIT_FAILURE);
+		}
+		if (fstat (ifd, &sbuf) < 0) {
+			fprintf (stderr, "%s: Can't stat %s: %s\n",
+				cmdname, imagefile, strerror(errno));
+			exit (EXIT_FAILURE);
+		}
+		len = sbuf.st_size;
+		ptr = (unsigned char *)mmap(0, len,
+				    PROT_READ, MAP_SHARED, ifd, 0);
+		if (ptr == (unsigned char *)MAP_FAILED) {
+			fprintf (stderr, "%s: Can't read %s: %s\n",
+				cmdname, imagefile, strerror(errno));
+			exit (EXIT_FAILURE);
+		}
+
+		/* create a copy, so we can blank out the sha1 sum */
+		data = malloc (len);
+		memcpy (data, ptr, len);
+		off = SHA1_SUM_POS;
+		ptroff = &data[len +  off];
+		for (i = 0; i < SHA1_SUM_LEN; i++) {
+			ptroff[i] = 0;
+		}
+
+		sha1_csum ((unsigned char *) data, len, (unsigned char *)output);
+
+		printf ("U-Boot sum:\n");
+		for (i = 0; i < 20 ; i++) {
+		    printf ("%02X ", output[i]);
+		}
+		printf ("\n");
+		/* overwrite the sum in the bin file, with the actual */
+		lseek (ifd, SHA1_SUM_POS, SEEK_END);
+		if (write (ifd, output, SHA1_SUM_LEN) != SHA1_SUM_LEN) {
+			fprintf (stderr, "%s: Can't write %s: %s\n",
+				cmdname, imagefile, strerror(errno));
+			exit (EXIT_FAILURE);
+		}
+
+		free (data);
+		(void) munmap((void *)ptr, len);
+		(void) close (ifd);
+	}
+
+	return EXIT_SUCCESS;
+}
diff --git a/tools/u-boot-tools/version.h b/tools/u-boot-tools/version.h
new file mode 120000
index 0000000000000000000000000000000000000000..bb576071e84d5232d2aae7815426c3709d62a1e2
--- /dev/null
+++ b/tools/u-boot-tools/version.h
@@ -0,0 +1 @@
+../include/version.h
\ No newline at end of file
diff --git a/tools/u-boot-tools/vybridimage.c b/tools/u-boot-tools/vybridimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..94a6684c19b73c249ed3ba2818cbb838e30db451
--- /dev/null
+++ b/tools/u-boot-tools/vybridimage.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Image manipulator for Vybrid SoCs
+ *
+ * Derived from vybridimage.c
+ *
+ * (C) Copyright 2016  DENX Software Engineering GmbH
+ * Written-by: Albert ARIBAUD <albert.aribaud@3adev.fr>
+ */
+
+#include "imagetool.h"
+#include <compiler.h>
+#include <image.h>
+
+/*
+ * NAND page 0 boot header
+ */
+
+struct nand_page_0_boot_header {
+	union {
+		uint32_t fcb[128];
+		uint8_t fcb_bytes[512];
+	};				/* 0x00000000 - 0x000001ff */
+	uint8_t  sw_ecc[512];		/* 0x00000200 - 0x000003ff */
+	uint32_t padding[65280];	/* 0x00000400 - 0x0003ffff */
+	uint8_t ivt_prefix[1024];	/* 0x00040000 - 0x000403ff */
+};
+
+/* signature byte for a readable block */
+
+static struct nand_page_0_boot_header vybridimage_header;
+
+static int vybridimage_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_VYBRIDIMAGE)
+		return EXIT_SUCCESS;
+	return EXIT_FAILURE;
+}
+
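+/*
+ * Compute the five software ECC parity bits stored alongside each FCB
+ * byte: each parity bit is the XOR of a fixed subset of the data bits
+ * (a Hamming-style single-error-correcting code over one byte).
+ */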
+static uint8_t vybridimage_sw_ecc(uint8_t byte)
+{
+	uint8_t bit0  = (byte & (1 << 0)) ? 1 : 0;
+	uint8_t bit1  = (byte & (1 << 1)) ? 1 : 0;
+	uint8_t bit2  = (byte & (1 << 2)) ? 1 : 0;
+	uint8_t bit3  = (byte & (1 << 3)) ? 1 : 0;
+	uint8_t bit4  = (byte & (1 << 4)) ? 1 : 0;
+	uint8_t bit5  = (byte & (1 << 5)) ? 1 : 0;
+	uint8_t bit6  = (byte & (1 << 6)) ? 1 : 0;
+	uint8_t bit7  = (byte & (1 << 7)) ? 1 : 0;
+	uint8_t res = 0;
+
+	res |= ((bit6 ^ bit5 ^ bit3 ^ bit2) << 0);
+	res |= ((bit7 ^ bit5 ^ bit4 ^ bit2 ^ bit1) << 1);
+	res |= ((bit7 ^ bit6 ^ bit5 ^ bit1 ^ bit0) << 2);
+	res |= ((bit7 ^ bit4 ^ bit3 ^ bit0) << 3);
+	res |= ((bit6 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0) << 4);
+
+	return res;
+}
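+
+/*
+ * Worked examples for the parity equations above:
+ * vybridimage_sw_ecc(0x00) == 0x00, vybridimage_sw_ecc(0x20) == 0x07 and
+ * vybridimage_sw_ecc(0xff) == 0x06.
+ */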
+
+static int vybridimage_verify_header(unsigned char *ptr, int image_size,
+			struct image_tool_params *params)
+{
+	struct nand_page_0_boot_header *hdr =
+		(struct nand_page_0_boot_header *)ptr;
+	int idx;
+
+	if (hdr->fcb[1] != 0x46434220)
+		return -1;
+	if (hdr->fcb[2] != 1)
+		return -1;
+	if (hdr->fcb[7] != 64)
+		return -1;
+	if (hdr->fcb[14] != 6)
+		return -1;
+	if (hdr->fcb[30] != 0x0001ff00)
+		return -1;
+	if (hdr->fcb[43] != 1)
+		return -1;
+	if (hdr->fcb[54] != 0)
+		return -1;
+	if (hdr->fcb[55] != 8)
+		return -1;
+
+	/* check software ECC */
+	for (idx = 0; idx < sizeof(hdr->fcb_bytes); idx++) {
+		uint8_t sw_ecc = vybridimage_sw_ecc(hdr->fcb_bytes[idx]);
+		if (sw_ecc != hdr->sw_ecc[idx])
+			return -1;
+	}
+
+	return 0;
+}
+
+static void vybridimage_set_header(void *ptr, struct stat *sbuf, int ifd,
+				struct image_tool_params *params)
+{
+	struct nand_page_0_boot_header *hdr =
+		(struct nand_page_0_boot_header *)ptr;
+	int idx;
+
+	/* fill header with 0x00 for first 56 entries then 0xff */
+	memset(&hdr->fcb[0], 0x0, 56*sizeof(uint32_t));
+	memset(&hdr->fcb[56], 0xff, 72*sizeof(uint32_t));
+	/* fill SW ecc and padding with 0xff */
+	memset(&hdr->sw_ecc[0], 0xff, sizeof(hdr->sw_ecc));
+	memset(&hdr->padding[0], 0xff, sizeof(hdr->padding));
+	/* fill IVT prefix with 0x00 */
+	memset(&hdr->ivt_prefix[0], 0x00, sizeof(hdr->ivt_prefix));
+
+	/* populate fcb */
+	hdr->fcb[1] = 0x46434220; /* signature */
+	hdr->fcb[2] = 0x00000001; /* version */
+	hdr->fcb[5] = 2048; /* page size */
+	hdr->fcb[6] = (2048+64); /* page + OOB size */
+	hdr->fcb[7] = 64; /* pages per block */
+	hdr->fcb[14] = 6; /* ECC mode 6 */
+	hdr->fcb[26] = 128; /* fw address (0x40000) in 2K pages */
+	hdr->fcb[27] = 128; /* fw address (0x40000) in 2K pages */
+	hdr->fcb[30] = 0x0001ff00; /* DBBT search area start address */
+	hdr->fcb[33] = 2048; /* BB marker physical offset */
+	hdr->fcb[43] = 1; /* DISBBM */
+	hdr->fcb[54] = 0; /* DISBB_Search */
+	hdr->fcb[55] = 8; /* Bad block search limit */
+
+	/* compute software ECC */
+	for (idx = 0; idx < sizeof(hdr->fcb_bytes); idx++)
+		hdr->sw_ecc[idx] = vybridimage_sw_ecc(hdr->fcb_bytes[idx]);
+}
+
+static void vybridimage_print_hdr_field(struct nand_page_0_boot_header *hdr,
+	int idx)
+{
+	printf("header.fcb[%d] = %08x\n", idx, hdr->fcb[idx]);
+}
+
+static void vybridimage_print_header(const void *ptr)
+{
+	struct nand_page_0_boot_header *hdr =
+		(struct nand_page_0_boot_header *)ptr;
+	int idx;
+
+	for (idx = 0; idx < 56; idx++)
+		vybridimage_print_hdr_field(hdr, idx);
+}
+
+/*
+ * vybridimage parameters
+ */
+U_BOOT_IMAGE_TYPE(
+	vybridimage,
+	"Vybrid Boot Image",
+	sizeof(vybridimage_header),
+	(void *)&vybridimage_header,
+	NULL,
+	vybridimage_verify_header,
+	vybridimage_print_header,
+	vybridimage_set_header,
+	NULL,
+	vybridimage_check_image_types,
+	NULL,
+	NULL
+);
diff --git a/tools/u-boot-tools/vybridimage.o b/tools/u-boot-tools/vybridimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..b77b3e0e5d61ba4f964088e5b0e235e16ef9735f
Binary files /dev/null and b/tools/u-boot-tools/vybridimage.o differ
diff --git a/tools/u-boot-tools/xway-swap-bytes.c b/tools/u-boot-tools/xway-swap-bytes.c
new file mode 100644
index 0000000000000000000000000000000000000000..3a6d82d54a06f68753e923907688720f8d9dac5a
--- /dev/null
+++ b/tools/u-boot-tools/xway-swap-bytes.c
@@ -0,0 +1,38 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#undef BUFSIZ
+# define BUFSIZ 64
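+
+/*
+ * The loop below swaps the two 16-bit halves of every 32-bit word read from
+ * stdin and writes the result to stdout, e.g. the byte sequence
+ * "aa bb cc dd" becomes "cc dd aa bb".  BUFSIZ is the chunk size in 16-bit
+ * words, so each read() transfers at most 128 bytes.
+ */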
+int main (void)
+{
+	short ibuff[BUFSIZ], obuff[BUFSIZ];
+	int rc, i, len;
+
+	while ((rc = read (0, ibuff, sizeof (ibuff))) > 0) {
+		memset (obuff, 0, sizeof (obuff));
+		for (i = 0; i < (rc + 1) / 2; i++) {
+			obuff[i] = ibuff[i ^ 1];
+		}
+
+		len = (rc + 1) & ~1;
+
+		if (write (1, obuff, len) != len) {
+			perror ("write error");
+			return (EXIT_FAILURE);
+		}
+
+		memset (ibuff, 0, sizeof (ibuff));
+	}
+
+	if (rc < 0) {
+		perror ("read error");
+		return (EXIT_FAILURE);
+	}
+	return (EXIT_SUCCESS);
+}
diff --git a/tools/u-boot-tools/zynqimage.c b/tools/u-boot-tools/zynqimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..d3f418b0612b311b0537dddce8769c6e07b046a4
--- /dev/null
+++ b/tools/u-boot-tools/zynqimage.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2015 Nathan Rossi <nathan@nathanrossi.com>
+ *
+ * The following Boot Header format/structures and values are defined in the
+ * following documents:
+ *   * Xilinx Zynq-7000 Technical Reference Manual (Section 6.3)
+ *   * Xilinx Zynq-7000 Software Developers Guide (Appendix A.7 and A.8)
+ *
+ * Expected Header Size = 0x8C0
+ * Forced as 'little' endian, 32-bit words
+ *
+ *  0x  0 - Interrupt Table (8 words)
+ *  ...     (Default value = 0xeafffffe)
+ *  0x 1f
+ *  0x 20 - Width Detection
+ *         * DEFAULT_WIDTHDETECTION    0xaa995566
+ *  0x 24 - Image Identifier
+ *         * DEFAULT_IMAGEIDENTIFIER   0x584c4e58
+ *  0x 28 - Encryption
+ *         * 0x00000000 - None
+ *         * 0xa5c3c5a3 - eFuse
+ *         * 0x3a5c3c5a - bbRam
+ *  0x 2C - User Field
+ *  0x 30 - Image Offset
+ *  0x 34 - Image Size
+ *  0x 38 - Reserved (0x00000000) (according to spec)
+ *          * FSBL defines this field for Image Destination Address.
+ *  0x 3C - Image Load
+ *  0x 40 - Image Stored Size
+ *  0x 44 - Reserved (0x00000000) (according to spec)
+ *          * FSBL defines this field for QSPI configuration Data.
+ *  0x 48 - Checksum
+ *  0x 4c - Unused (21 words)
+ *  ...
+ *  0x 9c
+ *  0x a0 - Register Initialization, 256 Address and Data word pairs
+ *         * List is terminated with an address of 0xffffffff or
+ *  ...    * at the max number of entries
+ *  0x89c
+ *  0x8a0 - Unused (8 words)
+ *  ...
+ *  0x8bf
+ *  0x8c0 - Data/Image starts here or above
+ */
+
+#include "imagetool.h"
+#include "mkimage.h"
+#include <image.h>
+
+#define HEADER_INTERRUPT_DEFAULT (cpu_to_le32(0xeafffffe))
+#define HEADER_REGINIT_NULL (cpu_to_le32(0xffffffff))
+#define HEADER_WIDTHDETECTION (cpu_to_le32(0xaa995566))
+#define HEADER_IMAGEIDENTIFIER (cpu_to_le32(0x584c4e58))
+
+enum {
+	ENCRYPTION_EFUSE = 0xa5c3c5a3,
+	ENCRYPTION_BBRAM = 0x3a5c3c5a,
+	ENCRYPTION_NONE = 0x0,
+};
+
+struct zynq_reginit {
+	uint32_t address;
+	uint32_t data;
+};
+
+#define HEADER_INTERRUPT_VECTORS 8
+#define HEADER_REGINITS 256
+
+struct zynq_header {
+	uint32_t interrupt_vectors[HEADER_INTERRUPT_VECTORS]; /* 0x0 */
+	uint32_t width_detection; /* 0x20 */
+	uint32_t image_identifier; /* 0x24 */
+	uint32_t encryption; /* 0x28 */
+	uint32_t user_field; /* 0x2c */
+	uint32_t image_offset; /* 0x30 */
+	uint32_t image_size; /* 0x34 */
+	uint32_t __reserved1; /* 0x38 */
+	uint32_t image_load; /* 0x3c */
+	uint32_t image_stored_size; /* 0x40 */
+	uint32_t __reserved2; /* 0x44 */
+	uint32_t checksum; /* 0x48 */
+	uint32_t __reserved3[21]; /* 0x4c */
+	struct zynq_reginit register_init[HEADER_REGINITS]; /* 0xa0 */
+	uint32_t __reserved4[8]; /* 0x8a0 */
+};
+
+static struct zynq_header zynqimage_header;
+
+static uint32_t zynqimage_checksum(struct zynq_header *ptr)
+{
+	uint32_t checksum = 0;
+
+	if (ptr == NULL)
+		return 0;
+
+	checksum += le32_to_cpu(ptr->width_detection);
+	checksum += le32_to_cpu(ptr->image_identifier);
+	checksum += le32_to_cpu(ptr->encryption);
+	checksum += le32_to_cpu(ptr->user_field);
+	checksum += le32_to_cpu(ptr->image_offset);
+	checksum += le32_to_cpu(ptr->image_size);
+	checksum += le32_to_cpu(ptr->__reserved1);
+	checksum += le32_to_cpu(ptr->image_load);
+	checksum += le32_to_cpu(ptr->image_stored_size);
+	checksum += le32_to_cpu(ptr->__reserved2);
+	checksum = ~checksum;
+
+	return cpu_to_le32(checksum);
+}
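+
+/*
+ * The checksum is the bitwise complement of the 32-bit sum of the header
+ * words from 0x20 (width detection) up to and including 0x44 (reserved);
+ * the checksum word at 0x48 is not part of the sum.  For an unencrypted
+ * image with user field, load and reserved words all zero, the sum reduces
+ * to 0xaa995566 + 0x584c4e58 + image_offset + image_size +
+ * image_stored_size.
+ */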
+
+static void zynqimage_default_header(struct zynq_header *ptr)
+{
+	int i;
+
+	if (ptr == NULL)
+		return;
+
+	ptr->width_detection = HEADER_WIDTHDETECTION;
+	ptr->image_identifier = HEADER_IMAGEIDENTIFIER;
+	ptr->encryption = cpu_to_le32(ENCRYPTION_NONE);
+
+	/* Setup not-supported/constant/reserved fields */
+	for (i = 0; i < HEADER_INTERRUPT_VECTORS; i++)
+		ptr->interrupt_vectors[i] = HEADER_INTERRUPT_DEFAULT;
+
+	for (i = 0; i < HEADER_REGINITS; i++) {
+		ptr->register_init[i].address = HEADER_REGINIT_NULL;
+		ptr->register_init[i].data = HEADER_REGINIT_NULL;
+	}
+
+	/*
+	 * Certain reserved fields are required to be set to 0, ensure they are
+	 * set as such.
+	 */
+	ptr->__reserved1 = 0x0;
+	ptr->__reserved2 = 0x0;
+}
+
+/* mkimage glue functions */
+static int zynqimage_verify_header(unsigned char *ptr, int image_size,
+		struct image_tool_params *params)
+{
+	struct zynq_header *zynqhdr = (struct zynq_header *)ptr;
+
+	if (image_size < sizeof(struct zynq_header))
+		return -1;
+
+	if (zynqhdr->__reserved1 != 0)
+		return -1;
+
+	if (zynqhdr->__reserved2 != 0)
+		return -1;
+
+	if (zynqhdr->width_detection != HEADER_WIDTHDETECTION)
+		return -1;
+	if (zynqhdr->image_identifier != HEADER_IMAGEIDENTIFIER)
+		return -1;
+
+	if (zynqimage_checksum(zynqhdr) != zynqhdr->checksum)
+		return -1;
+
+	return 0;
+}
+
+static void zynqimage_print_header(const void *ptr)
+{
+	struct zynq_header *zynqhdr = (struct zynq_header *)ptr;
+	int i;
+
+	printf("Image Type   : Xilinx Zynq Boot Image support\n");
+	printf("Image Offset : 0x%08x\n", le32_to_cpu(zynqhdr->image_offset));
+	printf("Image Size   : %lu bytes (%lu bytes packed)\n",
+	       (unsigned long)le32_to_cpu(zynqhdr->image_size),
+	       (unsigned long)le32_to_cpu(zynqhdr->image_stored_size));
+	printf("Image Load   : 0x%08x\n", le32_to_cpu(zynqhdr->image_load));
+	printf("User Field   : 0x%08x\n", le32_to_cpu(zynqhdr->user_field));
+	printf("Checksum     : 0x%08x\n", le32_to_cpu(zynqhdr->checksum));
+
+	for (i = 0; i < HEADER_INTERRUPT_VECTORS; i++) {
+		if (zynqhdr->interrupt_vectors[i] == HEADER_INTERRUPT_DEFAULT)
+			continue;
+
+		printf("Modified Interrupt Vector Address [%d]: 0x%08x\n", i,
+		       le32_to_cpu(zynqhdr->interrupt_vectors[i]));
+	}
+
+	for (i = 0; i < HEADER_REGINITS; i++) {
+		if (zynqhdr->register_init[i].address == HEADER_REGINIT_NULL)
+			break;
+
+		if (i == 0)
+			printf("Custom Register Initialization:\n");
+
+		printf("    @ 0x%08x -> 0x%08x\n",
+		       le32_to_cpu(zynqhdr->register_init[i].address),
+		       le32_to_cpu(zynqhdr->register_init[i].data));
+	}
+}
+
+static int zynqimage_check_params(struct image_tool_params *params)
+{
+	if (!params)
+		return 0;
+
+	if (params->addr != 0x0) {
+		fprintf(stderr, "Error: Load Address cannot be specified.\n");
+		return -1;
+	}
+
+	/*
+	 * If the entry point is specified ensure it is 64 byte aligned.
+	 */
+	if (params->eflag && (params->ep % 64 != 0)) {
+		fprintf(stderr,
+			"Error: Entry Point must be aligned to a 64-byte boundary.\n");
+		return -1;
+	}
+
+	return !(params->lflag || params->dflag);
+}
+
+static int zynqimage_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_ZYNQIMAGE)
+		return EXIT_SUCCESS;
+	return EXIT_FAILURE;
+}
+
+static void zynqimage_parse_initparams(struct zynq_header *zynqhdr,
+	const char *filename)
+{
+	FILE *fp;
+	struct zynq_reginit reginit;
+	unsigned int reg_count = 0;
+	int r, err;
+	struct stat path_stat;
+
+	/* Expect a table of register-value pairs, e.g. "0x12345678 0x4321" */
+	fp = fopen(filename, "r");
+	if (!fp) {
+		fprintf(stderr, "Cannot open initparams file: %s\n", filename);
+		exit(1);
+	}
+
+	err = fstat(fileno(fp), &path_stat);
+	if (err) {
+		fclose(fp);
+		return;
+	}
+
+	if (!S_ISREG(path_stat.st_mode)) {
+		fclose(fp);
+		return;
+	}
+
+	do {
+		r = fscanf(fp, "%x %x", &reginit.address, &reginit.data);
+		if (r == 2) {
+			zynqhdr->register_init[reg_count] = reginit;
+			++reg_count;
+		}
+		r = fscanf(fp, "%*[^\n]\n"); /* Skip to next line */
+	} while ((r != EOF) && (reg_count < HEADER_REGINITS));
+	fclose(fp);
+}
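+
+/*
+ * Example register-initialization file (addresses and values are
+ * placeholders); one "address data" pair per line, and at most
+ * HEADER_REGINITS pairs are consumed:
+ *
+ *	0xe000a244 0x003fffff
+ *	0xe000a248 0x003fffff
+ */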
+
+static void zynqimage_set_header(void *ptr, struct stat *sbuf, int ifd,
+		struct image_tool_params *params)
+{
+	struct zynq_header *zynqhdr = (struct zynq_header *)ptr;
+	zynqimage_default_header(zynqhdr);
+
+	/* place image directly after header */
+	zynqhdr->image_offset =
+		cpu_to_le32((uint32_t)sizeof(struct zynq_header));
+	zynqhdr->image_size = cpu_to_le32((uint32_t)sbuf->st_size);
+	zynqhdr->image_stored_size = zynqhdr->image_size;
+	zynqhdr->image_load = 0x0;
+	if (params->eflag)
+		zynqhdr->image_load = cpu_to_le32((uint32_t)params->ep);
+
+	/* User can pass in text file with init list */
+	if (strlen(params->imagename2))
+		zynqimage_parse_initparams(zynqhdr, params->imagename2);
+
+	zynqhdr->checksum = zynqimage_checksum(zynqhdr);
+}
+
+U_BOOT_IMAGE_TYPE(
+	zynqimage,
+	"Xilinx Zynq Boot Image support",
+	sizeof(struct zynq_header),
+	(void *)&zynqimage_header,
+	zynqimage_check_params,
+	zynqimage_verify_header,
+	zynqimage_print_header,
+	zynqimage_set_header,
+	NULL,
+	zynqimage_check_image_types,
+	NULL,
+	NULL
+);
diff --git a/tools/u-boot-tools/zynqimage.o b/tools/u-boot-tools/zynqimage.o
new file mode 100644
index 0000000000000000000000000000000000000000..cbe874cc58b3b30de1f518b1bad0122237fe11fe
Binary files /dev/null and b/tools/u-boot-tools/zynqimage.o differ
diff --git a/tools/u-boot-tools/zynqmp_pm_cfg_obj_convert.py b/tools/u-boot-tools/zynqmp_pm_cfg_obj_convert.py
new file mode 100755
index 0000000000000000000000000000000000000000..dd27f47921310446e8381815875ed21a68c3f247
--- /dev/null
+++ b/tools/u-boot-tools/zynqmp_pm_cfg_obj_convert.py
@@ -0,0 +1,301 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2019 Luca Ceresoli <luca@lucaceresoli.net>
+
+import sys
+import re
+import struct
+import logging
+import argparse
+
+parser = argparse.ArgumentParser(
+    description='Convert a PMU configuration object from C source to a binary blob.')
+parser.add_argument('-D', '--debug', action="store_true")
+parser.add_argument(
+    "in_file", metavar='INPUT_FILE',
+    help='PMU configuration object (C source as produced by Xilinx XSDK)')
+parser.add_argument(
+    "out_file", metavar='OUTPUT_FILE',
+    help='PMU configuration object binary blob')
+args = parser.parse_args()
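+
+# Typical invocation (file names are only examples):
+#   ./zynqmp_pm_cfg_obj_convert.py pm_cfg_obj.c pm_cfg_obj.bin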
+
+logging.basicConfig(format='%(levelname)s:%(message)s',
+                    level=(logging.DEBUG if args.debug else logging.WARNING))
+
+pm_define = {
+    'PM_CAP_ACCESS'   : 0x1,
+    'PM_CAP_CONTEXT'  : 0x2,
+    'PM_CAP_WAKEUP'   : 0x4,
+
+    'NODE_UNKNOWN'    :  0,
+    'NODE_APU'        :  1,
+    'NODE_APU_0'      :  2,
+    'NODE_APU_1'      :  3,
+    'NODE_APU_2'      :  4,
+    'NODE_APU_3'      :  5,
+    'NODE_RPU'        :  6,
+    'NODE_RPU_0'      :  7,
+    'NODE_RPU_1'      :  8,
+    'NODE_PLD'        :  9,
+    'NODE_FPD'        : 10,
+    'NODE_OCM_BANK_0' : 11,
+    'NODE_OCM_BANK_1' : 12,
+    'NODE_OCM_BANK_2' : 13,
+    'NODE_OCM_BANK_3' : 14,
+    'NODE_TCM_0_A'    : 15,
+    'NODE_TCM_0_B'    : 16,
+    'NODE_TCM_1_A'    : 17,
+    'NODE_TCM_1_B'    : 18,
+    'NODE_L2'         : 19,
+    'NODE_GPU_PP_0'   : 20,
+    'NODE_GPU_PP_1'   : 21,
+    'NODE_USB_0'      : 22,
+    'NODE_USB_1'      : 23,
+    'NODE_TTC_0'      : 24,
+    'NODE_TTC_1'      : 25,
+    'NODE_TTC_2'      : 26,
+    'NODE_TTC_3'      : 27,
+    'NODE_SATA'       : 28,
+    'NODE_ETH_0'      : 29,
+    'NODE_ETH_1'      : 30,
+    'NODE_ETH_2'      : 31,
+    'NODE_ETH_3'      : 32,
+    'NODE_UART_0'     : 33,
+    'NODE_UART_1'     : 34,
+    'NODE_SPI_0'      : 35,
+    'NODE_SPI_1'      : 36,
+    'NODE_I2C_0'      : 37,
+    'NODE_I2C_1'      : 38,
+    'NODE_SD_0'       : 39,
+    'NODE_SD_1'       : 40,
+    'NODE_DP'         : 41,
+    'NODE_GDMA'       : 42,
+    'NODE_ADMA'       : 43,
+    'NODE_NAND'       : 44,
+    'NODE_QSPI'       : 45,
+    'NODE_GPIO'       : 46,
+    'NODE_CAN_0'      : 47,
+    'NODE_CAN_1'      : 48,
+    'NODE_EXTERN'     : 49,
+    'NODE_APLL'       : 50,
+    'NODE_VPLL'       : 51,
+    'NODE_DPLL'       : 52,
+    'NODE_RPLL'       : 53,
+    'NODE_IOPLL'      : 54,
+    'NODE_DDR'        : 55,
+    'NODE_IPI_APU'    : 56,
+    'NODE_IPI_RPU_0'  : 57,
+    'NODE_GPU'        : 58,
+    'NODE_PCIE'       : 59,
+    'NODE_PCAP'       : 60,
+    'NODE_RTC'        : 61,
+    'NODE_LPD'        : 62,
+    'NODE_VCU'        : 63,
+    'NODE_IPI_RPU_1'  : 64,
+    'NODE_IPI_PL_0'   : 65,
+    'NODE_IPI_PL_1'   : 66,
+    'NODE_IPI_PL_2'   : 67,
+    'NODE_IPI_PL_3'   : 68,
+    'NODE_PL'         : 69,
+    'NODE_ID_MA'      : 70,
+
+    'XILPM_RESET_PCIE_CFG'         : 1000,
+    'XILPM_RESET_PCIE_BRIDGE'      : 1001,
+    'XILPM_RESET_PCIE_CTRL'        : 1002,
+    'XILPM_RESET_DP'               : 1003,
+    'XILPM_RESET_SWDT_CRF'         : 1004,
+    'XILPM_RESET_AFI_FM5'          : 1005,
+    'XILPM_RESET_AFI_FM4'          : 1006,
+    'XILPM_RESET_AFI_FM3'          : 1007,
+    'XILPM_RESET_AFI_FM2'          : 1008,
+    'XILPM_RESET_AFI_FM1'          : 1009,
+    'XILPM_RESET_AFI_FM0'          : 1010,
+    'XILPM_RESET_GDMA'             : 1011,
+    'XILPM_RESET_GPU_PP1'          : 1012,
+    'XILPM_RESET_GPU_PP0'          : 1013,
+    'XILPM_RESET_GPU'              : 1014,
+    'XILPM_RESET_GT'               : 1015,
+    'XILPM_RESET_SATA'             : 1016,
+    'XILPM_RESET_ACPU3_PWRON'      : 1017,
+    'XILPM_RESET_ACPU2_PWRON'      : 1018,
+    'XILPM_RESET_ACPU1_PWRON'      : 1019,
+    'XILPM_RESET_ACPU0_PWRON'      : 1020,
+    'XILPM_RESET_APU_L2'           : 1021,
+    'XILPM_RESET_ACPU3'            : 1022,
+    'XILPM_RESET_ACPU2'            : 1023,
+    'XILPM_RESET_ACPU1'            : 1024,
+    'XILPM_RESET_ACPU0'            : 1025,
+    'XILPM_RESET_DDR'              : 1026,
+    'XILPM_RESET_APM_FPD'          : 1027,
+    'XILPM_RESET_SOFT'             : 1028,
+    'XILPM_RESET_GEM0'             : 1029,
+    'XILPM_RESET_GEM1'             : 1030,
+    'XILPM_RESET_GEM2'             : 1031,
+    'XILPM_RESET_GEM3'             : 1032,
+    'XILPM_RESET_QSPI'             : 1033,
+    'XILPM_RESET_UART0'            : 1034,
+    'XILPM_RESET_UART1'            : 1035,
+    'XILPM_RESET_SPI0'             : 1036,
+    'XILPM_RESET_SPI1'             : 1037,
+    'XILPM_RESET_SDIO0'            : 1038,
+    'XILPM_RESET_SDIO1'            : 1039,
+    'XILPM_RESET_CAN0'             : 1040,
+    'XILPM_RESET_CAN1'             : 1041,
+    'XILPM_RESET_I2C0'             : 1042,
+    'XILPM_RESET_I2C1'             : 1043,
+    'XILPM_RESET_TTC0'             : 1044,
+    'XILPM_RESET_TTC1'             : 1045,
+    'XILPM_RESET_TTC2'             : 1046,
+    'XILPM_RESET_TTC3'             : 1047,
+    'XILPM_RESET_SWDT_CRL'         : 1048,
+    'XILPM_RESET_NAND'             : 1049,
+    'XILPM_RESET_ADMA'             : 1050,
+    'XILPM_RESET_GPIO'             : 1051,
+    'XILPM_RESET_IOU_CC'           : 1052,
+    'XILPM_RESET_TIMESTAMP'        : 1053,
+    'XILPM_RESET_RPU_R50'          : 1054,
+    'XILPM_RESET_RPU_R51'          : 1055,
+    'XILPM_RESET_RPU_AMBA'         : 1056,
+    'XILPM_RESET_OCM'              : 1057,
+    'XILPM_RESET_RPU_PGE'          : 1058,
+    'XILPM_RESET_USB0_CORERESET'   : 1059,
+    'XILPM_RESET_USB1_CORERESET'   : 1060,
+    'XILPM_RESET_USB0_HIBERRESET'  : 1061,
+    'XILPM_RESET_USB1_HIBERRESET'  : 1062,
+    'XILPM_RESET_USB0_APB'         : 1063,
+    'XILPM_RESET_USB1_APB'         : 1064,
+    'XILPM_RESET_IPI'              : 1065,
+    'XILPM_RESET_APM_LPD'          : 1066,
+    'XILPM_RESET_RTC'              : 1067,
+    'XILPM_RESET_SYSMON'           : 1068,
+    'XILPM_RESET_AFI_FM6'          : 1069,
+    'XILPM_RESET_LPD_SWDT'         : 1070,
+    'XILPM_RESET_FPD'              : 1071,
+    'XILPM_RESET_RPU_DBG1'         : 1072,
+    'XILPM_RESET_RPU_DBG0'         : 1073,
+    'XILPM_RESET_DBG_LPD'          : 1074,
+    'XILPM_RESET_DBG_FPD'          : 1075,
+    'XILPM_RESET_APLL'             : 1076,
+    'XILPM_RESET_DPLL'             : 1077,
+    'XILPM_RESET_VPLL'             : 1078,
+    'XILPM_RESET_IOPLL'            : 1079,
+    'XILPM_RESET_RPLL'             : 1080,
+    'XILPM_RESET_GPO3_PL_0'        : 1081,
+    'XILPM_RESET_GPO3_PL_1'        : 1082,
+    'XILPM_RESET_GPO3_PL_2'        : 1083,
+    'XILPM_RESET_GPO3_PL_3'        : 1084,
+    'XILPM_RESET_GPO3_PL_4'        : 1085,
+    'XILPM_RESET_GPO3_PL_5'        : 1086,
+    'XILPM_RESET_GPO3_PL_6'        : 1087,
+    'XILPM_RESET_GPO3_PL_7'        : 1088,
+    'XILPM_RESET_GPO3_PL_8'        : 1089,
+    'XILPM_RESET_GPO3_PL_9'        : 1090,
+    'XILPM_RESET_GPO3_PL_10'       : 1091,
+    'XILPM_RESET_GPO3_PL_11'       : 1092,
+    'XILPM_RESET_GPO3_PL_12'       : 1093,
+    'XILPM_RESET_GPO3_PL_13'       : 1094,
+    'XILPM_RESET_GPO3_PL_14'       : 1095,
+    'XILPM_RESET_GPO3_PL_15'       : 1096,
+    'XILPM_RESET_GPO3_PL_16'       : 1097,
+    'XILPM_RESET_GPO3_PL_17'       : 1098,
+    'XILPM_RESET_GPO3_PL_18'       : 1099,
+    'XILPM_RESET_GPO3_PL_19'       : 1100,
+    'XILPM_RESET_GPO3_PL_20'       : 1101,
+    'XILPM_RESET_GPO3_PL_21'       : 1102,
+    'XILPM_RESET_GPO3_PL_22'       : 1103,
+    'XILPM_RESET_GPO3_PL_23'       : 1104,
+    'XILPM_RESET_GPO3_PL_24'       : 1105,
+    'XILPM_RESET_GPO3_PL_25'       : 1106,
+    'XILPM_RESET_GPO3_PL_26'       : 1107,
+    'XILPM_RESET_GPO3_PL_27'       : 1108,
+    'XILPM_RESET_GPO3_PL_28'       : 1109,
+    'XILPM_RESET_GPO3_PL_29'       : 1110,
+    'XILPM_RESET_GPO3_PL_30'       : 1111,
+    'XILPM_RESET_GPO3_PL_31'       : 1112,
+    'XILPM_RESET_RPU_LS'           : 1113,
+    'XILPM_RESET_PS_ONLY'          : 1114,
+    'XILPM_RESET_PL'               : 1115,
+    'XILPM_RESET_GPIO5_EMIO_92'    : 1116,
+    'XILPM_RESET_GPIO5_EMIO_93'    : 1117,
+    'XILPM_RESET_GPIO5_EMIO_94'    : 1118,
+    'XILPM_RESET_GPIO5_EMIO_95'    : 1119,
+
+    'PM_CONFIG_MASTER_SECTION_ID'        : 0x101,
+    'PM_CONFIG_SLAVE_SECTION_ID'         : 0x102,
+    'PM_CONFIG_PREALLOC_SECTION_ID'      : 0x103,
+    'PM_CONFIG_POWER_SECTION_ID'         : 0x104,
+    'PM_CONFIG_RESET_SECTION_ID'         : 0x105,
+    'PM_CONFIG_SHUTDOWN_SECTION_ID'      : 0x106,
+    'PM_CONFIG_SET_CONFIG_SECTION_ID'    : 0x107,
+    'PM_CONFIG_GPO_SECTION_ID'           : 0x108,
+
+    'PM_SLAVE_FLAG_IS_SHAREABLE'         : 0x1,
+    'PM_MASTER_USING_SLAVE_MASK'         : 0x2,
+
+    'PM_CONFIG_GPO1_MIO_PIN_34_MAP'      : (1 << 10),
+    'PM_CONFIG_GPO1_MIO_PIN_35_MAP'      : (1 << 11),
+    'PM_CONFIG_GPO1_MIO_PIN_36_MAP'      : (1 << 12),
+    'PM_CONFIG_GPO1_MIO_PIN_37_MAP'      : (1 << 13),
+
+    'PM_CONFIG_GPO1_BIT_2_MASK'          : (1 << 2),
+    'PM_CONFIG_GPO1_BIT_3_MASK'          : (1 << 3),
+    'PM_CONFIG_GPO1_BIT_4_MASK'          : (1 << 4),
+    'PM_CONFIG_GPO1_BIT_5_MASK'          : (1 << 5),
+
+    'SUSPEND_TIMEOUT'                    : 0xFFFFFFFF,
+
+    'PM_CONFIG_IPI_PSU_CORTEXA53_0_MASK' : 0x00000001,
+    'PM_CONFIG_IPI_PSU_CORTEXR5_0_MASK'  : 0x00000100,
+    'PM_CONFIG_IPI_PSU_CORTEXR5_1_MASK'  : 0x00000200,
+}
+
+in_file  = open(args.in_file,  mode='r')
+out_file = open(args.out_file, mode='wb')
+
+num_re   = re.compile(r"^([0-9]+)U?$")
+const_re = re.compile(r"^([A-Z_][A-Z0-9_]*)$")
+
+def process_item(item):
+    logging.debug("* ITEM   " + item)
+
+    value = 0
+    for item in item.split('|'):
+        item = item.strip()
+
+        num_match   = num_re  .match(item)
+        const_match = const_re.match(item)
+
+        if num_match:
+            num = int(num_match.group(1))
+            logging.debug("  - num  " + str(num))
+            value |= num
+        elif const_match:
+            name = const_match.group(1)
+            if not name in pm_define:
+                sys.stderr.write("Unknown define " + name + "!\n")
+                exit(1)
+            num = pm_define[name]
+            logging.debug("  - def  " + hex(num))
+            value |= num
+
+    logging.debug("  = res  " + hex(value))
+    out_file.write(struct.pack('<L', value))
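+
+# For example, the array item "PM_CAP_ACCESS | PM_CAP_WAKEUP" is split on
+# '|', each operand is resolved via pm_define (0x1 | 0x4 = 0x5) and the
+# result is emitted as the little-endian word b'\x05\x00\x00\x00'.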
+
+
+# Read all code
+code = in_file.read()
+
+# remove comments
+code = re.sub(r'//.*?\n|/\*.*?\*/', '', code, flags=re.DOTALL)
+
+# remove everything outside the XPm_ConfigObject array definition
+code = re.search('const u32 XPm_ConfigObject.*= {\n(.*)};',
+                 code, flags=re.DOTALL).group(1)
+
+# Process each comma-separated array item
+for item in code.split(','):
+    item = item.strip()
+    if item:
+        process_item(item)
+
+print("Wrote %d bytes" % out_file.tell())
diff --git a/tools/u-boot-tools/zynqmp_psu_init_minimize.sh b/tools/u-boot-tools/zynqmp_psu_init_minimize.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4ee418f07eac9013016a7e5d294b49046e32a694
--- /dev/null
+++ b/tools/u-boot-tools/zynqmp_psu_init_minimize.sh
@@ -0,0 +1,147 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2018 Michal Simek <michal.simek@xilinx.com>
+# Copyright (C) 2019 Luca Ceresoli <luca@lucaceresoli.net>
+
+usage()
+{
+    cat <<EOF
+
+Transform a pair of psu_init_gpl.c and .h files produced by the Xilinx
+Vivado tool for ZynqMP into a smaller psu_init_gpl.c file that is almost
+checkpatch compliant. Minor coding style fixes might still be needed. It
+must be run from the top-level U-Boot source directory.
+
+Usage:   zynqmp_psu_init_minimize.sh INPUT_DIR OUTPUT_DIR
+Example: zynqmp_psu_init_minimize.sh \\
+                 /path/to/original/psu_init_gpl_c_and_h/ \\
+                 board/xilinx/zynqmp/<my_board>/
+
+Notes:   INPUT_DIR must contain both .c and .h files.
+         If INPUT_DIR and OUTPUT_DIR are the same directory,
+         psu_init_gpl.c will be overwritten.
+
+EOF
+}
+
+set -o errexit -o errtrace
+set -o nounset
+
+if [ $# -ne 2 ]
+then
+    usage >&2
+    exit 1
+fi
+
+IN="${1}/psu_init_gpl.c"
+OUT="${2}/psu_init_gpl.c"
+TMP=$(mktemp /tmp/psu_init_gpl.XXXXXX)
+trap "rm ${TMP}" ERR
+
+# Step through a temp file to allow both $IN!=$OUT and $IN==$OUT
+sed -e '/sleep.h/d' \
+    -e '/xil_io.h/d' \
+    ${IN} >${TMP}
+cp ${TMP} ${OUT}
+
+# preprocess to expand defines, then remove cpp lines starting with '#'
+gcc -I${1} -E ${OUT} -o ${TMP}
+sed '/^#/d' ${TMP} >${OUT}
+
+# Remove trivial code before psu_pll_init_data()
+sed -ni '/psu_pll_init_data/,$p' ${OUT}
+
+# Functions are lowercase in U-Boot, rename them
+sed -i 's/PSU_Mask_Write/psu_mask_write/g' ${OUT}
+sed -i 's/mask_pollOnValue/mask_pollonvalue/g' ${OUT}
+sed -i 's/RegValue/regvalue/g' ${OUT}
+sed -i 's/MaskStatus/maskstatus/g' ${OUT}
+
+sed -i '/&= psu_peripherals_powerdwn_data()/d' ${OUT}
+
+FUNCS_TO_REMOVE="psu_protection
+psu_..._protection
+psu_init_xppu_aper_ram
+mask_delay(u32
+mask_read(u32
+mask_poll(u32
+mask_pollonvalue(u32
+psu_ps_pl_reset_config_data
+psu_ps_pl_isolation_removal_data
+psu_apply_master_tz
+psu_post_config_data
+psu_peripherals_powerdwn_data
+psu_init_ddr_self_refresh
+xmpu
+xppu
+"
+for i in $FUNCS_TO_REMOVE; do
+sed -i "/$i/,/^}$/d" ${OUT}
+done
+
+scripts/Lindent ${OUT}
+
+# Prepend 'static' to internal functions
+sed -i 's/^.*data(void)$/static &/g' ${OUT}
+sed -i 's/^.*psu_afi_config(void)$/static &/g' ${OUT}
+sed -i 's/^void init_peripheral/static &/g' ${OUT}
+sed -i 's/^int serdes/static &/g' ${OUT}
+sed -i 's/^int init_serdes/static &/g' ${OUT}
+sed -i 's/^unsigned long /static &/g' ${OUT}
+
+sed -i 's/()$/(void)/g' ${OUT}
+sed -i 's/0X/0x/g' ${OUT}
+
+# return (0) -> return 0
+sed -ri 's/return \(([0-9]+)\)/return \1/g' ${OUT}
+
+# Add header
+cat << EOF >${TMP}
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (c) Copyright 2015 Xilinx, Inc. All rights reserved.
+ */
+
+#include <asm/arch/psu_init_gpl.h>
+#include <xil_io.h>
+
+EOF
+
+cat ${OUT} >>${TMP}
+cp ${TMP} ${OUT}
+
+# Temporarily convert newlines to do some mangling across lines
+tr "\n" "\r" <${OUT} >${TMP}
+
+# Cleanup empty loops. E.g.:
+# |while (e) {|
+# |           | ==> |while (e)|
+# |    }      |     |    ;    |
+# |           |
+sed -i -r 's| \{\r+(\t*)\}\r\r|\n\1\t;\n|g' ${TMP}
+
+# Remove empty line between variable declaration
+sed -i -r 's|\r(\r\t(unsigned )?int )|\1|g' ${TMP}
+
+# Remove empty lines at function beginning/end
+sed -i -e 's|\r{\r\r|\r{\r|g' ${TMP}
+sed -i -e 's|\r\r}\r|\r}\r|g' ${TMP}
+
+# Remove empty lines after '{' line
+sed -i -e 's| {\r\r| {\r|g' ${TMP}
+
+# Remove braces {} around single statement blocks. E.g.:
+# | while (e) { |    | while (e) |
+# |     stg();  | => |     stg();|
+# | }           |
+sed -i -r 's| \{(\r[^\r]*;)\r\t*\}|\1|g' ${TMP}
+
+# Remove unnecessary parentheses around 'n_code <= 0x3C' and similar. E.g.:
+# if ((p_code >= 0x26) && ...) -> if (p_code >= 0x26 && ...)
+sed -i -r 's|\((._code .= [x[:xdigit:]]+)\)|\1|g' ${TMP}
+
+# Convert back newlines
+tr "\r" "\n" <${TMP} >${OUT}
+
+rm ${TMP}
diff --git a/tools/u-boot-tools/zynqmpbif.c b/tools/u-boot-tools/zynqmpbif.c
new file mode 100644
index 0000000000000000000000000000000000000000..8c47107c7b9456aab5a369520a9aa6534215f883
--- /dev/null
+++ b/tools/u-boot-tools/zynqmpbif.c
@@ -0,0 +1,1017 @@
+/*
+ * Copyright (C) 2018 Alexander Graf <agraf@suse.de>
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#include "imagetool.h"
+#include "mkimage.h"
+#include "zynqmpimage.h"
+#include <elf.h>
+#include <image.h>
+
+struct bif_entry {
+	const char *filename;
+	uint64_t flags;
+	uint64_t dest_cpu;
+	uint64_t exp_lvl;
+	uint64_t dest_dev;
+	uint64_t load;
+	uint64_t entry;
+	size_t offset;
+};
+
+enum bif_flag {
+	BIF_FLAG_AESKEYFILE,
+	BIF_FLAG_INIT,
+	BIF_FLAG_UDF_BH,
+	BIF_FLAG_HEADERSIGNATURE,
+	BIF_FLAG_PPKFILE,
+	BIF_FLAG_PSKFILE,
+	BIF_FLAG_SPKFILE,
+	BIF_FLAG_SSKFILE,
+	BIF_FLAG_SPKSIGNATURE,
+	BIF_FLAG_FSBL_CONFIG,
+	BIF_FLAG_AUTH_PARAMS,
+	BIF_FLAG_KEYSRC_ENCRYPTION,
+	BIF_FLAG_PMUFW_IMAGE,
+	BIF_FLAG_BOOTLOADER,
+	BIF_FLAG_TZ,
+	BIF_FLAG_BH_KEY_IV,
+	BIF_FLAG_BH_KEYFILE,
+	BIF_FLAG_PUF_FILE,
+	BIF_FLAG_AARCH32,
+	BIF_FLAG_PART_OWNER_UBOOT,
+
+	/* Internal flags */
+	BIF_FLAG_BIT_FILE,
+	BIF_FLAG_ELF_FILE,
+	BIF_FLAG_BIN_FILE,
+};
+
+struct bif_flags {
+	const char name[32];
+	uint64_t flag;
+	char *(*parse)(char *line, struct bif_entry *bf);
+};
+
+struct bif_file_type {
+	const char name[32];
+	uint32_t header;
+	int (*add)(struct bif_entry *bf);
+};
+
+struct bif_output {
+	size_t data_len;
+	char *data;
+	struct image_header_table *imgheader;
+	struct zynqmp_header *header;
+	struct partition_header *last_part;
+};
+
+struct bif_output bif_output;
+
+static uint32_t zynqmp_csum(void *start, void *end)
+{
+	uint32_t checksum = 0;
+	uint32_t *ptr32 = start;
+
+	while (ptr32 != end) {
+		checksum += le32_to_cpu(*ptr32);
+		ptr32++;
+	}
+
+	return ~checksum;
+}
+
+static int zynqmpbif_check_params(struct image_tool_params *params)
+{
+	if (!params)
+		return 0;
+
+	if (params->addr != 0x0) {
+		fprintf(stderr, "Error: Load Address can not be specified.\n");
+		return -1;
+	}
+
+	if (params->eflag) {
+		fprintf(stderr, "Error: Entry Point can not be specified.\n");
+		return -1;
+	}
+
+	return !(params->lflag || params->dflag);
+}
+
+static int zynqmpbif_check_image_types(uint8_t type)
+{
+	return (type == IH_TYPE_ZYNQMPBIF) ? EXIT_SUCCESS : EXIT_FAILURE;
+}
+
+static char *parse_dest_cpu(char *line, struct bif_entry *bf)
+{
+	uint64_t i;
+
+	for (i = 0; i < ARRAY_SIZE(dest_cpus); i++) {
+		if (!strncmp(line, dest_cpus[i], strlen(dest_cpus[i]))) {
+			bf->dest_cpu = i << PART_ATTR_DEST_CPU_SHIFT;
+			return line + strlen(dest_cpus[i]);
+		}
+
+		/* a5x can also be written as a53 */
+		if (!strncmp(dest_cpus[i], "a5x", 3)) {
+			char a53[] = "a53-X";
+
+			a53[4] = dest_cpus[i][4];
+			if (!strncmp(line, a53, strlen(a53))) {
+				bf->dest_cpu = i << PART_ATTR_DEST_CPU_SHIFT;
+				return line + strlen(a53);
+			}
+		}
+	}
+
+	return line;
+}
+
+static char *parse_el(char *line, struct bif_entry *bf)
+{
+	const char *dest_els[] = { "none", "el-0", "el-1", "el-2", "el-3" };
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dest_els); i++) {
+		if (!strncmp(line, dest_els[i], strlen(dest_els[i]))) {
+			bf->exp_lvl = i;
+			return line + strlen(dest_els[i]);
+		}
+	}
+
+	return line;
+}
+
+static char *parse_load(char *line, struct bif_entry *bf)
+{
+	char *endptr;
+
+	bf->load = strtoll(line, &endptr, 0);
+
+	return endptr;
+}
+
+static char *parse_entry(char *line, struct bif_entry *bf)
+{
+	char *endptr;
+
+	bf->entry = strtoll(line, &endptr, 0);
+
+	return endptr;
+}
+
+static char *parse_offset(char *line, struct bif_entry *bf)
+{
+	char *endptr;
+
+	bf->offset = strtoll(line, &endptr, 0);
+
+	return endptr;
+}
+
+static char *parse_partition_owner(char *line, struct bif_entry *bf)
+{
+	char *endptr = NULL;
+
+	if (!strncmp(line, "fsbl", 4)) {
+		endptr = line + 4;
+	} else if (!strncmp(line, "uboot", 5)) {
+		bf->flags |= 1ULL << BIF_FLAG_PART_OWNER_UBOOT;
+		endptr = line + 5;
+	} else {
+		printf("ERROR: Unknown partition type '%s'\n", line);
+	}
+
+	return endptr;
+}
+
+static const struct bif_flags bif_flags[] = {
+	{ "fsbl_config", BIF_FLAG_FSBL_CONFIG },
+	{ "trustzone", BIF_FLAG_TZ },
+	{ "pmufw_image", BIF_FLAG_PMUFW_IMAGE },
+	{ "bootloader", BIF_FLAG_BOOTLOADER },
+	{ "destination_cpu=", 0, parse_dest_cpu },
+	{ "exception_level=", 0, parse_el },
+	{ "load=", 0, parse_load },
+	{ "startup=", 0, parse_entry },
+	{ "offset=", 0, parse_offset },
+	{ "partition_owner=", 0, parse_partition_owner },
+};
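+
+/*
+ * Illustrative .bif description accepted by the parser in
+ * zynqmpbif_copy_image() below (file names are placeholders); attribute
+ * names come from the bif_flags[] table above, CPU names from the
+ * dest_cpus[] table ("a53-N" is accepted as an alias for "a5x-N") and
+ * exception levels from parse_el():
+ *
+ *	{
+ *		[pmufw_image] pmu_fw.bin
+ *		[bootloader, destination_cpu=a53-0] spl/u-boot-spl.bin
+ *		[destination_cpu=a53-0, exception_level=el-2] bl31.bin
+ *	}
+ */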
+
+static char *read_full_file(const char *filename, size_t *size)
+{
+	char *buf, *bufp;
+	struct stat sbuf;
+	int len = 0, r, fd;
+
+	fd = open(filename, O_RDONLY);
+	if (fd < 0)
+		return NULL;
+
+	if (fstat(fd, &sbuf) < 0)
+		return NULL;
+
+	if (size)
+		*size = sbuf.st_size;
+
+	/* one extra byte so text callers can rely on NUL termination */
+	buf = malloc(sbuf.st_size + 1);
+	if (!buf)
+		return NULL;
+	buf[sbuf.st_size] = '\0';
+
+	bufp = buf;
+	while (len < sbuf.st_size) {
+		r = read(fd, bufp, sbuf.st_size - len);
+		if (r < 0)
+			return NULL;
+		len += r;
+		bufp += r;
+	}
+
+	close(fd);
+
+	return buf;
+}
+
+static int bif_add_blob(const void *data, size_t len, size_t *offset)
+{
+	size_t new_size;
+	uintptr_t header_off;
+	uintptr_t last_part_off;
+	uintptr_t imgheader_off;
+	uintptr_t old_data = (uintptr_t)bif_output.data;
+	void *new_data;
+
+	header_off = (uintptr_t)bif_output.header - old_data;
+	last_part_off = (uintptr_t)bif_output.last_part - old_data;
+	imgheader_off = (uintptr_t)bif_output.imgheader - old_data;
+
+	if (offset && *offset) {
+		/* Pad to a given offset */
+		if (bif_output.data_len > *offset) {
+			printf("Can not pad to offset %zx\n", *offset);
+			return -1;
+		}
+
+		bif_output.data_len = *offset;
+	}
+
+	new_size = ROUND(bif_output.data_len + len, 64);
+	new_data = realloc(bif_output.data, new_size);
+	memcpy(new_data + bif_output.data_len, data, len);
+	if (offset)
+		*offset = bif_output.data_len;
+	bif_output.data = new_data;
+	bif_output.data_len = new_size;
+
+	/* Readjust internal pointers */
+	if (bif_output.header)
+		bif_output.header = new_data + header_off;
+	if (bif_output.last_part)
+		bif_output.last_part = new_data + last_part_off;
+	if (bif_output.imgheader)
+		bif_output.imgheader = new_data + imgheader_off;
+
+	return 0;
+}
+
+static int bif_init(void)
+{
+	struct zynqmp_header header = { { 0 } };
+	int r;
+
+	zynqmpimage_default_header(&header);
+
+	r = bif_add_blob(&header, sizeof(header), NULL);
+	if (r)
+		return r;
+
+	bif_output.header = (void *)bif_output.data;
+
+	return 0;
+}
+
+static int bif_add_pmufw(struct bif_entry *bf, const char *data, size_t len)
+{
+	int r;
+
+	if (bif_output.header->image_offset) {
+		printf("PMUFW expected before bootloader in your .bif file!\n");
+		return -1;
+	}
+
+	r = bif_add_blob(data, len, &bf->offset);
+	if (r)
+		return r;
+
+	len = ROUND(len, 64);
+	bif_output.header->pfw_image_length = cpu_to_le32(len);
+	bif_output.header->total_pfw_image_length = cpu_to_le32(len);
+	bif_output.header->image_offset = cpu_to_le32(bf->offset);
+
+	return 0;
+}
+
+static int bif_add_part(struct bif_entry *bf, const char *data, size_t len)
+{
+	size_t parthdr_offset = 0;
+	size_t len_padded = ROUND(len, 4);
+
+	struct partition_header parthdr = {
+		.len_enc = cpu_to_le32(len_padded / 4),
+		.len_unenc = cpu_to_le32(len_padded / 4),
+		.len = cpu_to_le32(len_padded / 4),
+		.entry_point = cpu_to_le64(bf->entry),
+		.load_address = cpu_to_le64(bf->load),
+	};
+	int r;
+	uint32_t csum;
+
+	if (len < len_padded) {
+		char *newdata = malloc(len_padded);
+		memcpy(newdata, data, len);
+		memset(newdata + len, 0, len_padded - len);
+		data = newdata;
+	}
+
+	if (bf->flags & (1ULL << BIF_FLAG_PMUFW_IMAGE))
+		return bif_add_pmufw(bf, data, len);
+
+	r = bif_add_blob(data, len, &bf->offset);
+	if (r)
+		return r;
+
+	parthdr.offset = cpu_to_le32(bf->offset / 4);
+
+	if (bf->flags & (1ULL << BIF_FLAG_BOOTLOADER)) {
+		if (bif_output.last_part) {
+			printf("ERROR: Bootloader expected before others\n");
+			return -1;
+		}
+
+		parthdr.offset = cpu_to_le32(bif_output.header->image_offset);
+		parthdr.len = cpu_to_le32((bf->offset + len -
+			bif_output.header->image_offset) / 4);
+		parthdr.len_enc = parthdr.len;
+		parthdr.len_unenc = parthdr.len;
+	}
+
+	/* Normalize EL */
+	bf->exp_lvl = bf->exp_lvl ? bf->exp_lvl - 1 : 3;
+	parthdr.attributes |= bf->exp_lvl << PART_ATTR_TARGET_EL_SHIFT;
+	parthdr.attributes |= bf->dest_dev;
+	parthdr.attributes |= bf->dest_cpu;
+	if (bf->flags & (1ULL << BIF_FLAG_TZ))
+		parthdr.attributes |= PART_ATTR_TZ_SECURE;
+	if (bf->flags & (1ULL << BIF_FLAG_PART_OWNER_UBOOT))
+		parthdr.attributes |= PART_ATTR_PART_OWNER_UBOOT;
+	switch (bf->dest_cpu) {
+	case PART_ATTR_DEST_CPU_NONE:
+	case PART_ATTR_DEST_CPU_A53_0:
+	case PART_ATTR_DEST_CPU_A53_1:
+	case PART_ATTR_DEST_CPU_A53_2:
+	case PART_ATTR_DEST_CPU_A53_3:
+		if (bf->flags & (1ULL << BIF_FLAG_AARCH32))
+			parthdr.attributes |= PART_ATTR_A53_EXEC_AARCH32;
+	}
+
+	csum = zynqmp_csum(&parthdr, &parthdr.checksum);
+	parthdr.checksum = cpu_to_le32(csum);
+
+	r = bif_add_blob(&parthdr, sizeof(parthdr), &parthdr_offset);
+	if (r)
+		return r;
+
+	/* Add image header table if not there yet */
+	if (!bif_output.imgheader) {
+		size_t imghdr_off = 0;
+		struct image_header_table imghdr = {
+			.version = cpu_to_le32(0x01020000),
+			.nr_parts = 0,
+		};
+
+		r = bif_add_blob(&imghdr, sizeof(imghdr), &imghdr_off);
+		if (r)
+			return r;
+
+		bif_output.header->image_header_table_offset = imghdr_off;
+		bif_output.imgheader = (void *)(bif_output.data + imghdr_off);
+	}
+
+	bif_output.imgheader->nr_parts = cpu_to_le32(le32_to_cpu(
+		bif_output.imgheader->nr_parts) + 1);
+
+	/* Link to this partition header */
+	if (bif_output.last_part) {
+		bif_output.last_part->next_partition_offset =
+			cpu_to_le32(parthdr_offset / 4);
+
+		/* Recalc checksum of last_part */
+		csum = zynqmp_csum(bif_output.last_part,
+				   &bif_output.last_part->checksum);
+		bif_output.last_part->checksum = cpu_to_le32(csum);
+	} else {
+		bif_output.imgheader->partition_header_offset =
+			cpu_to_le32(parthdr_offset / 4);
+	}
+	bif_output.last_part = (void *)(bif_output.data + parthdr_offset);
+
+	if (bf->flags & (1ULL << BIF_FLAG_BOOTLOADER)) {
+		bif_output.header->image_load = cpu_to_le32(bf->load);
+		if (!bif_output.header->image_offset)
+			bif_output.header->image_offset =
+				cpu_to_le32(bf->offset);
+		bif_output.header->image_size = cpu_to_le32(len_padded);
+		bif_output.header->image_stored_size = cpu_to_le32(len_padded);
+
+		bif_output.header->image_attributes &= ~HEADER_CPU_SELECT_MASK;
+		switch (bf->dest_cpu) {
+		default:
+		case PART_ATTR_DEST_CPU_A53_0:
+			if (bf->flags & (1ULL << BIF_FLAG_AARCH32))
+				bif_output.header->image_attributes |=
+					HEADER_CPU_SELECT_A53_32BIT;
+			else
+				bif_output.header->image_attributes |=
+					HEADER_CPU_SELECT_A53_64BIT;
+			break;
+		case PART_ATTR_DEST_CPU_R5_0:
+			bif_output.header->image_attributes |=
+				HEADER_CPU_SELECT_R5_SINGLE;
+			break;
+		case PART_ATTR_DEST_CPU_R5_L:
+			bif_output.header->image_attributes |=
+				HEADER_CPU_SELECT_R5_DUAL;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/* Add .bit bitstream */
+static int bif_add_bit(struct bif_entry *bf)
+{
+	char *bit = read_full_file(bf->filename, NULL);
+	char *bitbin;
+	uint8_t initial_header[] = { 0x00, 0x09, 0x0f, 0xf0, 0x0f, 0xf0, 0x0f,
+				     0xf0, 0x0f, 0xf0, 0x00, 0x00, 0x01, 0x61 };
+	uint16_t len;
+	uint32_t bitlen;
+	int i;
+
+	if (!bit)
+		return -1;
+
+	/* Skip initial header */
+	if (memcmp(bit, initial_header, sizeof(initial_header)))
+		return -1;
+
+	bit += sizeof(initial_header);
+
+	/* Design name */
+	len = be16_to_cpu(*(uint16_t *)bit);
+	bit += sizeof(uint16_t);
+	debug("Design: %s\n", bit);
+	bit += len;
+
+	/* Device identifier */
+	if (*bit != 'b')
+		return -1;
+	bit++;
+	len = be16_to_cpu(*(uint16_t *)bit);
+	bit += sizeof(uint16_t);
+	debug("Device: %s\n", bit);
+	bit += len;
+
+	/* Date */
+	if (*bit != 'c')
+		return -1;
+	bit++;
+	len = be16_to_cpu(*(uint16_t *)bit);
+	bit += sizeof(uint16_t);
+	debug("Date: %s\n", bit);
+	bit += len;
+
+	/* Time */
+	if (*bit != 'd')
+		return -1;
+	bit++;
+	len = be16_to_cpu(*(uint16_t *)bit);
+	bit += sizeof(uint16_t);
+	debug("Time: %s\n", bit);
+	bit += len;
+
+	/* Bitstream length */
+	if (*bit != 'e')
+		return -1;
+	bit++;
+	bitlen = be32_to_cpu(*(uint32_t *)bit);
+	bit += sizeof(uint32_t);
+	bitbin = bit;
+
+	debug("Bitstream Length: 0x%x\n", bitlen);
+	for (i = 0; i < bitlen; i += sizeof(uint32_t)) {
+		uint32_t *bitbin32 = (uint32_t *)&bitbin[i];
+		*bitbin32 = __swab32(*bitbin32);
+	}
+
+	if (!bf->dest_dev)
+		bf->dest_dev = PART_ATTR_DEST_DEVICE_PL;
+
+	bf->load = 0xffffffff;
+	bf->entry = 0;
+
+	bf->flags |= 1ULL << BIF_FLAG_BIT_FILE;
+	return bif_add_part(bf, bit, bitlen);
+}
+
+/* Add .bin bitstream */
+static int bif_add_bin(struct bif_entry *bf)
+{
+	size_t size;
+	char *bin = read_full_file(bf->filename, &size);
+
+	if (!bin)
+		return -1;
+
+	if (!bf->dest_dev)
+		bf->dest_dev = PART_ATTR_DEST_DEVICE_PS;
+
+	bf->flags |= 1ULL << BIF_FLAG_BIN_FILE;
+	return bif_add_part(bf, bin, size);
+}
+
+/* Add elf file */
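+/*
+ * The elf2flat helpers below build a flat binary from the SHF_ALLOC
+ * sections of an ELF file: the result spans the lowest to the highest
+ * section address, PROGBITS contents are copied in and SHT_NOBITS (BSS)
+ * ranges are left zero-filled; the lowest section address is reported back
+ * as the load address.
+ */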
+static char *elf2flat64(char *elf, size_t *flat_size, size_t *load_addr)
+{
+	Elf64_Ehdr *ehdr;
+	Elf64_Shdr *shdr;
+	size_t min_addr = -1, max_addr = 0;
+	char *flat;
+	int i;
+
+	ehdr = (void *)elf;
+	shdr = (void *)(elf + le64_to_cpu(ehdr->e_shoff));
+
+	/* Look for smallest / biggest address */
+	for (i = 0; i < le64_to_cpu(ehdr->e_shnum); i++, shdr++) {
+		if (!shdr->sh_size || !shdr->sh_addr ||
+		    !(shdr->sh_flags & SHF_ALLOC) ||
+		    (shdr->sh_type == SHT_NOBITS))
+			continue;
+
+		if (le64_to_cpu(shdr->sh_addr) < min_addr)
+			min_addr = le64_to_cpu(shdr->sh_addr);
+		if ((le64_to_cpu(shdr->sh_addr) + le64_to_cpu(shdr->sh_size)) >
+			max_addr)
+			max_addr = le64_to_cpu(shdr->sh_addr) +
+				   le64_to_cpu(shdr->sh_size);
+	}
+
+	*load_addr = min_addr;
+	*flat_size = max_addr - min_addr;
+	flat = calloc(1, *flat_size);
+	if (!flat)
+		return NULL;
+
+	shdr = (void *)(elf + le64_to_cpu(ehdr->e_shoff));
+	for (i = 0; i < le64_to_cpu(ehdr->e_shnum); i++, shdr++) {
+		char *dst = flat + le64_to_cpu(shdr->sh_addr) - min_addr;
+		char *src = elf + le64_to_cpu(shdr->sh_offset);
+
+		if (!shdr->sh_size || !shdr->sh_addr ||
+		    !(shdr->sh_flags & SHF_ALLOC))
+			continue;
+
+		if (shdr->sh_type != SHT_NOBITS)
+			memcpy(dst, src, le64_to_cpu(shdr->sh_size));
+	}
+
+	return flat;
+}
+
+static char *elf2flat32(char *elf, size_t *flat_size, size_t *load_addr)
+{
+	Elf32_Ehdr *ehdr;
+	Elf32_Shdr *shdr;
+	size_t min_addr = -1, max_addr = 0;
+	char *flat;
+	int i;
+
+	ehdr = (void *)elf;
+	shdr = (void *)(elf + le32_to_cpu(ehdr->e_shoff));
+
+	/* Look for smallest / biggest address */
+	for (i = 0; i < le32_to_cpu(ehdr->e_shnum); i++, shdr++) {
+		if (!shdr->sh_size || !shdr->sh_addr ||
+		    !(shdr->sh_flags & SHF_ALLOC) ||
+		    (shdr->sh_type == SHT_NOBITS))
+			continue;
+
+		if (le32_to_cpu(shdr->sh_addr) < min_addr)
+			min_addr = le32_to_cpu(shdr->sh_addr);
+		if ((le32_to_cpu(shdr->sh_addr) + le32_to_cpu(shdr->sh_size)) >
+			max_addr)
+			max_addr = le32_to_cpu(shdr->sh_addr) +
+				   le32_to_cpu(shdr->sh_size);
+	}
+
+	*load_addr = min_addr;
+	*flat_size = max_addr - min_addr;
+	flat = calloc(1, *flat_size);
+	if (!flat)
+		return NULL;
+
+	shdr = (void *)(elf + le32_to_cpu(ehdr->e_shoff));
+	for (i = 0; i < le32_to_cpu(ehdr->e_shnum); i++, shdr++) {
+		char *dst = flat + le32_to_cpu(shdr->sh_addr) - min_addr;
+		char *src = elf + le32_to_cpu(shdr->sh_offset);
+
+		if (!shdr->sh_size || !shdr->sh_addr ||
+		    !(shdr->sh_flags & SHF_ALLOC))
+			continue;
+
+		if (shdr->sh_type != SHT_NOBITS)
+			memcpy(dst, src, le32_to_cpu(shdr->sh_size));
+	}
+
+	return flat;
+}
+
+static int bif_add_elf(struct bif_entry *bf)
+{
+	size_t size;
+	size_t elf_size;
+	char *elf;
+	char *flat;
+	size_t load_addr;
+	Elf32_Ehdr *ehdr32;
+	Elf64_Ehdr *ehdr64;
+
+	elf = read_full_file(bf->filename, &elf_size);
+	if (!elf)
+		return -1;
+
+	ehdr32 = (void *)elf;
+	ehdr64 = (void *)elf;
+
+	switch (ehdr32->e_ident[EI_CLASS]) {
+	case ELFCLASS32:
+		flat = elf2flat32(elf, &size, &load_addr);
+		bf->entry = le32_to_cpu(ehdr32->e_entry);
+		bf->flags |= 1ULL << BIF_FLAG_AARCH32;
+		break;
+	case ELFCLASS64:
+		flat = elf2flat64(elf, &size, &load_addr);
+		bf->entry = le64_to_cpu(ehdr64->e_entry);
+		break;
+	default:
+		printf("Unknown ELF class: %d\n", ehdr32->e_ident[EI_CLASS]);
+		return -1;
+	}
+
+	if (!flat)
+		return -1;
+
+	bf->load = load_addr;
+	if (!bf->dest_dev)
+		bf->dest_dev = PART_ATTR_DEST_DEVICE_PS;
+
+	bf->flags |= 1ULL << BIF_FLAG_ELF_FILE;
+	return bif_add_part(bf, flat, size);
+}
+
+static const struct bif_file_type bif_file_types[] = {
+	{
+		.name = "bitstream (.bit)",
+		.header = 0x00090ff0,
+		.add = bif_add_bit,
+	},
+
+	{
+		.name = "ELF",
+		.header = 0x7f454c46,
+		.add = bif_add_elf,
+	},
+
+	/* Anything else is a .bin file */
+	{
+		.name = ".bin",
+		.add = bif_add_bin,
+	},
+};
+
+static int bif_fsbl_config(struct bif_entry *fsbl_config,
+			   struct bif_entry *entries, int nr_entries)
+{
+	int i;
+	int config_set = 0;
+	struct {
+		const char *name;
+		uint64_t flags;
+		uint64_t dest_cpu;
+	} configs[] = {
+		{ .name = "a5x_x64", .dest_cpu = PART_ATTR_DEST_CPU_A53_0 },
+		{ .name = "a53_x64", .dest_cpu = PART_ATTR_DEST_CPU_A53_0 },
+		{ .name = "a5x_x32", .dest_cpu = PART_ATTR_DEST_CPU_A53_0,
+				     .flags = 1ULL << BIF_FLAG_AARCH32 },
+		{ .name = "a53_x32", .dest_cpu = PART_ATTR_DEST_CPU_A53_0,
+				     .flags = 1ULL << BIF_FLAG_AARCH32 },
+		{ .name = "r5_single", .dest_cpu = PART_ATTR_DEST_CPU_R5_0 },
+		{ .name = "r5_dual", .dest_cpu = PART_ATTR_DEST_CPU_R5_L },
+	};
+
+	/* Set target CPU of bootloader entry */
+	for (i = 0; i < nr_entries; i++) {
+		struct bif_entry *b = &entries[i];
+		const char *config_attr = fsbl_config->filename;
+		int j;
+
+		if (!(b->flags & (1ULL << BIF_FLAG_BOOTLOADER)))
+			continue;
+
+		for (j = 0; j < ARRAY_SIZE(configs); j++) {
+			if (!strncmp(config_attr, configs[j].name,
+				     strlen(configs[j].name))) {
+				b->dest_cpu = configs[j].dest_cpu;
+				b->flags |= configs[j].flags;
+				config_set = 1;
+			}
+		}
+
+		if (!config_set) {
+			printf("ERROR: Unsupported fsbl_config: %s\n",
+			       config_attr);
+			return -1;
+		}
+	}
+
+	if (!config_set) {
+		printf("ERROR: fsbl_config w/o bootloader\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static const struct bif_flags *find_flag(char *str)
+{
+	const struct bif_flags *bf;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bif_flags); i++) {
+		bf = &bif_flags[i];
+		if (!strncmp(bf->name, str, strlen(bf->name)))
+			return bf;
+	}
+
+	printf("ERROR: Flag '%s' not found\n", str);
+
+	return NULL;
+}
+
+static int bif_open_file(struct bif_entry *entry)
+{
+	int fd = open(entry->filename, O_RDONLY);
+
+	if (fd < 0)
+		printf("Error opening file %s\n", entry->filename);
+
+	return fd;
+}
+
+static const struct bif_file_type *get_file_type(struct bif_entry *entry)
+{
+	int fd = bif_open_file(entry);
+	uint32_t header;
+	int i;
+
+	if (fd < 0)
+		return NULL;
+
+	if (read(fd, &header, sizeof(header)) != sizeof(header)) {
+		printf("Error reading file %s", entry->filename);
+		return NULL;
+	}
+
+	close(fd);
+
+	for (i = 0; i < ARRAY_SIZE(bif_file_types); i++) {
+		const struct bif_file_type *type = &bif_file_types[i];
+
+		if (!type->header)
+			return type;
+		if (type->header == be32_to_cpu(header))
+			return type;
+	}
+
+	return NULL;
+}
+
+#define NEXT_CHAR(str, chr) ({		\
+	char *_n = strchr(str, chr);	\
+	if (!_n)			\
+		goto err;		\
+	_n;				\
+})
+
+static char *skip_whitespace(char *str)
+{
+	while (*str == ' ' || *str == '\t')
+		str++;
+
+	return str;
+}
+
+int zynqmpbif_copy_image(int outfd, struct image_tool_params *mparams)
+{
+	char *bif, *bifp, *bifpn;
+	char *line;
+	struct bif_entry entries[32] = { { 0 } };
+	int nr_entries = 0;
+	struct bif_entry *entry = entries;
+	size_t len;
+	int i;
+	uint32_t csum;
+	int bldr = -1;
+
+	bif_init();
+
+	/* Read .bif input file */
+	bif = read_full_file(mparams->datafile, NULL);
+	if (!bif)
+		goto err;
+
+	/* Interpret .bif file */
+	bifp = bif;
+
+	/* A bif description starts with a { section */
+	bifp = NEXT_CHAR(bifp, '{') + 1;
+
+	/* Read every line */
+	while (1) {
+		bifpn = NEXT_CHAR(bifp, '\n');
+
+		if (bifpn[-1] == '\r')
+			bifpn[-1] = '\0';
+
+		*bifpn = '\0';
+		bifpn++;
+		line = bifp;
+
+		line = skip_whitespace(line);
+
+		/* Attributes? */
+		if (*line == '[') {
+			line++;
+			while (1) {
+				const struct bif_flags *bf;
+
+				line = skip_whitespace(line);
+				bf = find_flag(line);
+				if (!bf)
+					goto err;
+
+				line += strlen(bf->name);
+				if (bf->parse)
+					line = bf->parse(line, entry);
+				else
+					entry->flags |= 1ULL << bf->flag;
+
+				if (!line)
+					goto err;
+
+				/* Go to next attribute or quit */
+				if (*line == ']') {
+					line++;
+					break;
+				}
+				if (*line == ',')
+					line++;
+			}
+		}
+
+		/* End of image description */
+		if (*line == '}')
+			break;
+
+		if (*line) {
+			line = skip_whitespace(line);
+			entry->filename = line;
+			nr_entries++;
+			entry++;
+		}
+
+		/* Use next line */
+		bifp = bifpn;
+	}
+
+	for (i = 0; i < nr_entries; i++) {
+		debug("Entry flags=%#lx name=%s\n", entries[i].flags,
+		      entries[i].filename);
+	}
+
+	/*
+	 * Some entries are actually configuration option for other ones,
+	 * let's apply them in an intermediate step.
+	 */
+	for (i = 0; i < nr_entries; i++) {
+		struct bif_entry *entry = &entries[i];
+
+		if (entry->flags & (1ULL << BIF_FLAG_FSBL_CONFIG))
+			if (bif_fsbl_config(entry, entries, nr_entries))
+				goto err;
+	}
+
+	/* Make sure PMUFW comes before bootloader */
+	for (i = 0; i < nr_entries; i++) {
+		struct bif_entry *entry = &entries[i];
+
+		if (entry->flags & (1ULL << BIF_FLAG_BOOTLOADER))
+			bldr = i;
+		if (entry->flags & (1ULL << BIF_FLAG_PMUFW_IMAGE)) {
+			if (bldr >= 0) {
+				struct bif_entry tmp = *entry;
+
+				*entry = entries[bldr];
+				entries[bldr] = tmp;
+			}
+		}
+	}
+
+	for (i = 0; i < nr_entries; i++) {
+		struct bif_entry *entry = &entries[i];
+		const struct bif_file_type *type;
+		int r;
+
+		if (entry->flags & (1ULL << BIF_FLAG_FSBL_CONFIG))
+			continue;
+
+		type = get_file_type(entry);
+		if (!type)
+			goto err;
+
+		debug("type=%s file=%s\n", type->name, entry->filename);
+		r = type->add(entry);
+		if (r)
+			goto err;
+	}
+
+	/* Calculate checksums */
+	csum = zynqmp_csum(&bif_output.header->width_detection,
+			   &bif_output.header->checksum);
+	bif_output.header->checksum = cpu_to_le32(csum);
+
+	if (bif_output.imgheader) {
+		csum = zynqmp_csum(bif_output.imgheader,
+				   &bif_output.imgheader->checksum);
+		bif_output.imgheader->checksum = cpu_to_le32(csum);
+	}
+
+	/* Write headers and components */
+	if (lseek(outfd, 0, SEEK_SET) != 0)
+		goto err;
+
+	len = bif_output.data_len;
+	bifp = bif_output.data;
+	while (len) {
+		int r;
+
+		r = write(outfd, bifp, len);
+		if (r < 0)
+			goto err;
+		len -= r;
+		bifp += r;
+	}
+
+	return 0;
+
+err:
+	fprintf(stderr, "Error: Failed to create image.\n");
+	return -1;
+}
+
+/* Needs to be stubbed out so we can print after creation */
+static void zynqmpbif_set_header(void *ptr, struct stat *sbuf, int ifd,
+				 struct image_tool_params *params)
+{
+}
+
+static struct zynqmp_header zynqmpimage_header;
+
+U_BOOT_IMAGE_TYPE(
+	zynqmpbif,
+	"Xilinx ZynqMP Boot Image support (bif)",
+	sizeof(struct zynqmp_header),
+	(void *)&zynqmpimage_header,
+	zynqmpbif_check_params,
+	NULL,
+	zynqmpimage_print_header,
+	zynqmpbif_set_header,
+	NULL,
+	zynqmpbif_check_image_types,
+	NULL,
+	NULL
+);
diff --git a/tools/u-boot-tools/zynqmpbif.o b/tools/u-boot-tools/zynqmpbif.o
new file mode 100644
index 0000000000000000000000000000000000000000..6f4586e11a11d3c7821dca697294295b4705918a
Binary files /dev/null and b/tools/u-boot-tools/zynqmpbif.o differ
diff --git a/tools/u-boot-tools/zynqmpimage.c b/tools/u-boot-tools/zynqmpimage.c
new file mode 100644
index 0000000000000000000000000000000000000000..19b2f02ff150bf751316edff33004590a9e1d2fa
--- /dev/null
+++ b/tools/u-boot-tools/zynqmpimage.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 Michal Simek <michals@xilinx.com>
+ * Copyright (C) 2015 Nathan Rossi <nathan@nathanrossi.com>
+ *
+ * The following Boot Header format/structures and values are defined in the
+ * following documents:
+ *   * ug1085 ZynqMP TRM doc v1.4 (Chapter 11, Table 11-4)
+ *   * ug1137 ZynqMP Software Developer Guide v6.0 (Chapter 16)
+ *
+ * Expected Header Size = 0x9C0
+ * Forced as 'little' endian, 32-bit words
+ *
+ *  0x  0 - Interrupt table (8 words)
+ *  ...     (Default value = 0xeafffffe)
+ *  0x 1f
+ *  0x 20 - Width detection
+ *         * DEFAULT_WIDTHDETECTION    0xaa995566
+ *  0x 24 - Image identifier
+ *         * DEFAULT_IMAGEIDENTIFIER   0x584c4e58
+ *  0x 28 - Encryption
+ *         * 0x00000000 - None
+ *         * 0xa5c3c5a3 - eFuse
+ *         * 0xa5c3c5a7 - obfuscated key in eFUSE
+ *         * 0x3a5c3c5a - bbRam
+ *         * 0xa35c7ca5 - obfuscated key in boot header
+ *  0x 2C - Image load
+ *  0x 30 - Image offset
+ *  0x 34 - PFW image length
+ *  0x 38 - Total PFW image length
+ *  0x 3C - Image length
+ *  0x 40 - Total image length
+ *  0x 44 - Image attributes
+ *  0x 48 - Header checksum
+ *  0x 4c - Obfuscated key
+ *  ...
+ *  0x 68
+ *  0x 6c - Reserved
+ *  0x 70 - User defined
+ *  ...
+ *  0x 9c
+ *  0x a0 - Secure header initialization vector
+ *  ...
+ *  0x a8
+ *  0x ac - Obfuscated key initialization vector
+ *  ...
+ *  0x b4
+ *  0x b8 - Register Initialization, 511 Address and Data word pairs
+ *         * List is terminated with an address of 0xffffffff or
+ *  ...    * at the max number of entries
+ *  0x8b4
+ *  0x8b8 - Reserved
+ *  ...
+ *  0x9bf
+ *  0x9c0 - Data/Image starts here or above
+ */
+
+#include "imagetool.h"
+#include "mkimage.h"
+#include "zynqmpimage.h"
+#include <image.h>
+
+static struct zynqmp_header zynqmpimage_header;
+static void *dynamic_header;
+static FILE *fpmu;
+
+static uint32_t zynqmpimage_checksum(struct zynqmp_header *ptr)
+{
+	uint32_t checksum = 0;
+
+	if (ptr == NULL)
+		return 0;
+
+	checksum += le32_to_cpu(ptr->width_detection);
+	checksum += le32_to_cpu(ptr->image_identifier);
+	checksum += le32_to_cpu(ptr->encryption);
+	checksum += le32_to_cpu(ptr->image_load);
+	checksum += le32_to_cpu(ptr->image_offset);
+	checksum += le32_to_cpu(ptr->pfw_image_length);
+	checksum += le32_to_cpu(ptr->total_pfw_image_length);
+	checksum += le32_to_cpu(ptr->image_size);
+	checksum += le32_to_cpu(ptr->image_stored_size);
+	checksum += le32_to_cpu(ptr->image_attributes);
+	checksum = ~checksum;
+
+	return cpu_to_le32(checksum);
+}
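+
+/*
+ * As on Zynq, the checksum is the bitwise complement of the 32-bit sum of
+ * the header words from 0x20 (width detection) through 0x44 (image
+ * attributes); the checksum word at 0x48 itself is excluded from the sum.
+ */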
+
+void zynqmpimage_default_header(struct zynqmp_header *ptr)
+{
+	int i;
+
+	if (ptr == NULL)
+		return;
+
+	ptr->width_detection = HEADER_WIDTHDETECTION;
+	ptr->image_attributes = HEADER_CPU_SELECT_A53_64BIT;
+	ptr->image_identifier = HEADER_IMAGEIDENTIFIER;
+	ptr->encryption = cpu_to_le32(ENCRYPTION_NONE);
+
+	/* Setup not-supported/constant/reserved fields */
+	for (i = 0; i < HEADER_INTERRUPT_VECTORS; i++)
+		ptr->interrupt_vectors[i] = HEADER_INTERRUPT_DEFAULT;
+
+	for (i = 0; i < HEADER_REGINITS; i++) {
+		ptr->register_init[i].address = HEADER_REGINIT_NULL;
+		ptr->register_init[i].data = 0;
+	}
+
+	/*
+	 * Certain reserved fields are required to be set to 0, ensure they are
+	 * set as such.
+	 */
+	ptr->pfw_image_length = 0x0;
+	ptr->total_pfw_image_length = 0x0;
+}
+
+/* mkimage glue functions */
+static int zynqmpimage_verify_header(unsigned char *ptr, int image_size,
+		struct image_tool_params *params)
+{
+	struct zynqmp_header *zynqhdr = (struct zynqmp_header *)ptr;
+
+	if (image_size < sizeof(struct zynqmp_header))
+		return -1;
+
+	if (zynqhdr->width_detection != HEADER_WIDTHDETECTION)
+		return -1;
+	if (zynqhdr->image_identifier != HEADER_IMAGEIDENTIFIER)
+		return -1;
+
+	if (zynqmpimage_checksum(zynqhdr) != zynqhdr->checksum)
+		return -1;
+
+	return 0;
+}
+
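+/*
+ * Pretty-print a single partition header entry.  Offsets and lengths in
+ * the partition header are stored as 32-bit word counts, hence the "* 4"
+ * scaling below when converting them to bytes for display.
+ */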
+static void print_partition(const void *ptr, const struct partition_header *ph)
+{
+	uint32_t attr = le32_to_cpu(ph->attributes);
+	unsigned long len = le32_to_cpu(ph->len) * 4;
+	const char *part_owner;
+	const char *dest_devs[0x8] = {
+		"none", "PS", "PL", "PMU", "XIP", "unknown", "unknown",
+		"unknown"
+	};
+
+	switch (attr & PART_ATTR_PART_OWNER_MASK) {
+	case PART_ATTR_PART_OWNER_FSBL:
+		part_owner = "FSBL";
+		break;
+	case PART_ATTR_PART_OWNER_UBOOT:
+		part_owner = "U-Boot";
+		break;
+	default:
+		part_owner = "Unknown";
+		break;
+	}
+
+	printf("%s payload on CPU %s (%s):\n", part_owner,
+	       dest_cpus[(attr & PART_ATTR_DEST_CPU_MASK) >> 8],
+	       dest_devs[(attr & PART_ATTR_DEST_DEVICE_MASK) >> 4]);
+
+	printf("    Offset     : 0x%08x\n", le32_to_cpu(ph->offset) * 4);
+	printf("    Size       : %lu (0x%lx) bytes\n", len, len);
+	printf("    Load       : 0x%08llx",
+	       (unsigned long long)le64_to_cpu(ph->load_address));
+	if (ph->load_address != ph->entry_point)
+		printf(" (entry=0x%08llx)\n",
+		       (unsigned long long)le64_to_cpu(ph->entry_point));
+	else
+		printf("\n");
+	printf("    Attributes : ");
+
+	if (attr & PART_ATTR_VEC_LOCATION)
+		printf("vec ");
+
+	if (attr & PART_ATTR_ENCRYPTED)
+		printf("encrypted ");
+
+	switch (attr & PART_ATTR_CHECKSUM_MASK) {
+	case PART_ATTR_CHECKSUM_MD5:
+		printf("md5 ");
+		break;
+	case PART_ATTR_CHECKSUM_SHA2:
+		printf("sha2 ");
+		break;
+	case PART_ATTR_CHECKSUM_SHA3:
+		printf("sha3 ");
+		break;
+	}
+
+	if (attr & PART_ATTR_BIG_ENDIAN)
+		printf("BigEndian ");
+
+	if (attr & PART_ATTR_RSA_SIG)
+		printf("RSA ");
+
+	if (attr & PART_ATTR_A53_EXEC_AARCH32)
+		printf("AArch32 ");
+
+	if (attr & PART_ATTR_TARGET_EL_MASK)
+		printf("EL%d ", (attr & PART_ATTR_TARGET_EL_MASK) >> 1);
+
+	if (attr & PART_ATTR_TZ_SECURE)
+		printf("secure ");
+	printf("\n");
+
+	printf("    Checksum   : 0x%08x\n", le32_to_cpu(ph->checksum));
+}
+
+void zynqmpimage_print_header(const void *ptr)
+{
+	struct zynqmp_header *zynqhdr = (struct zynqmp_header *)ptr;
+	int i;
+
+	printf("Image Type   : Xilinx ZynqMP Boot Image support\n");
+	printf("Image Offset : 0x%08x\n", le32_to_cpu(zynqhdr->image_offset));
+	printf("Image Size   : %lu bytes (%lu bytes packed)\n",
+	       (unsigned long)le32_to_cpu(zynqhdr->image_size),
+	       (unsigned long)le32_to_cpu(zynqhdr->image_stored_size));
+
+	if (zynqhdr->pfw_image_length)
+		printf("PMUFW Size   : %lu bytes (%lu bytes packed)\n",
+		       (unsigned long)le32_to_cpu(zynqhdr->pfw_image_length),
+		       (unsigned long)le32_to_cpu(
+				zynqhdr->total_pfw_image_length));
+
+	printf("Image Load   : 0x%08x\n", le32_to_cpu(zynqhdr->image_load));
+	printf("Checksum     : 0x%08x\n", le32_to_cpu(zynqhdr->checksum));
+
+	for (i = 0; i < HEADER_INTERRUPT_VECTORS; i++) {
+		if (zynqhdr->interrupt_vectors[i] == HEADER_INTERRUPT_DEFAULT)
+			continue;
+
+		printf("Modified Interrupt Vector Address [%d]: 0x%08x\n", i,
+		       le32_to_cpu(zynqhdr->interrupt_vectors[i]));
+	}
+
+	for (i = 0; i < HEADER_REGINITS; i++) {
+		if (zynqhdr->register_init[i].address == HEADER_REGINIT_NULL)
+			break;
+
+		if (i == 0)
+			printf("Custom Register Initialization:\n");
+
+		printf("    @ 0x%08x -> 0x%08x\n",
+		       le32_to_cpu(zynqhdr->register_init[i].address),
+		       le32_to_cpu(zynqhdr->register_init[i].data));
+	}
+
+	if (zynqhdr->image_header_table_offset) {
+		struct image_header_table *iht = (void *)ptr +
+			le32_to_cpu(zynqhdr->image_header_table_offset);
+		struct partition_header *ph;
+		uint32_t ph_offset;
+		uint32_t next;
+		int i;
+
+		ph_offset = le32_to_cpu(iht->partition_header_offset) * 4;
+		ph = (void *)ptr + ph_offset;
+		for (i = 0; i < le32_to_cpu(iht->nr_parts); i++) {
+			next = le32_to_cpu(ph->next_partition_offset) * 4;
+
+			/* Partition 0 is the base image itself */
+			if (i)
+				print_partition(ptr, ph);
+
+			ph = (void *)ptr + next;
+		}
+	}
+
+	/*
+	 * Release the dynamic header buffer allocated in
+	 * zynqmpimage_vrec_header(), if any (free(NULL) is a no-op).
+	 */
+	free(dynamic_header);
+}
+
+static int zynqmpimage_check_params(struct image_tool_params *params)
+{
+	if (!params)
+		return 0;
+
+	if (params->addr != 0x0) {
+		fprintf(stderr, "Error: Load Address cannot be specified.\n");
+		return -1;
+	}
+
+	/*
+	 * If the entry point is specified ensure it is 64 byte aligned.
+	 */
+	if (params->eflag && (params->ep % 64 != 0)) {
+		fprintf(stderr,
+			"Error: Entry Point must be aligned to a 64-byte boundary.\n");
+		return -1;
+	}
+
+	return !(params->lflag || params->dflag);
+}
+
+static int zynqmpimage_check_image_types(uint8_t type)
+{
+	if (type == IH_TYPE_ZYNQMPIMAGE)
+		return EXIT_SUCCESS;
+	return EXIT_FAILURE;
+}
+
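+/* Return the size of an already open file, preserving its current position */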
+static uint32_t fsize(FILE *fp)
+{
+	int size, ret, origin;
+
+	origin = ftell(fp);
+	if (origin < 0) {
+		fprintf(stderr, "Cannot get current file position\n");
+		fclose(fp);
+		exit(2);
+	}
+
+	ret = fseek(fp, 0L, SEEK_END);
+	if (ret) {
+		fprintf(stderr, "Cannot seek to end of file\n");
+		fclose(fp);
+		exit(3);
+	}
+
+	size = ftell(fp);
+	if (size < 0) {
+		fprintf(stderr, "Cannot determine file size\n");
+		fclose(fp);
+		exit(4);
+	}
+
+	/* going back */
+	ret = fseek(fp, origin, SEEK_SET);
+	if (ret) {
+		fprintf(stderr, "Cannot seek back to offset %d\n", origin);
+		fclose(fp);
+		exit(3);
+	}
+
+	return size;
+}
+
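+/*
+ * Embed the PMU firmware (opened in zynqmpimage_vrec_header()) into the
+ * boot image: record its size in the pfw_image_length fields, subtract it
+ * from the image size fields, and copy it into the space reserved directly
+ * behind the fixed 0x9c0-byte header (&__reserved4[66] points one element
+ * past the end of struct zynqmp_header).
+ */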
+static void zynqmpimage_pmufw(struct zynqmp_header *zynqhdr,
+			      const char *filename)
+{
+	uint32_t size;
+
+	/* Setup PMU fw size */
+	zynqhdr->pfw_image_length = fsize(fpmu);
+	zynqhdr->total_pfw_image_length = zynqhdr->pfw_image_length;
+
+	zynqhdr->image_size -= zynqhdr->pfw_image_length;
+	zynqhdr->image_stored_size -= zynqhdr->total_pfw_image_length;
+
+	/* Read the whole PMUFW to the header */
+	size = fread(&zynqhdr->__reserved4[66], 1,
+		     zynqhdr->pfw_image_length, fpmu);
+	if (size != zynqhdr->pfw_image_length) {
+		fprintf(stderr, "Cannot read PMUFW file: %s\n", filename);
+		fclose(fpmu);
+		exit(1);
+	}
+
+	fclose(fpmu);
+}
+
+static void zynqmpimage_parse_initparams(struct zynqmp_header *zynqhdr,
+	const char *filename)
+{
+	FILE *fp;
+	struct zynqmp_reginit reginit;
+	unsigned int reg_count = 0;
+	int r, err;
+	struct stat path_stat;
+
+	/* Expect a table of register-value pairs, e.g. "0x12345678 0x4321" */
+	fp = fopen(filename, "r");
+	if (!fp) {
+		fprintf(stderr, "Cannot open initparams file: %s\n", filename);
+		exit(1);
+	}
+
+	err = fstat(fileno(fp), &path_stat);
+	if (err) {
+		fclose(fp);
+		return;
+	}
+
+	if (!S_ISREG(path_stat.st_mode)) {
+		fclose(fp);
+		return;
+	}
+
+	do {
+		r = fscanf(fp, "%x %x", &reginit.address, &reginit.data);
+		if (r == 2) {
+			zynqhdr->register_init[reg_count] = reginit;
+			++reg_count;
+		}
+		r = fscanf(fp, "%*[^\n]\n"); /* Skip to next line */
+	} while ((r != EOF) && (reg_count < HEADER_REGINITS));
+	fclose(fp);
+}
+
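+/*
+ * Populate the boot header for the image being written: the payload is
+ * placed directly behind the header, the load address defaults to
+ * 0xfffc0000 (ZynqMP on-chip memory) unless an entry point was given on
+ * the command line, optional PMUFW and register-initialization inputs are
+ * merged in, and finally the header checksum is computed.
+ */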
+static void zynqmpimage_set_header(void *ptr, struct stat *sbuf, int ifd,
+		struct image_tool_params *params)
+{
+	struct zynqmp_header *zynqhdr = (struct zynqmp_header *)ptr;
+	zynqmpimage_default_header(zynqhdr);
+
+	/* place image directly after header */
+	zynqhdr->image_offset =
+		cpu_to_le32((uint32_t)sizeof(struct zynqmp_header));
+	zynqhdr->image_size = cpu_to_le32(params->file_size -
+					  sizeof(struct zynqmp_header));
+	zynqhdr->image_stored_size = zynqhdr->image_size;
+	zynqhdr->image_load = cpu_to_le32(0xfffc0000);
+	if (params->eflag)
+		zynqhdr->image_load = cpu_to_le32((uint32_t)params->ep);
+
+	/* PMUFW */
+	if (fpmu)
+		zynqmpimage_pmufw(zynqhdr, params->imagename);
+
+	/* User can pass in text file with init list */
+	if (strlen(params->imagename2))
+		zynqmpimage_parse_initparams(zynqhdr, params->imagename2);
+
+	zynqhdr->checksum = zynqmpimage_checksum(zynqhdr);
+}
+
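+/*
+ * Size the header before the image is assembled.  Without a PMUFW this is
+ * simply sizeof(struct zynqmp_header); if a PMUFW file was supplied via
+ * params->imagename, its size is added on top and a dynamically allocated
+ * buffer is used instead of the static default header.
+ */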
+static int zynqmpimage_vrec_header(struct image_tool_params *params,
+				   struct image_type_params *tparams)
+{
+	struct stat path_stat;
+	char *filename = params->imagename;
+	int err;
+
+	/* Handle static case without PMUFW */
+	tparams->header_size = sizeof(struct zynqmp_header);
+	tparams->hdr = (void *)&zynqmpimage_header;
+
+	/* PMUFW name is passed via params->imagename */
+	if (strlen(filename) == 0)
+		return EXIT_SUCCESS;
+
+	fpmu = fopen(filename, "r");
+	if (!fpmu) {
+		fprintf(stderr, "Cannot open PMUFW file: %s\n", filename);
+		return EXIT_FAILURE;
+	}
+
+	err = fstat(fileno(fpmu), &path_stat);
+	if (err) {
+		fclose(fpmu);
+		fpmu = NULL;
+		return EXIT_FAILURE;
+	}
+
+	if (!S_ISREG(path_stat.st_mode)) {
+		fclose(fpmu);
+		fpmu = NULL;
+		return EXIT_FAILURE;
+	}
+
+	/* Increase header size by PMUFW file size */
+	tparams->header_size += fsize(fpmu);
+
+	/* Allocate a buffer large enough for the header plus the PMUFW */
+	dynamic_header = calloc(1, tparams->header_size);
+	if (!dynamic_header) {
+		fprintf(stderr, "Cannot allocate memory for PMUFW header\n");
+		fclose(fpmu);
+		fpmu = NULL;
+		return EXIT_FAILURE;
+	}
+	tparams->hdr = dynamic_header;
+
+	return EXIT_SUCCESS;
+}
+
+U_BOOT_IMAGE_TYPE(
+	zynqmpimage,
+	"Xilinx ZynqMP Boot Image support",
+	sizeof(struct zynqmp_header),
+	(void *)&zynqmpimage_header,
+	zynqmpimage_check_params,
+	zynqmpimage_verify_header,
+	zynqmpimage_print_header,
+	zynqmpimage_set_header,
+	NULL,
+	zynqmpimage_check_image_types,
+	NULL,
+	zynqmpimage_vrec_header
+);
diff --git a/tools/u-boot-tools/zynqmpimage.h b/tools/u-boot-tools/zynqmpimage.h
new file mode 100644
index 0000000000000000000000000000000000000000..a1db819aa36c2bfa9d0d9ce1f86b6b148e62fa99
--- /dev/null
+++ b/tools/u-boot-tools/zynqmpimage.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Michal Simek <michals@xilinx.com>
+ * Copyright (C) 2015 Nathan Rossi <nathan@nathanrossi.com>
+ *
+ * The following Boot Header format/structures and values are defined in the
+ * following documents:
+ *   * ug1085 ZynqMP TRM doc v1.4 (Chapter 11, Table 11-4)
+ *   * ug1137 ZynqMP Software Developer Guide v6.0 (Chapter 16)
+ */
+
+#ifndef _ZYNQMPIMAGE_H_
+#define _ZYNQMPIMAGE_H_
+
+#include <stdint.h>
+
+#define HEADER_INTERRUPT_DEFAULT (cpu_to_le32(0xeafffffe))
+#define HEADER_REGINIT_NULL (cpu_to_le32(0xffffffff))
+#define HEADER_WIDTHDETECTION (cpu_to_le32(0xaa995566))
+#define HEADER_IMAGEIDENTIFIER (cpu_to_le32(0x584c4e58))
+#define HEADER_CPU_SELECT_MASK		(0x3 << 10)
+#define HEADER_CPU_SELECT_R5_SINGLE	(0x0 << 10)
+#define HEADER_CPU_SELECT_A53_32BIT	(0x1 << 10)
+#define HEADER_CPU_SELECT_A53_64BIT	(0x2 << 10)
+#define HEADER_CPU_SELECT_R5_DUAL	(0x3 << 10)
+
+enum {
+	ENCRYPTION_EFUSE = 0xa5c3c5a3,
+	ENCRYPTION_OEFUSE = 0xa5c3c5a7,
+	ENCRYPTION_BBRAM = 0x3a5c3c5a,
+	ENCRYPTION_OBBRAM = 0xa35c7ca5,
+	ENCRYPTION_NONE = 0x0,
+};
+
+struct zynqmp_reginit {
+	uint32_t address;
+	uint32_t data;
+};
+
+#define HEADER_INTERRUPT_VECTORS	8
+#define HEADER_REGINITS			256
+
+struct image_header_table {
+	uint32_t version;		  /* 0x00 */
+	uint32_t nr_parts;		  /* 0x04 */
+	uint32_t partition_header_offset; /* 0x08, divided by 4 */
+	uint32_t image_header_offset;	  /* 0x0c, divided by 4 */
+	uint32_t auth_certificate_offset; /* 0x10 */
+	uint32_t boot_device;		  /* 0x14 */
+	uint32_t __reserved1[9];	  /* 0x18 - 0x38 */
+	uint32_t checksum;		  /* 0x3c */
+};
+
+#define PART_ATTR_VEC_LOCATION		0x800000
+#define PART_ATTR_BS_BLOCK_SIZE_MASK	0x700000
+#define     PART_ATTR_BS_BLOCK_SIZE_DEFAULT	0x000000
+#define     PART_ATTR_BS_BLOCK_SIZE_8MB		0x400000
+#define PART_ATTR_BIG_ENDIAN		0x040000
+#define PART_ATTR_PART_OWNER_MASK	0x030000
+#define     PART_ATTR_PART_OWNER_FSBL		0x000000
+#define     PART_ATTR_PART_OWNER_UBOOT		0x010000
+#define PART_ATTR_RSA_SIG		0x008000
+#define PART_ATTR_CHECKSUM_MASK		0x007000
+#define    PART_ATTR_CHECKSUM_NONE		0x000000
+#define    PART_ATTR_CHECKSUM_MD5		0x001000
+#define    PART_ATTR_CHECKSUM_SHA2		0x002000
+#define    PART_ATTR_CHECKSUM_SHA3		0x003000
+#define PART_ATTR_DEST_CPU_SHIFT	8
+#define PART_ATTR_DEST_CPU_MASK		0x000f00
+#define    PART_ATTR_DEST_CPU_NONE		0x000000
+#define    PART_ATTR_DEST_CPU_A53_0		0x000100
+#define    PART_ATTR_DEST_CPU_A53_1		0x000200
+#define    PART_ATTR_DEST_CPU_A53_2		0x000300
+#define    PART_ATTR_DEST_CPU_A53_3		0x000400
+#define    PART_ATTR_DEST_CPU_R5_0		0x000500
+#define    PART_ATTR_DEST_CPU_R5_1		0x000600
+#define    PART_ATTR_DEST_CPU_R5_L		0x000700
+#define    PART_ATTR_DEST_CPU_PMU		0x000800
+#define PART_ATTR_ENCRYPTED		0x000080
+#define PART_ATTR_DEST_DEVICE_SHIFT	4
+#define PART_ATTR_DEST_DEVICE_MASK	0x000070
+#define    PART_ATTR_DEST_DEVICE_NONE		0x000000
+#define    PART_ATTR_DEST_DEVICE_PS		0x000010
+#define    PART_ATTR_DEST_DEVICE_PL		0x000020
+#define    PART_ATTR_DEST_DEVICE_PMU		0x000030
+#define    PART_ATTR_DEST_DEVICE_XIP		0x000040
+#define PART_ATTR_A53_EXEC_AARCH32	0x000008
+#define PART_ATTR_TARGET_EL_SHIFT	1
+#define PART_ATTR_TARGET_EL_MASK	0x000006
+#define PART_ATTR_TZ_SECURE		0x000001
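+
+/*
+ * Illustrative decode of an attribute word (example value only, not taken
+ * from a real image): attributes = 0x00010116 means
+ *   partition owner     : U-Boot   (bits 17:16 = 0x1)
+ *   checksum type       : none     (bits 14:12 = 0x0)
+ *   destination CPU     : a5x-0    (bits 11:8  = 0x1)
+ *   encrypted           : no       (bit  7     = 0)
+ *   destination device  : PS       (bits  6:4  = 0x1)
+ *   A53 execution state : AArch64  (bit  3     = 0)
+ *   target EL           : EL3      (bits  2:1  = 0x3)
+ *   TrustZone secure    : no       (bit  0     = 0)
+ */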
+
+static const char *dest_cpus[0x10] = {
+	"none", "a5x-0", "a5x-1", "a5x-2", "a5x-3", "r5-0", "r5-1",
+	"r5-lockstep", "pmu", "unknown", "unknown", "unknown", "unknown",
+	"unknown", "unknown", "unknown"
+};
+
+struct partition_header {
+	uint32_t len_enc;		  /* 0x00, divided by 4 */
+	uint32_t len_unenc;		  /* 0x04, divided by 4 */
+	uint32_t len;			  /* 0x08, divided by 4 */
+	uint32_t next_partition_offset;   /* 0x0c, divided by 4 */
+	uint64_t entry_point;		  /* 0x10 */
+	uint64_t load_address;		  /* 0x18 */
+	uint32_t offset;		  /* 0x20, divided by 4 */
+	uint32_t attributes;		  /* 0x24 */
+	uint32_t __reserved1;		  /* 0x28 */
+	uint32_t checksum_offset;	  /* 0x2c, divided by 4 */
+	uint32_t __reserved2;		  /* 0x30 */
+	uint32_t auth_certificate_offset; /* 0x34 */
+	uint32_t __reserved3;		  /* 0x38 */
+	uint32_t checksum;		  /* 0x3c */
+};
+
+struct zynqmp_header {
+	uint32_t interrupt_vectors[HEADER_INTERRUPT_VECTORS]; /* 0x0 */
+	uint32_t width_detection; /* 0x20 */
+	uint32_t image_identifier; /* 0x24 */
+	uint32_t encryption; /* 0x28 */
+	uint32_t image_load; /* 0x2c */
+	uint32_t image_offset; /* 0x30 */
+	uint32_t pfw_image_length; /* 0x34 */
+	uint32_t total_pfw_image_length; /* 0x38 */
+	uint32_t image_size; /* 0x3c */
+	uint32_t image_stored_size; /* 0x40 */
+	uint32_t image_attributes; /* 0x44 */
+	uint32_t checksum; /* 0x48 */
+	uint32_t __reserved1[19]; /* 0x4c */
+	uint32_t image_header_table_offset; /* 0x98 */
+	uint32_t __reserved2[7]; /* 0x9c */
+	struct zynqmp_reginit register_init[HEADER_REGINITS]; /* 0xb8 */
+	uint32_t __reserved4[66]; /* 0x8b8 */
+};
+
+void zynqmpimage_default_header(struct zynqmp_header *ptr);
+void zynqmpimage_print_header(const void *ptr);
+
+#endif /* _ZYNQMPIMAGE_H_ */