Re: [PATCH 30/78] ARM: aarch64: Add relocation support

2018-03-16 Thread Andrey Smirnov
On Fri, Mar 16, 2018 at 5:53 AM, Sascha Hauer  wrote:
> This adds aarch64 support for relocating binaries linked with -pie.
>
> Support is integrated into the already existing
> relocate_to_current_adr() function, which is now used for both arm32
> and aarch64.
>
> Signed-off-by: Sascha Hauer 


Sascha:

Two small suggestions w.r.t. this patch:

 - I'd consider changing the code of relocate_to_current_adr() such
that AArch64-specific code paths are not taken on ARM32 (via an
IS_ENABLED() check or something similar)

 - I've always wanted to fix the original code to use the Elf32_Rel type
instead of magic hard-coded offsets, so depending on your
willingness/time budget, maybe now would be a good time to do that, as
well as use Elf64_Rela for AArch64?
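
Roughly what I have in mind, as an untested sketch (it assumes the
Elf32_Rel/Elf64_Rela types and the ELF32_R_*/ELF64_R_TYPE macros are
usable in the PBL, and it glosses over details such as clearing the
dynsym table afterwards):

void relocate_to_current_adr(void)
{
	unsigned long offset = get_runtime_offset();
	unsigned long ovar = global_variable_offset();

	if (IS_ENABLED(CONFIG_CPU_64v8)) {
		Elf64_Rela *rela = (void *)__rel_dyn_start + ovar;
		Elf64_Rela *end = (void *)__rel_dyn_end + ovar;

		for (; rela < end; rela++) {
			u64 *fixup = (u64 *)(rela->r_offset + offset);

			if (ELF64_R_TYPE(rela->r_info) != R_AARCH64_RELATIVE)
				panic(NULL);

			*fixup = rela->r_addend + offset;
		}
	} else {
		Elf32_Rel *rel = (void *)__rel_dyn_start + ovar;
		Elf32_Rel *end = (void *)__rel_dyn_end + ovar;
		Elf32_Sym *dynsym = (void *)__dynsym_start + ovar;

		for (; rel < end; rel++) {
			u32 *fixup = (u32 *)(rel->r_offset + offset);

			if (ELF32_R_TYPE(rel->r_info) == R_ARM_RELATIVE)
				*fixup += offset;
			else if (ELF32_R_TYPE(rel->r_info) == R_ARM_ABS32)
				*fixup += dynsym[ELF32_R_SYM(rel->r_info)].st_value + offset;
			else
				panic(NULL);
		}
	}
}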

Thanks,
Andrey Smirnov

> ---
>  arch/arm/cpu/common.c| 38 ---
>  arch/arm/cpu/setupc_64.S | 58 
> 
>  common/Kconfig   |  2 +-
>  3 files changed, 89 insertions(+), 9 deletions(-)
>
> diff --git a/arch/arm/cpu/common.c b/arch/arm/cpu/common.c
> index 3766116d97..c317e502d0 100644
> --- a/arch/arm/cpu/common.c
> +++ b/arch/arm/cpu/common.c
> @@ -24,39 +24,61 @@
>  #include 
>  #include 
>  #include 
> +#include 
> +
> +#define R_ARM_RELATIVE 23
> +#define R_AARCH64_RELATIVE 1027
>
>  /*
>   * relocate binary to the currently running address
>   */
>  void relocate_to_current_adr(void)
>  {
> -   unsigned long offset;
> +   unsigned long offset, offset_var;
> unsigned long *dstart, *dend, *dynsym, *dynend;
>
> /* Get offset between linked address and runtime address */
> offset = get_runtime_offset();
> +   offset_var = global_variable_offset();
>
> -   dstart = (void *)__rel_dyn_start + offset;
> -   dend = (void *)__rel_dyn_end + offset;
> +   dstart = (void *)__rel_dyn_start + offset_var;
> +   dend = (void *)__rel_dyn_end + offset_var;
>
> -   dynsym = (void *)__dynsym_start + offset;
> -   dynend = (void *)__dynsym_end + offset;
> +   dynsym = (void *)__dynsym_start + offset_var;
> +   dynend = (void *)__dynsym_end + offset_var;
>
> while (dstart < dend) {
> unsigned long *fixup = (unsigned long *)(*dstart + offset);
> unsigned long type = *(dstart + 1);
> +   int add;
> +
> +   if (ELF64_R_TYPE(type) == R_AARCH64_RELATIVE) {
> +   unsigned long addend = *(dstart + 2);
>
> -   if ((type & 0xff) == 0x17) {
> +   *fixup = addend + offset;
> +
> +   add = 3;
> +   } else if (ELF32_R_TYPE(type) == R_ARM_RELATIVE) {
> *fixup = *fixup + offset;
> -   } else {
> +
> +   add = 2;
> +   } else if (ELF32_R_TYPE(type) == R_ARM_ABS32) {
> int index = type >> 8;
> unsigned long r = dynsym[index * 4 + 1];
>
> *fixup = *fixup + r + offset;
> +
> +   add = 2;
> +   } else {
> +   putc_ll('>');
> +   puthex_ll(type);
> +   putc_ll('\n');
> +   /* We're doomed */
> +   panic(NULL);
> }
>
> *dstart += offset;
> -   dstart += 2;
> +   dstart += add;
> }
>
> memset(dynsym, 0, (unsigned long)dynend - (unsigned long)dynsym);
> diff --git a/arch/arm/cpu/setupc_64.S b/arch/arm/cpu/setupc_64.S
> index 3515854784..88c7899205 100644
> --- a/arch/arm/cpu/setupc_64.S
> +++ b/arch/arm/cpu/setupc_64.S
> @@ -16,3 +16,61 @@ ENTRY(setup_c)
> mov x30, x15
> ret
>  ENDPROC(setup_c)
> +
> +/*
> + * void relocate_to_adr(unsigned long targetadr)
> + *
> + * Copy binary to targetadr, relocate code and continue
> + * executing at new address.
> + */
> +.section .text.relocate_to_adr
> +ENTRY(relocate_to_adr)
> +   /* x0: target address */
> +
> +   stp x19, x20, [sp, #-16]!
> +
> +   mov x19, lr
> +
> +   mov x6, x0
> +
> +   bl  get_runtime_offset
> +   mov x5, x0
> +
> +   ldr x0, =_text
> +   mov x8, x0
> +
> +   add x1, x0, x5  /* x1: from address */
> +
> +   cmp x1, x6  /* already at correct address? */
> +   beq 1f  /* yes, skip copy to new address */
> +
> +   ldr x2, =__bss_start
> +
> +   sub x2, x2, x0  /* x2: size */
> +   mov x0, x6  /* x0: target */
> +
> +   /* adjust return address */
> +   sub x19, x19, x1	/* sub address where we are actually running */
> +   add x19, x19, x0	/* add address where we are going to run */
> +
> +

[PATCH] of: demote "no ranges" message to debug level

2018-03-16 Thread Lucas Stach
There are valid cases where there is no way to translate an OF node to
an MMIO address via ranges. Do the same as the Linux kernel and don't
print an error message in that case, but make it available as a debug
message.

Signed-off-by: Lucas Stach 
---
 drivers/of/address.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/of/address.c b/drivers/of/address.c
index 8018d78bcbd8..14db08041725 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -314,7 +314,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
ranges = of_get_property(parent, rprop, );
 #if !defined(CONFIG_PPC)
if (ranges == NULL) {
-   pr_err("OF: no ranges; cannot translate\n");
+   pr_debug("OF: no ranges; cannot translate\n");
return 1;
}
 #endif /* !defined(CONFIG_PPC) */
-- 
2.16.1




[PATCH 66/78] dt-bindings: Drop unused files

2018-03-16 Thread Sascha Hauer
include/dt-bindings has not been included in device tree builds since
2014. The directory has been unused since then, so remove the files in it.

Signed-off-by: Sascha Hauer 
---
 include/dt-bindings/clock/tegra30-car.h | 265 
 include/dt-bindings/pinctrl/pinctrl-tegra.h |  45 -
 include/dt-bindings/pinctrl/rockchip.h  |  32 
 3 files changed, 342 deletions(-)
 delete mode 100644 include/dt-bindings/clock/tegra30-car.h
 delete mode 100644 include/dt-bindings/pinctrl/pinctrl-tegra.h
 delete mode 100644 include/dt-bindings/pinctrl/rockchip.h

diff --git a/include/dt-bindings/clock/tegra30-car.h b/include/dt-bindings/clock/tegra30-car.h
deleted file mode 100644
index e40fae8f9a..00
--- a/include/dt-bindings/clock/tegra30-car.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * This header provides constants for binding nvidia,tegra30-car.
- *
- * The first 130 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB
- * registers. These IDs often match those in the CAR's RST_DEVICES registers,
- * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In
- * this case, those clocks are assigned IDs above 160 in order to highlight
- * this issue. Implementations that interpret these clock IDs as bit values
- * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to
- * explicitly handle these special cases.
- *
- * The balance of the clocks controlled by the CAR are assigned IDs of 160 and
- * above.
- */
-
-#ifndef _DT_BINDINGS_CLOCK_TEGRA30_CAR_H
-#define _DT_BINDINGS_CLOCK_TEGRA30_CAR_H
-
-#define TEGRA30_CLK_CPU 0
-/* 1 */
-/* 2 */
-/* 3 */
-#define TEGRA30_CLK_RTC 4
-#define TEGRA30_CLK_TIMER 5
-#define TEGRA30_CLK_UARTA 6
-/* 7 (register bit affects uartb and vfir) */
-#define TEGRA30_CLK_GPIO 8
-#define TEGRA30_CLK_SDMMC2 9
-/* 10 (register bit affects spdif_in and spdif_out) */
-#define TEGRA30_CLK_I2S1 11
-#define TEGRA30_CLK_I2C1 12
-#define TEGRA30_CLK_NDFLASH 13
-#define TEGRA30_CLK_SDMMC1 14
-#define TEGRA30_CLK_SDMMC4 15
-/* 16 */
-#define TEGRA30_CLK_PWM 17
-#define TEGRA30_CLK_I2S2 18
-#define TEGRA30_CLK_EPP 19
-/* 20 (register bit affects vi and vi_sensor) */
-#define TEGRA30_CLK_GR2D 21
-#define TEGRA30_CLK_USBD 22
-#define TEGRA30_CLK_ISP 23
-#define TEGRA30_CLK_GR3D 24
-/* 25 */
-#define TEGRA30_CLK_DISP2 26
-#define TEGRA30_CLK_DISP1 27
-#define TEGRA30_CLK_HOST1X 28
-#define TEGRA30_CLK_VCP 29
-#define TEGRA30_CLK_I2S0 30
-#define TEGRA30_CLK_COP_CACHE 31
-
-#define TEGRA30_CLK_MC 32
-#define TEGRA30_CLK_AHBDMA 33
-#define TEGRA30_CLK_APBDMA 34
-/* 35 */
-#define TEGRA30_CLK_KBC 36
-#define TEGRA30_CLK_STATMON 37
-#define TEGRA30_CLK_PMC 38
-/* 39 (register bit affects fuse and fuse_burn) */
-#define TEGRA30_CLK_KFUSE 40
-#define TEGRA30_CLK_SBC1 41
-#define TEGRA30_CLK_NOR 42
-/* 43 */
-#define TEGRA30_CLK_SBC2 44
-/* 45 */
-#define TEGRA30_CLK_SBC3 46
-#define TEGRA30_CLK_I2C5 47
-#define TEGRA30_CLK_DSIA 48
-/* 49 (register bit affects cve and tvo) */
-#define TEGRA30_CLK_MIPI 50
-#define TEGRA30_CLK_HDMI 51
-#define TEGRA30_CLK_CSI 52
-#define TEGRA30_CLK_TVDAC 53
-#define TEGRA30_CLK_I2C2 54
-#define TEGRA30_CLK_UARTC 55
-/* 56 */
-#define TEGRA30_CLK_EMC 57
-#define TEGRA30_CLK_USB2 58
-#define TEGRA30_CLK_USB3 59
-#define TEGRA30_CLK_MPE 60
-#define TEGRA30_CLK_VDE 61
-#define TEGRA30_CLK_BSEA 62
-#define TEGRA30_CLK_BSEV 63
-
-#define TEGRA30_CLK_SPEEDO 64
-#define TEGRA30_CLK_UARTD 65
-#define TEGRA30_CLK_UARTE 66
-#define TEGRA30_CLK_I2C3 67
-#define TEGRA30_CLK_SBC4 68
-#define TEGRA30_CLK_SDMMC3 69
-#define TEGRA30_CLK_PCIE 70
-#define TEGRA30_CLK_OWR 71
-#define TEGRA30_CLK_AFI 72
-#define TEGRA30_CLK_CSITE 73
-#define TEGRA30_CLK_PCIEX 74
-#define TEGRA30_CLK_AVPUCQ 75
-#define TEGRA30_CLK_LA 76
-/* 77 */
-/* 78 */
-#define TEGRA30_CLK_DTV 79
-#define TEGRA30_CLK_NDSPEED 80
-#define TEGRA30_CLK_I2CSLOW 81
-#define TEGRA30_CLK_DSIB 82
-/* 83 */
-#define TEGRA30_CLK_IRAMA 84
-#define TEGRA30_CLK_IRAMB 85
-#define TEGRA30_CLK_IRAMC 86
-#define TEGRA30_CLK_IRAMD 87
-#define TEGRA30_CLK_CRAM2 88
-/* 89 */
-#define TEGRA30_CLK_AUDIO_2X 90 /* a/k/a audio_2x_sync_clk */
-/* 91 */
-#define TEGRA30_CLK_CSUS 92
-#define TEGRA30_CLK_CDEV2 93
-#define TEGRA30_CLK_CDEV1 94
-/* 95 */
-
-#define TEGRA30_CLK_CPU_G 96
-#define TEGRA30_CLK_CPU_LP 97
-#define TEGRA30_CLK_GR3D2 98
-#define TEGRA30_CLK_MSELECT 99
-#define TEGRA30_CLK_TSENSOR 100
-#define TEGRA30_CLK_I2S3 101
-#define TEGRA30_CLK_I2S4 102
-#define TEGRA30_CLK_I2C4 103
-#define TEGRA30_CLK_SBC5 104
-#define TEGRA30_CLK_SBC6 105
-#define TEGRA30_CLK_D_AUDIO 106
-#define TEGRA30_CLK_APBIF 107
-#define TEGRA30_CLK_DAM0 108
-#define TEGRA30_CLK_DAM1 109
-#define TEGRA30_CLK_DAM2 110
-#define TEGRA30_CLK_HDA2CODEC_2X 111
-#define TEGRA30_CLK_ATOMICS 112
-#define TEGRA30_CLK_AUDIO0_2X 113
-#define TEGRA30_CLK_AUDIO1_2X 114
-#define TEGRA30_CLK_AUDIO2_2X 115
-#define TEGRA30_CLK_AUDIO3_2X 116
-#define TEGRA30_CLK_AUDIO4_2X 117
-#define 

[PATCH 03/78] ARM: return positive offset in get_runtime_offset()

2018-03-16 Thread Sascha Hauer
When we are linked at 0x0 and running at 0x01000000, get_runtime_offset()
should return 0x01000000 and not 0xff000000. This makes
get_runtime_offset() more consistent and easier to understand.

This was tested on a Freescale i.MX53 Quickstart board. Additionally,
relocate_to_adr() was tested, since it is normally not called.
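
To illustrate the intended convention (not part of the patch, just an
example with made-up numbers):

	unsigned long offset = get_runtime_offset();

	/*
	 * offset == 0x0        -> running at the link address
	 * offset == 0x01000000 -> running 16 MiB above the link address
	 *
	 * i.e. runtime address = link address + offset, which is also the
	 * value the relocation code adds to every fixup.
	 */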

Signed-off-by: Sascha Hauer 
---
 arch/arm/boards/afi-gf/lowlevel.c|  2 +-
 arch/arm/boards/altera-socdk/lowlevel.c  |  2 +-
 arch/arm/boards/at91sam9263ek/lowlevel_init.c|  2 +-
 arch/arm/boards/at91sam9x5ek/lowlevel.c  |  2 +-
 arch/arm/boards/beaglebone/lowlevel.c|  2 +-
 arch/arm/boards/boundarydevices-nitrogen6/lowlevel.c | 10 +-
 arch/arm/boards/ccxmx53/lowlevel.c   |  4 ++--
 arch/arm/boards/datamodul-edm-qmx6/lowlevel.c|  2 +-
 arch/arm/boards/dfi-fs700-m60/lowlevel.c |  6 +++---
 arch/arm/boards/duckbill/lowlevel.c  |  2 +-
 arch/arm/boards/ebv-socrates/lowlevel.c  |  2 +-
 arch/arm/boards/efika-mx-smartbook/lowlevel.c|  2 +-
 arch/arm/boards/element14-warp7/lowlevel.c   |  2 +-
 arch/arm/boards/eltec-hipercam/lowlevel.c|  2 +-
 arch/arm/boards/embedsky-e9/lowlevel.c   |  2 +-
 arch/arm/boards/embest-riotboard/lowlevel.c  |  2 +-
 arch/arm/boards/freescale-mx51-babbage/lowlevel.c|  2 +-
 arch/arm/boards/freescale-mx53-qsb/lowlevel.c|  4 ++--
 arch/arm/boards/freescale-mx53-vmx53/lowlevel.c  |  2 +-
 arch/arm/boards/freescale-mx6-sabresd/lowlevel.c |  2 +-
 arch/arm/boards/freescale-mx6sx-sabresdb/lowlevel.c  |  2 +-
 arch/arm/boards/freescale-mx7-sabresd/lowlevel.c |  2 +-
 arch/arm/boards/freescale-vf610-twr/lowlevel.c   |  2 +-
 arch/arm/boards/gateworks-ventana/lowlevel.c |  2 +-
 arch/arm/boards/gk802/lowlevel.c |  2 +-
 arch/arm/boards/globalscale-guruplug/lowlevel.c  |  2 +-
 arch/arm/boards/globalscale-mirabox/lowlevel.c   |  2 +-
 arch/arm/boards/karo-tx25/lowlevel.c |  2 +-
 arch/arm/boards/karo-tx6x/lowlevel.c |  8 
 arch/arm/boards/kindle-mx50/lowlevel.c   |  6 +++---
 arch/arm/boards/lenovo-ix4-300d/lowlevel.c   |  2 +-
 arch/arm/boards/marvell-armada-xp-gp/lowlevel.c  |  2 +-
 arch/arm/boards/netgear-rn104/lowlevel.c |  2 +-
 arch/arm/boards/netgear-rn2120/lowlevel.c|  2 +-
 arch/arm/boards/phytec-phycard-imx27/lowlevel.c  |  2 +-
 arch/arm/boards/phytec-phycore-imx27/lowlevel.c  |  2 +-
 arch/arm/boards/phytec-phycore-imx7/lowlevel.c   |  4 ++--
 arch/arm/boards/phytec-som-am335x/lowlevel.c |  6 +++---
 arch/arm/boards/phytec-som-imx6/lowlevel.c   |  2 +-
 arch/arm/boards/phytec-som-rk3288/lowlevel.c |  2 +-
 arch/arm/boards/plathome-openblocks-a6/lowlevel.c|  2 +-
 arch/arm/boards/plathome-openblocks-ax3/lowlevel.c   |  2 +-
 arch/arm/boards/radxa-rock/lowlevel.c|  2 +-
 arch/arm/boards/raspberry-pi/lowlevel.c  |  4 ++--
 arch/arm/boards/reflex-achilles/lowlevel.c   |  2 +-
 arch/arm/boards/solidrun-cubox/lowlevel.c|  2 +-
 arch/arm/boards/solidrun-microsom/lowlevel.c | 18 +-
 arch/arm/boards/technexion-pico-hobbit/lowlevel.c|  2 +-
 arch/arm/boards/terasic-de0-nano-soc/lowlevel.c  |  2 +-
 arch/arm/boards/terasic-sockit/lowlevel.c|  2 +-
 arch/arm/boards/tqma53/lowlevel.c|  4 ++--
 arch/arm/boards/tqma6x/lowlevel.c|  4 ++--
 arch/arm/boards/turris-omnia/lowlevel.c  |  2 +-
 arch/arm/boards/udoo/lowlevel.c  |  2 +-
 arch/arm/boards/usi-topkick/lowlevel.c   |  2 +-
 arch/arm/boards/variscite-mx6/lowlevel.c |  2 +-
 arch/arm/boards/vexpress/lowlevel.c  |  2 +-
 arch/arm/boards/vscom-baltos/lowlevel.c  |  2 +-
 arch/arm/boards/zii-imx6q-rdu2/lowlevel.c|  4 ++--
 arch/arm/boards/zii-vf610-dev/lowlevel.c |  2 +-
 arch/arm/cpu/common.c| 16 
 arch/arm/cpu/setupc.S|  9 +
 arch/arm/cpu/start-pbl.c |  4 ++--
 arch/arm/cpu/uncompress.c|  2 +-
 arch/arm/lib32/runtime-offset.S  |  4 ++--
 arch/arm/mach-mvebu/include/mach/common.h|  2 +-
 arch/arm/mach-tegra/include/mach/lowlevel.h  |  2 +-
 arch/arm/mach-tegra/tegra_avp_init.c |  2 +-
 68 files changed, 109 insertions(+), 108 deletions(-)

diff --git a/arch/arm/boards/afi-gf/lowlevel.c b/arch/arm/boards/afi-gf/lowlevel.c
index 91b4b68c6d..1afc96571b 100644
--- a/arch/arm/boards/afi-gf/lowlevel.c
+++ b/arch/arm/boards/afi-gf/lowlevel.c
@@ -258,7 +258,7 @@ 

[PATCH 70/78] bootm: provide handlers the start of the OS image

2018-03-16 Thread Sascha Hauer
The bootm code needs to read the beginning of the OS image in order to
determine the filetype. Since it reads it anyway, we can provide that
buffer to the handlers. This helps the handlers find image metadata
before loading the full image.
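
A handler can then do something like this (sketch only; the header
offsets are just an example of image metadata, borrowed from the
aarch64 handler added later in this series):

static int do_bootm_example(struct image_data *data)
{
	resource_size_t start, end;
	unsigned long text_offset;
	int ret;

	/* peek at metadata in the already-read first page of the image */
	text_offset = le64_to_cpup(data->os_header + 8);

	ret = memory_bank_first_find_space(&start, &end);
	if (ret)
		return ret;

	/* only now load the bulk of the image to the right place */
	return bootm_load_os(data, start + text_offset);
}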

Signed-off-by: Sascha Hauer 
---
 common/bootm.c  | 10 +-
 include/bootm.h |  7 +++
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/common/bootm.c b/common/bootm.c
index 3e48ca1d88..5ff6683fe7 100644
--- a/common/bootm.c
+++ b/common/bootm.c
@@ -529,6 +529,7 @@ int bootm_boot(struct bootm_data *bootm_data)
struct image_handler *handler;
int ret;
enum filetype os_type;
+   size_t size;
 
if (!bootm_data->os_file) {
printf("no image given\n");
@@ -548,7 +549,13 @@ int bootm_boot(struct bootm_data *bootm_data)
data->os_address = bootm_data->os_address;
data->os_entry = bootm_data->os_entry;
 
-   os_type = file_name_detect_type(data->os_file);
+   ret = read_file_2(data->os_file, &size, &data->os_header, PAGE_SIZE);
+   if (ret < 0 && ret != -EFBIG)
+   goto err_out;
+   if (size < PAGE_SIZE)
+   goto err_out;
+
+   os_type = file_detect_type(data->os_header, PAGE_SIZE);
if ((int)os_type < 0) {
printf("could not open %s: %s\n", data->os_file,
strerror(-os_type));
@@ -674,6 +681,7 @@ err_out:
of_delete_node(data->of_root_node);
 
globalvar_remove("linux.bootargs.bootm.appendroot");
+   free(data->os_header);
free(data->os_file);
free(data->oftree_file);
free(data->initrd_file);
diff --git a/include/bootm.h b/include/bootm.h
index 35c18dc276..62951d6058 100644
--- a/include/bootm.h
+++ b/include/bootm.h
@@ -81,6 +81,13 @@ struct image_data {
struct fdt_header *oftree;
struct resource *oftree_res;
 
+   /*
+* The first PAGE_SIZE bytes of the OS image. Can be used by the image
+* handlers to analyze the OS image before actually loading the bulk of
+* it.
+*/
+   void *os_header;
+
enum bootm_verify verify;
int verbose;
int force;
-- 
2.16.1




[PATCH 46/78] ARM: aarch64: mmu: Fix disabling the MMU

2018-03-16 Thread Sascha Hauer
Do it as U-Boot does: disable the MMU first, then flush the caches and
finally invalidate the TLBs. I wish I could reference some document
instead of U-Boot code, but I haven't found anything.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu_64.c | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 165ff5bac6..6606405b21 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -329,12 +329,9 @@ void mmu_disable(void)
cr = get_cr();
cr &= ~(CR_M | CR_C);
 
-   tlb_invalidate();
-
-   dsb();
-   isb();
-
set_cr(cr);
+   v8_flush_dcache_all();
+   tlb_invalidate();
 
dsb();
isb();
-- 
2.16.1




[PATCH 48/78] dma: Use dma_addr_t as type for DMA addresses

2018-03-16 Thread Sascha Hauer
DMA addresses are not necessarily the same size as unsigned long. Fix
the type used by the dma_sync_single_* operations.
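
The difference matters on configurations where bus addresses are wider
than unsigned long, for example a 32-bit CPU with 64-bit capable DMA. A
made-up driver snippet for illustration:

#include <dma.h>

/* the device has finished writing len bytes described by dma_handle */
static void example_rx_done(dma_addr_t dma_handle, size_t len)
{
	/* dma_handle may not fit into an unsigned long here */
	dma_sync_single_for_cpu(dma_handle, len, DMA_FROM_DEVICE);
}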

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu.c  | 4 ++--
 arch/mips/lib/dma-default.c | 4 ++--
 include/dma.h   | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/arm/cpu/mmu.c b/arch/arm/cpu/mmu.c
index de0b631981..fc71cf03cd 100644
--- a/arch/arm/cpu/mmu.c
+++ b/arch/arm/cpu/mmu.c
@@ -575,7 +575,7 @@ void dma_free_coherent(void *mem, dma_addr_t dma_handle, size_t size)
free(mem);
 }
 
-void dma_sync_single_for_cpu(unsigned long address, size_t size,
+void dma_sync_single_for_cpu(dma_addr_t address, size_t size,
 enum dma_data_direction dir)
 {
if (dir != DMA_TO_DEVICE) {
@@ -585,7 +585,7 @@ void dma_sync_single_for_cpu(unsigned long address, size_t size,
}
 }
 
-void dma_sync_single_for_device(unsigned long address, size_t size,
+void dma_sync_single_for_device(dma_addr_t address, size_t size,
enum dma_data_direction dir)
 {
if (dir == DMA_FROM_DEVICE) {
diff --git a/arch/mips/lib/dma-default.c b/arch/mips/lib/dma-default.c
index 9b2fe7d410..71c1e423b7 100644
--- a/arch/mips/lib/dma-default.c
+++ b/arch/mips/lib/dma-default.c
@@ -44,13 +44,13 @@ static inline void __dma_sync_mips(void *addr, size_t size,
 }
 #endif
 
-void dma_sync_single_for_cpu(unsigned long address, size_t size,
+void dma_sync_single_for_cpu(dma_addr_t address, size_t size,
 enum dma_data_direction dir)
 {
__dma_sync_mips(address, size, dir);
 }
 
-void dma_sync_single_for_device(unsigned long address, size_t size,
+void dma_sync_single_for_device(dma_addr_t address, size_t size,
enum dma_data_direction dir)
 {
__dma_sync_mips(address, size, dir);
diff --git a/include/dma.h b/include/dma.h
index 4d31797968..29d94c0a52 100644
--- a/include/dma.h
+++ b/include/dma.h
@@ -31,10 +31,10 @@ static inline void dma_free(void *mem)
 #endif
 
 /* streaming DMA - implement the below calls to support HAS_DMA */
-void dma_sync_single_for_cpu(unsigned long address, size_t size,
+void dma_sync_single_for_cpu(dma_addr_t address, size_t size,
 enum dma_data_direction dir);
 
-void dma_sync_single_for_device(unsigned long address, size_t size,
+void dma_sync_single_for_device(dma_addr_t address, size_t size,
enum dma_data_direction dir);
 
 void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle);
-- 
2.16.1




[PATCH 43/78] ARM: aarch64: mmu: Fix TCR setting

2018-03-16 Thread Sascha Hauer
A BITS_PER_VA value of 33 is a little small. Increase it to 39, which is
the maximum we can do with 3-level page tables. The TCR value depends on
the current exception level, so we have to calculate it at runtime. To do
this, use a function derived from U-Boot's get_tcr() function.
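
For reference, this is how the numbers work out with a 4K granule (my
summary, not part of the patch):

/*
 * 4K granule: 12 bits of page offset, 9 bits resolved per table level.
 * Three levels -> 12 + 3 * 9 = 39 usable VA bits, hence BITS_PER_VA = 39.
 * TCR.T0SZ encodes the number of unused VA bits, so
 * TCR_T0SZ(BITS_PER_VA) works out to 64 - 39 = 25.
 */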

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu.h   |  7 ---
 arch/arm/cpu/mmu_64.c| 31 ---
 arch/arm/include/asm/pgtable64.h |  3 ++-
 3 files changed, 30 insertions(+), 11 deletions(-)

diff --git a/arch/arm/cpu/mmu.h b/arch/arm/cpu/mmu.h
index 186d408ead..5803cb6a83 100644
--- a/arch/arm/cpu/mmu.h
+++ b/arch/arm/cpu/mmu.h
@@ -3,13 +3,6 @@
 
 #ifdef CONFIG_CPU_64v8
 
-#define TCR_FLAGS  (TCR_TG0_4K | \
-   TCR_SHARED_OUTER | \
-   TCR_SHARED_INNER | \
-   TCR_IRGN_WBWA | \
-   TCR_ORGN_WBWA | \
-   TCR_T0SZ(BITS_PER_VA))
-
 #ifndef __ASSEMBLY__
 
 static inline void set_ttbr_tcr_mair(int el, uint64_t table, uint64_t tcr, 
uint64_t attr)
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index c7590fa33c..7932185885 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -54,6 +54,27 @@ static void arm_mmu_not_initialized_error(void)
panic("MMU not initialized\n");
 }
 
+static uint64_t calc_tcr(int el)
+{
+   u64 ips, va_bits;
+   u64 tcr;
+
+   ips = 2;
+   va_bits = BITS_PER_VA;
+
+   if (el == 1)
+   tcr = (ips << 32) | TCR_EPD1_DISABLE;
+   else if (el == 2)
+   tcr = (ips << 16);
+   else
+   tcr = (ips << 16);
+
+   /* PTWs cacheable, inner/outer WBWA and inner shareable */
+   tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
+   tcr |= TCR_T0SZ(va_bits);
+
+   return tcr;
+}
 
 /*
  * Do it the simple way for now and invalidate the entire
@@ -254,6 +275,7 @@ static void mmu_enable(void)
 static int mmu_init(void)
 {
struct memory_bank *bank;
+   unsigned int el;
 
if (list_empty(&memory_banks))
/*
@@ -281,8 +303,8 @@ static int mmu_init(void)
 
memset(ttb, 0, GRANULE_SIZE);
 
-   set_ttbr_tcr_mair(current_el(), (uint64_t)ttb, TCR_FLAGS,
- MEMORY_ATTRIBUTES);
+   el = current_el();
+   set_ttbr_tcr_mair(el, (uint64_t)ttb, calc_tcr(el), MEMORY_ATTRIBUTES);
}
 
pr_debug("ttb: 0x%p\n", ttb);
@@ -323,11 +345,14 @@ void mmu_disable(void)
 
 void mmu_early_enable(uint64_t membase, uint64_t memsize, uint64_t _ttb)
 {
+   int el;
+
ttb = (uint64_t *)_ttb;
 
memset(ttb, 0, GRANULE_SIZE);
 
-   set_ttbr_tcr_mair(current_el(), (uint64_t)ttb, TCR_FLAGS, MEMORY_ATTRIBUTES);
+   el = current_el();
+   set_ttbr_tcr_mair(el, (uint64_t)ttb, calc_tcr(el), MEMORY_ATTRIBUTES);
 
create_sections(0, 0, 1UL << (BITS_PER_VA - 1), UNCACHED_MEM);
 
diff --git a/arch/arm/include/asm/pgtable64.h b/arch/arm/include/asm/pgtable64.h
index f2888c3ccd..d8382505d0 100644
--- a/arch/arm/include/asm/pgtable64.h
+++ b/arch/arm/include/asm/pgtable64.h
@@ -21,7 +21,7 @@
 #define UNUSED_DESC0x6EbAAD0BBADbA6E0
 
 #define VA_START   0x0
-#define BITS_PER_VA33
+#define BITS_PER_VA39
 
 /* Granule size of 4KB is being used */
 #define GRANULE_SIZE_SHIFT 12
@@ -116,6 +116,7 @@
 #define TCR_EL1_IPS_BITS   (UL(3) << 32)   /* 42 bits physical address */
 #define TCR_EL2_IPS_BITS   (3 << 16)   /* 42 bits physical address */
 #define TCR_EL3_IPS_BITS   (3 << 16)   /* 42 bits physical address */
+#define TCR_EPD1_DISABLE   (1 << 23)
 
 #define TCR_EL1_RSVD   (1 << 31)
 #define TCR_EL2_RSVD   (1 << 31 | 1 << 23)
-- 
2.16.1




[PATCH 64/78] ARM: aarch64: print more information on sync exception

2018-03-16 Thread Sascha Hauer
On a sync exception, more useful information can be printed than we
currently do. Pass the ESR and FAR values to do_sync() and print the
reason for the exception along with the address that actually faulted.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/exceptions_64.S | 4 
 arch/arm/cpu/interrupts_64.c | 4 ++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/arch/arm/cpu/exceptions_64.S b/arch/arm/cpu/exceptions_64.S
index 58ca50b709..22034eaef9 100644
--- a/arch/arm/cpu/exceptions_64.S
+++ b/arch/arm/cpu/exceptions_64.S
@@ -35,12 +35,15 @@
switch_el x11, 3f, 2f, 1f
 3: mrs x1, esr_el3
mrs x2, elr_el3
+   mrs x3, far_el3
b   0f
 2: mrs x1, esr_el2
mrs x2, elr_el2
+   mrs x3, far_el2
b   0f
 1: mrs x1, esr_el1
mrs x2, elr_el1
+   mrs x3, far_el1
 0:
stp x2, x0, [sp, #-16]!
mov x0, sp
@@ -99,6 +102,7 @@ _do_bad_error:
 
 _do_sync:
exception_entry
+   mov x2, x3
bl  do_sync
b   exception_exit
 
diff --git a/arch/arm/cpu/interrupts_64.c b/arch/arm/cpu/interrupts_64.c
index c32cd4f051..ee3dc3380b 100644
--- a/arch/arm/cpu/interrupts_64.c
+++ b/arch/arm/cpu/interrupts_64.c
@@ -143,9 +143,9 @@ void do_bad_error(struct pt_regs *pt_regs)
do_exception(pt_regs);
 }
 
-void do_sync(struct pt_regs *pt_regs)
+void do_sync(struct pt_regs *pt_regs, unsigned int esr, unsigned long far)
 {
-   printf("sync exception\n");
+   printf("%s exception at 0x%016lx\n", esr_get_class_string(esr), far);
do_exception(pt_regs);
 }
 
-- 
2.16.1




[PATCH 45/78] ARM: aarch64: mmu: drop ttb check when disabling the MMU

2018-03-16 Thread Sascha Hauer
If the MMU is enabled then we should be able to disable it, no matter
whether we initialized it in barebox or not. This change is not strictly
needed, but it helps when barebox is started as second stage from U-Boot
with the 'go' command, which leaves the MMU enabled.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu_64.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index c8c3c9d84f..165ff5bac6 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -326,9 +326,6 @@ void mmu_disable(void)
 {
unsigned int cr;
 
-   if (!ttb)
-   arm_mmu_not_initialized_error();
-
cr = get_cr();
cr &= ~(CR_M | CR_C);
 
-- 
2.16.1




[PATCH 72/78] ARM: aarch64: Add support to start kernel and barebox

2018-03-16 Thread Sascha Hauer
aarch64 has its own Linux image format. Add a bootm handler for this
format. Also add a handler for barebox images.
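
For reference, the Linux handler below relies on the arm64 image header
layout; to my understanding (from the kernel's arm64 booting
documentation) it looks roughly like this:

struct arm64_image_header {
	u32 code0;		/* executable code, branches to the kernel */
	u32 code1;
	u64 text_offset;	/* load offset from a 2 MiB aligned base */
	u64 image_size;		/* effective image size, including BSS */
	u64 flags;
	u64 res2;
	u64 res3;
	u64 res4;
	u32 magic;		/* 0x644d5241, "ARM\x64" */
	u32 res5;
};

text_offset and image_size are the two fields read from data->os_header
in do_bootm_linux() below.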

Signed-off-by: Sascha Hauer 
---
 arch/arm/lib64/armlinux.c | 120 +-
 1 file changed, 98 insertions(+), 22 deletions(-)

diff --git a/arch/arm/lib64/armlinux.c b/arch/arm/lib64/armlinux.c
index 54ce6ca046..238e8b67a4 100644
--- a/arch/arm/lib64/armlinux.c
+++ b/arch/arm/lib64/armlinux.c
@@ -1,51 +1,127 @@
 /*
- * (C) Copyright 2002
- * Sysgo Real-Time Solutions, GmbH 
- * Marius Groeger 
+ * Copyright (C) 2018 Sascha Hauer 
  *
- * Copyright (C) 2001  Erik Mouw (j.a.k.m...@its.tudelft.nl)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2.
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
  */
 
 #include 
 #include 
-#include 
-#include 
 #include 
 #include 
-#include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
-#include 
-
+#include 
+#include 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
 
-void start_linux(void *adr, int swap, unsigned long initrd_address,
-unsigned long initrd_size, void *oftree,
-enum arm_security_state bootm_secure_state)
+static int do_bootm_linux(struct image_data *data)
 {
-   void (*kernel)(void *dtb) = adr;
+   void (*fn)(unsigned long dtb, unsigned long x1, unsigned long x2,
+  unsigned long x3);
+   resource_size_t start, end;
+   unsigned long text_offset, image_size, devicetree, kernel;
+   int ret;
+
+   text_offset = le64_to_cpup(data->os_header + 8);
+   image_size = le64_to_cpup(data->os_header + 16);
+
+   ret = memory_bank_first_find_space(&start, &end);
+   if (ret)
+   goto out;
+
+   kernel = ALIGN(start, SZ_2M) + text_offset;
+
+   ret = bootm_load_os(data, kernel);
+   if (ret)
+   goto out;
+
+   devicetree = ALIGN(kernel + image_size, PAGE_SIZE);
+
+   ret = bootm_load_devicetree(data, devicetree);
+   if (ret)
+   goto out;
+
+   printf("Loaded kernel to 0x%08lx, devicetree at 0x%08lx\n",
+  kernel, devicetree);
 
shutdown_barebox();
 
-   kernel(oftree);
+   fn = (void *)kernel;
+
+   fn(devicetree, 0, 0, 0);
+
+   ret = -EINVAL;
+
+out:
+   return ret;
+}
+
+static struct image_handler aarch64_linux_handler = {
+.name = "ARM aarch64 Linux image",
+.bootm = do_bootm_linux,
+.filetype = filetype_arm64_linux_image,
+};
+
+static int do_bootm_barebox(struct image_data *data)
+{
+   void (*fn)(unsigned long x0, unsigned long x1, unsigned long x2,
+  unsigned long x3);
+   resource_size_t start, end;
+   unsigned long barebox;
+   int ret;
+
+   ret = memory_bank_first_find_space(&start, &end);
+   if (ret)
+   goto out;
+
+   barebox = start;
+
+   ret = bootm_load_os(data, barebox);
+   if (ret)
+   goto out;
+
+   printf("Loaded barebox image to 0x%08lx\n", barebox);
+
+   shutdown_barebox();
+
+   fn = (void *)barebox;
+
+   fn(0, 0, 0, 0);
+
+   ret = -EINVAL;
+
+out:
+   return ret;
+}
+
+static struct image_handler aarch64_barebox_handler = {
+.name = "ARM aarch64 barebox image",
+.bootm = do_bootm_barebox,
+.filetype = filetype_arm_barebox,
+};
+
+static int aarch64_register_image_handler(void)
+{
+   register_image_handler(&aarch64_linux_handler);
+   register_image_handler(&aarch64_barebox_handler);
+
+   return 0;
 }
+late_initcall(aarch64_register_image_handler);
-- 
2.16.1




[PATCH 75/78] ARM: Create own cache.c file for aarch64

2018-03-16 Thread Sascha Hauer
cache.c does not work properly for aarch64. We create a struct cache_fns
using C preprocessor magic which assumes that cache maintenance
operations with certain names exist. These functions have other names on
aarch64. While we could fix this, we do not need the automatic cache
function selection on aarch64, since there is only one function set
there.
Create a separate file and be done with this issue.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/Makefile   |  4 ++--
 arch/arm/cpu/cache.c| 16 
 arch/arm/cpu/cache_64.c | 35 +++
 3 files changed, 37 insertions(+), 18 deletions(-)
 create mode 100644 arch/arm/cpu/cache_64.c

diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index edd4a290ca..f79cedd085 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -10,7 +10,7 @@ endif
 
 obj-y += start.o entry.o
 
-obj-pbl-y += setupc$(S64).o
+obj-pbl-y += setupc$(S64).o cache$(S64).o
 
 #
 # Any variants can be called as start-armxyz.S
@@ -44,4 +44,4 @@ pbl-y += entry.o
 pbl-$(CONFIG_PBL_SINGLE_IMAGE) += start-pbl.o
 pbl-$(CONFIG_PBL_MULTI_IMAGES) += uncompress.o
 
-obj-pbl-y += common.o cache.o sections.o
+obj-pbl-y += common.o sections.o
diff --git a/arch/arm/cpu/cache.c b/arch/arm/cpu/cache.c
index 1a8f49d301..7047470f0c 100644
--- a/arch/arm/cpu/cache.c
+++ b/arch/arm/cpu/cache.c
@@ -101,11 +101,6 @@ int arm_set_cache_functions(void)
case CPU_ARCH_ARMv7:
cache_fns = &cache_fns_armv7;
break;
-#endif
-#ifdef CONFIG_CPU_64v8
-   case CPU_ARCH_ARMv8:
-   cache_fns = &cache_fns_armv8;
-   break;
 #endif
default:
while(1);
@@ -143,11 +138,6 @@ void arm_early_mmu_cache_flush(void)
case CPU_ARCH_ARMv7:
v7_mmu_cache_flush();
return;
-#endif
-#ifdef CONFIG_CPU_64v8
-   case CPU_ARCH_ARMv8:
-   v8_flush_dcache_all();
-   return;
 #endif
}
 }
@@ -171,12 +161,6 @@ void arm_early_mmu_cache_invalidate(void)
v7_mmu_cache_invalidate();
return;
 #endif
-#else
-#ifdef CONFIG_CPU_64v8
-   case CPU_ARCH_ARMv8:
-   v8_invalidate_icache_all();
-   return;
-#endif
 #endif
}
 }
diff --git a/arch/arm/cpu/cache_64.c b/arch/arm/cpu/cache_64.c
new file mode 100644
index 00..45f01e8dc9
--- /dev/null
+++ b/arch/arm/cpu/cache_64.c
@@ -0,0 +1,35 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+int arm_set_cache_functions(void)
+{
+   return 0;
+}
+
+/*
+ * Early function to flush the caches. This is for use when the
+ * C environment is not yet fully initialized.
+ */
+void arm_early_mmu_cache_flush(void)
+{
+   v8_flush_dcache_all();
+}
+
+void arm_early_mmu_cache_invalidate(void)
+{
+   v8_invalidate_icache_all();
+}
-- 
2.16.1




[PATCH 65/78] ARM: aarch64: implement ignoring data aborts

2018-03-16 Thread Sascha Hauer
Data aborts can be masked using the data_abort_mask() function.
Add the missing pieces for this functionality on aarch64 so that the
'md' command is safe to call even on faulting addresses.
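
Usage then looks roughly like this (a sketch, assuming the same
data_abort_mask()/data_abort_unmask() pair that the arm32 code already
provides):

#include <io.h>
#include <errno.h>

static int example_safe_read32(void __iomem *addr, u32 *val)
{
	data_abort_mask();

	*val = readl(addr);	/* may fault, e.g. on an unmapped address */

	/* data_abort_unmask() returns non-zero if an abort happened */
	return data_abort_unmask() ? -EFAULT : 0;
}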

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/interrupts_64.c | 13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/arm/cpu/interrupts_64.c b/arch/arm/cpu/interrupts_64.c
index ee3dc3380b..32c8dfcb2d 100644
--- a/arch/arm/cpu/interrupts_64.c
+++ b/arch/arm/cpu/interrupts_64.c
@@ -143,8 +143,18 @@ void do_bad_error(struct pt_regs *pt_regs)
do_exception(pt_regs);
 }
 
+extern volatile int arm_ignore_data_abort;
+extern volatile int arm_data_abort_occurred;
+
 void do_sync(struct pt_regs *pt_regs, unsigned int esr, unsigned long far)
 {
+   if ((esr >> ESR_ELx_EC_SHIFT) == ESR_ELx_EC_DABT_CUR &&
+   arm_ignore_data_abort) {
+   arm_data_abort_occurred = 1;
+   pt_regs->elr += 4;
+   return;
+   }
+
printf("%s exception at 0x%016lx\n", esr_get_class_string(esr), far);
do_exception(pt_regs);
 }
@@ -156,9 +166,6 @@ void do_error(struct pt_regs *pt_regs)
do_exception(pt_regs);
 }
 
-extern volatile int arm_ignore_data_abort;
-extern volatile int arm_data_abort_occurred;
-
 void data_abort_mask(void)
 {
arm_data_abort_occurred = 0;
-- 
2.16.1




[PATCH 57/78] ARM: aarch64: remove dead code in linker script

2018-03-16 Thread Sascha Hauer
CONFIG_ARM_UNWIND does not exist for aarch64. Remove the dead code.

Signed-off-by: Sascha Hauer 
---
 arch/arm/lib64/barebox.lds.S | 16 
 1 file changed, 16 deletions(-)

diff --git a/arch/arm/lib64/barebox.lds.S b/arch/arm/lib64/barebox.lds.S
index fa633e3699..08adc44e86 100644
--- a/arch/arm/lib64/barebox.lds.S
+++ b/arch/arm/lib64/barebox.lds.S
@@ -53,22 +53,6 @@ SECTIONS
. = ALIGN(4);
.rodata : { *(.rodata*) }
 
-#ifdef CONFIG_ARM_UNWIND
-   /*
-* Stack unwinding tables
-*/
-   . = ALIGN(8);
-   .ARM.unwind_idx : {
-   __start_unwind_idx = .;
-   *(.ARM.exidx*)
-   __stop_unwind_idx = .;
-   }
-   .ARM.unwind_tab : {
-   __start_unwind_tab = .;
-   *(.ARM.extab*)
-   __stop_unwind_tab = .;
-   }
-#endif
_etext = .; /* End of text and rodata section */
_sdata = .;
 
-- 
2.16.1




[PATCH 74/78] ARM: build: Remove duplicate file compilation

2018-03-16 Thread Sascha Hauer
cache.o is already obj-y, drop unnecessary obj-$(CONFIG_MMU)

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/Makefile | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index eb783481ea..edd4a290ca 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -18,7 +18,6 @@ obj-pbl-y += setupc$(S64).o
 obj-$(CONFIG_CMD_ARM_CPUINFO) += cpuinfo.o
 obj-$(CONFIG_CMD_ARM_MMUINFO) += mmuinfo.o
 obj-$(CONFIG_OFDEVICE) += dtb.o
-obj-$(CONFIG_MMU) += cache.o
 
 ifeq ($(CONFIG_MMU),)
 obj-y += no-mmu.o
-- 
2.16.1




[PATCH 17/78] ARM: Use generic ffz()

2018-03-16 Thread Sascha Hauer
The generic ffz() from the asm-generic bitops headers works like our
ARM-specific variant, except that the generic variant supports a 64-bit
word size. Use the generic variant to fix ffz() for aarch64.
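
To spell out the semantics (illustrative only):

static void ffz_example(void)
{
	/* ffz(x) == __ffs(~x): index of the first zero bit */
	unsigned long a = ffz(0x0000ffffUL);	/* a == 16 */

	/*
	 * With a 64-bit unsigned long (aarch64) the next call yields 32,
	 * a value the old, 32-bit-only implementation could never return.
	 */
	unsigned long b = ffz(0xffffffffUL);

	pr_debug("%lu %lu\n", a, b);
}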

Signed-off-by: Sascha Hauer 
---
 arch/arm/include/asm/bitops.h | 18 +-
 1 file changed, 1 insertion(+), 17 deletions(-)

diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index b51225efe5..344e288bae 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -156,23 +156,7 @@ static inline int constant_fls(int x)
 #define __ffs(x) (ffs(x) - 1)
 #define ffz(x) __ffs(~(x))
 #else  /* ! __ARM__USE_GENERIC_FF */
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static inline unsigned long ffz(unsigned long word)
-{
-   int k;
-
-   word = ~word;
-   k = 31;
-   if (word & 0x0000ffff) { k -= 16; word <<= 16; }
-   if (word & 0x00ff0000) { k -= 8;  word <<= 8;  }
-   if (word & 0x0f000000) { k -= 4;  word <<= 4;  }
-   if (word & 0x30000000) { k -= 2;  word <<= 2;  }
-   if (word & 0x40000000) { k -= 1; }
-   return k;
-}
+#include 
 #include 
 #include 
 #include 
-- 
2.16.1




[PATCH 73/78] ARM: cache-armv4: Fix wrong section

2018-03-16 Thread Sascha Hauer
The v4_mmu_cache_on function should be in a section with the same name.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/cache-armv4.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/cpu/cache-armv4.S b/arch/arm/cpu/cache-armv4.S
index 1d1a1e32bf..db87de17e9 100644
--- a/arch/arm/cpu/cache-armv4.S
+++ b/arch/arm/cpu/cache-armv4.S
@@ -3,7 +3,7 @@
 
 #define CACHE_DLINESIZE 32
 
-.section .text.__mmu_cache_on
+.section .text.v4_mmu_cache_on
 ENTRY(v4_mmu_cache_on)
mov r12, lr
 #ifdef CONFIG_MMU
-- 
2.16.1




[PATCH 32/78] ARM: aarch64: mmu: Allocate page tables dynamically

2018-03-16 Thread Sascha Hauer
The current code allocates 16KiB for page tables, which is only room for
four 4KiB tables. Whenever a new table is needed an index is increased,
but the bounds of this index are never checked. Allocate the page tables
dynamically to solve this.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu_64.c | 9 ++---
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 4803a85e4c..eb54097514 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -38,7 +38,6 @@
 #define UNCACHED_MEM(PMD_ATTRINDX(MT_NORMAL_NC) | PMD_SECT_S | PMD_SECT_AF 
| PMD_TYPE_SECT)
 
 static uint64_t *ttb;
-static int free_idx;
 
 static void arm_mmu_not_initialized_error(void)
 {
@@ -108,13 +107,11 @@ static void set_table(uint64_t *pt, uint64_t *table_addr)
 
 static uint64_t *create_table(void)
 {
-   uint64_t *new_table = ttb + free_idx * GRANULE_SIZE;
+   uint64_t *new_table = xmemalign(GRANULE_SIZE, GRANULE_SIZE);
 
/* Mark all entries as invalid */
memset(new_table, 0, GRANULE_SIZE);
 
-   free_idx++;
-
return new_table;
 }
 
@@ -253,8 +250,7 @@ static int mmu_init(void)
pr_crit("Critical Error: Can't request SDRAM region for ttb at %p\n",
ttb);
} else {
-   ttb = memalign(GRANULE_SIZE, SZ_16K);
-   free_idx = 1;
+   ttb = xmemalign(GRANULE_SIZE, GRANULE_SIZE);
 
memset(ttb, 0, GRANULE_SIZE);
 
@@ -312,7 +308,6 @@ void mmu_early_enable(uint64_t membase, uint64_t memsize, uint64_t _ttb)
ttb = (uint64_t *)_ttb;
 
memset(ttb, 0, GRANULE_SIZE);
-   free_idx = 1;
 
set_ttbr_tcr_mair(current_el(), (uint64_t)ttb, TCR_FLAGS, UNCACHED_MEM);
 
-- 
2.16.1




[PATCH 34/78] ARM: aarch64: mmu: fix creation of flat mapping

2018-03-16 Thread Sascha Hauer
During initialization of the page tables we want to create a flat
uncached mapping for the whole address space. create_sections() takes
the size in bytes, not in MiB as the callers assume. Fix this.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu_64.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 6e22da9a26..fe1e5410f7 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -258,8 +258,8 @@ static int mmu_init(void)
 
pr_debug("ttb: 0x%p\n", ttb);
 
-   /* create a flat mapping using 1MiB sections */
-   create_sections(0, 0, GRANULE_SIZE, UNCACHED_MEM);
+   /* create a flat mapping */
+   create_sections(0, 0, 1UL << (BITS_PER_VA - 1), UNCACHED_MEM);
 
/* Map sdram cached. */
for_each_memory_bank(bank)
@@ -310,7 +310,7 @@ void mmu_early_enable(uint64_t membase, uint64_t memsize, uint64_t _ttb)
 
set_ttbr_tcr_mair(current_el(), (uint64_t)ttb, TCR_FLAGS, UNCACHED_MEM);
 
-   create_sections(0, 0, 4096, UNCACHED_MEM);
+   create_sections(0, 0, 1UL << (BITS_PER_VA - 1), UNCACHED_MEM);
 
create_sections(membase, membase, memsize, CACHED_MEM);
 
-- 
2.16.1




[PATCH 16/78] ARM: aarch64: implement get_pc()

2018-03-16 Thread Sascha Hauer
The arm32 version can't be used on aarch64, so implement an
aarch64-specific version.

Signed-off-by: Sascha Hauer 
---
 arch/arm/include/asm/common.h | 10 --
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/arch/arm/include/asm/common.h b/arch/arm/include/asm/common.h
index 07ae619cea..97bfdc43f5 100644
--- a/arch/arm/include/asm/common.h
+++ b/arch/arm/include/asm/common.h
@@ -4,13 +4,19 @@
 static inline unsigned long get_pc(void)
 {
unsigned long pc;
-
+#ifdef CONFIG_CPU_32
__asm__ __volatile__(
 "mov%0, pc\n"
 : "=r" (pc)
 :
 : "memory");
-
+#else
+   __asm__ __volatile__(
+"adr%0, .\n"
+: "=r" (pc)
+:
+: "memory");
+#endif
return pc;
 }
 
-- 
2.16.1




[PATCH 18/78] ARM: bitops: remove unnecessary #ifdef

2018-03-16 Thread Sascha Hauer
__fls() must always be provided, not only for aarch64, so remove
unnecessary #ifdef. This didn't show up because nobody directly uses
__fls() in barebox. Only aarch64 indirectly uses __fls() for
implementing fls64().

Signed-off-by: Sascha Hauer 
---
 arch/arm/include/asm/bitops.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 344e288bae..cd356c5e3d 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -162,9 +162,7 @@ static inline int constant_fls(int x)
 #include 
 #endif /* __ARM__USE_GENERIC_FF */
 
-#if __LINUX_ARM_ARCH__ == 8
 #include 
-#endif
 
 #include 
 
-- 
2.16.1




[PATCH 23/78] ARM: aarch64: cache: Add v8_inv_dcache_range

2018-03-16 Thread Sascha Hauer
Implement v8_inv_dcache_range() based on the existing
v8_flush_dcache_range(). While at it, add a prototype for
v8_flush_dcache_range() as well.
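
The distinction matters for DMA buffers; a rough usage sketch (not part
of this patch):

static void example_dma_cache_maintenance(unsigned long buf, size_t len)
{
	/* CPU filled the buffer, the device will read it: clean it out */
	v8_flush_dcache_range(buf, buf + len);

	/* the device wrote the buffer, the CPU will read it: drop stale lines */
	v8_inv_dcache_range(buf, buf + len);
}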

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/cache-armv8.S   | 19 +++
 arch/arm/include/asm/cache.h |  2 ++
 2 files changed, 21 insertions(+)

diff --git a/arch/arm/cpu/cache-armv8.S b/arch/arm/cpu/cache-armv8.S
index 82b2f81778..3e21b35913 100644
--- a/arch/arm/cpu/cache-armv8.S
+++ b/arch/arm/cpu/cache-armv8.S
@@ -148,6 +148,25 @@ ENTRY(v8_flush_dcache_range)
ret
 ENDPROC(v8_flush_dcache_range)
 
+.section .text.v8_inv_dcache_range
+ENTRY(v8_inv_dcache_range)
+   mrs x3, ctr_el0
+   lsr x3, x3, #16
+   and x3, x3, #0xf
+   mov x2, #4
+   lsl x2, x2, x3  /* cache line size */
+
+   /* x2 <- minimal cache line size in cache system */
+   sub x3, x2, #1
+   bic x0, x0, x3
+1: dc  ivac, x0/* invalidate data or unified cache */
+   add x0, x0, x2
+   cmp x0, x1
+   b.lo1b
+   dsb sy
+   ret
+ENDPROC(v8_inv_dcache_range)
+
 /*
  * void v8_invalidate_icache_all(void)
  *
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 406a9d5d99..503bf8a0f7 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -5,6 +5,8 @@
 extern void v8_invalidate_icache_all(void);
 void v8_flush_dcache_all(void);
 void v8_invalidate_dcache_all(void);
+void v8_flush_dcache_range(unsigned long start, unsigned long end);
+void v8_inv_dcache_range(unsigned long start, unsigned long end);
 #endif
 
 static inline void icache_invalidate(void)
-- 
2.16.1




[PATCH 24/78] ARM: aarch64: cache: no need to ifdef prototypes

2018-03-16 Thread Sascha Hauer
There's no need to #ifdef function prototypes, so remove the ifdefs.
While there, also remove the unnecessary 'extern' from the prototypes.

Signed-off-by: Sascha Hauer 
---
 arch/arm/include/asm/cache.h | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 503bf8a0f7..0822cb78c3 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -1,13 +1,11 @@
 #ifndef __ASM_CACHE_H
 #define __ASM_CACHE_H
 
-#ifdef CONFIG_CPU_64v8
-extern void v8_invalidate_icache_all(void);
+void v8_invalidate_icache_all(void);
 void v8_flush_dcache_all(void);
 void v8_invalidate_dcache_all(void);
 void v8_flush_dcache_range(unsigned long start, unsigned long end);
 void v8_inv_dcache_range(unsigned long start, unsigned long end);
-#endif
 
 static inline void icache_invalidate(void)
 {
-- 
2.16.1




[PATCH 44/78] ARM: aarch64: mmu: No need to disable icache

2018-03-16 Thread Sascha Hauer
When the MMU is disabled there is no need to disable the icache. Leave
it enabled.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu_64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 7932185885..c8c3c9d84f 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -330,7 +330,7 @@ void mmu_disable(void)
arm_mmu_not_initialized_error();
 
cr = get_cr();
-   cr &= ~(CR_M | CR_C | CR_I);
+   cr &= ~(CR_M | CR_C);
 
tlb_invalidate();
 
-- 
2.16.1




[PATCH 15/78] ARM: aarch64: Add runtime-offset

2018-03-16 Thread Sascha Hauer
Signed-off-by: Sascha Hauer 
---
 arch/arm/lib64/Makefile |  1 +
 arch/arm/lib64/runtime-offset.S | 19 +++
 2 files changed, 20 insertions(+)
 create mode 100644 arch/arm/lib64/runtime-offset.S

diff --git a/arch/arm/lib64/Makefile b/arch/arm/lib64/Makefile
index 4b7b7e3cc5..679ca556e5 100644
--- a/arch/arm/lib64/Makefile
+++ b/arch/arm/lib64/Makefile
@@ -3,5 +3,6 @@ obj-y   += div0.o
 obj-$(CONFIG_ARM_OPTIMZED_STRING_FUNCTIONS)+= memcpy.o
 obj-$(CONFIG_ARM_OPTIMZED_STRING_FUNCTIONS)+= memset.o
 extra-y += barebox.lds
+obj-pbl-y   += runtime-offset.o
 
 pbl-y  += div0.o
diff --git a/arch/arm/lib64/runtime-offset.S b/arch/arm/lib64/runtime-offset.S
new file mode 100644
index 00..177ca64784
--- /dev/null
+++ b/arch/arm/lib64/runtime-offset.S
@@ -0,0 +1,19 @@
+#include 
+#include 
+
+.section ".text_bare_init","ax"
+
+/*
+ * Get the offset between the link address and the address
+ * we are currently running at.
+ */
+ENTRY(get_runtime_offset)
+1: adr x0, 1b
+   ldr x1, linkadr
+   subs x0, x0, x1
+   ret
+
+.align 3
+linkadr:
+.quad get_runtime_offset
+ENDPROC(get_runtime_offset)
-- 
2.16.1




[PATCH 14/78] ARM: get_runtime_offset() returns unsigned long

2018-03-16 Thread Sascha Hauer
Change the return type from uint32_t to unsigned long, which is suitable
for aarch64 as well.

Signed-off-by: Sascha Hauer 
---
 arch/arm/include/asm/barebox-arm.h| 2 +-
 arch/arm/mach-mvebu/include/mach/common.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm/include/asm/barebox-arm.h b/arch/arm/include/asm/barebox-arm.h
index 170839aaad..1dccb8965e 100644
--- a/arch/arm/include/asm/barebox-arm.h
+++ b/arch/arm/include/asm/barebox-arm.h
@@ -42,7 +42,7 @@ int   dram_init (void);
 extern char __exceptions_start[], __exceptions_stop[];
 
 void board_init_lowlevel(void);
-uint32_t get_runtime_offset(void);
+unsigned long get_runtime_offset(void);
 
 void setup_c(void);
 void relocate_to_current_adr(void);
diff --git a/arch/arm/mach-mvebu/include/mach/common.h b/arch/arm/mach-mvebu/include/mach/common.h
index aa4babcfa7..8e15723fbe 100644
--- a/arch/arm/mach-mvebu/include/mach/common.h
+++ b/arch/arm/mach-mvebu/include/mach/common.h
@@ -25,7 +25,7 @@
 #define MVEBU_REMAP_INT_REG_BASE   0xf1000000
 
 /* #including  yields a circle, so we need a forward decl */
-uint32_t get_runtime_offset(void);
+unsigned long get_runtime_offset(void);
 
 static inline void __iomem *mvebu_get_initial_int_reg_base(void)
 {
-- 
2.16.1




[PATCH 31/78] ARM: aarch64: fix pbl linker script for aarch64

2018-03-16 Thread Sascha Hauer
Fix the output format for aarch64. Also, the relocation entries are in a
.rela section rather than a .rel section.

Signed-off-by: Sascha Hauer 
---
 arch/arm/lib/pbl.lds.S | 9 +
 1 file changed, 9 insertions(+)

diff --git a/arch/arm/lib/pbl.lds.S b/arch/arm/lib/pbl.lds.S
index be0a4a3b1a..9df1800543 100644
--- a/arch/arm/lib/pbl.lds.S
+++ b/arch/arm/lib/pbl.lds.S
@@ -25,8 +25,13 @@
 #define BASE   (TEXT_BASE - SZ_2M)
 #endif
 
+#ifdef CONFIG_CPU_32
 OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
 OUTPUT_ARCH(arm)
+#else
+OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64", "elf64-littleaarch64")
+OUTPUT_ARCH(aarch64)
+#endif
 SECTIONS
 {
. = BASE;
@@ -62,7 +67,11 @@ SECTIONS
.data : { *(.data*) }
 
.rel_dyn_start : { *(.__rel_dyn_start) }
+#ifdef CONFIG_CPU_32
.rel.dyn : { *(.rel*) }
+#else
+   .rela.dyn : { *(.rela*) }
+#endif
.rel_dyn_end : { *(.__rel_dyn_end) }
 
.__dynsym_start :  { *(.__dynsym_start) }
-- 
2.16.1




[PATCH 30/78] ARM: aarch64: Add relocation support

2018-03-16 Thread Sascha Hauer
This adds aarch64 support for relocating binaries linked with -pie.

Support is integrated into the already existing
relocate_to_current_adr() function, which is now used for both arm32
and aarch64.
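
For readers new to the two relocation flavours: an aarch64 .rela.dyn
entry carries an explicit addend while an arm32 .rel.dyn entry does not,
which is where the different entry strides in the loop below come from
(my summary, not part of the commit):

/* aarch64, Elf64_Rela: three 64-bit words per entry ("add = 3") */
struct {
	u64 r_offset;	/* link-time address of the location to patch */
	u64 r_info;	/* relocation type, e.g. R_AARCH64_RELATIVE */
	u64 r_addend;	/* value to relocate: *fixup = r_addend + offset */
} rela;

/*
 * arm32, Elf32_Rel: two words per entry ("add = 2"), no addend field;
 * the addend is taken from the patched location itself:
 * *fixup = *fixup + offset.
 */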

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/common.c| 38 ---
 arch/arm/cpu/setupc_64.S | 58 
 common/Kconfig   |  2 +-
 3 files changed, 89 insertions(+), 9 deletions(-)

diff --git a/arch/arm/cpu/common.c b/arch/arm/cpu/common.c
index 3766116d97..c317e502d0 100644
--- a/arch/arm/cpu/common.c
+++ b/arch/arm/cpu/common.c
@@ -24,39 +24,61 @@
 #include 
 #include 
 #include 
+#include 
+
+#define R_ARM_RELATIVE 23
+#define R_AARCH64_RELATIVE 1027
 
 /*
  * relocate binary to the currently running address
  */
 void relocate_to_current_adr(void)
 {
-   unsigned long offset;
+   unsigned long offset, offset_var;
unsigned long *dstart, *dend, *dynsym, *dynend;
 
/* Get offset between linked address and runtime address */
offset = get_runtime_offset();
+   offset_var = global_variable_offset();
 
-   dstart = (void *)__rel_dyn_start + offset;
-   dend = (void *)__rel_dyn_end + offset;
+   dstart = (void *)__rel_dyn_start + offset_var;
+   dend = (void *)__rel_dyn_end + offset_var;
 
-   dynsym = (void *)__dynsym_start + offset;
-   dynend = (void *)__dynsym_end + offset;
+   dynsym = (void *)__dynsym_start + offset_var;
+   dynend = (void *)__dynsym_end + offset_var;
 
while (dstart < dend) {
unsigned long *fixup = (unsigned long *)(*dstart + offset);
unsigned long type = *(dstart + 1);
+   int add;
+
+   if (ELF64_R_TYPE(type) == R_AARCH64_RELATIVE) {
+   unsigned long addend = *(dstart + 2);
 
-   if ((type & 0xff) == 0x17) {
+   *fixup = addend + offset;
+
+   add = 3;
+   } else if (ELF32_R_TYPE(type) == R_ARM_RELATIVE) {
*fixup = *fixup + offset;
-   } else {
+
+   add = 2;
+   } else if (ELF32_R_TYPE(type) == R_ARM_ABS32) {
int index = type >> 8;
unsigned long r = dynsym[index * 4 + 1];
 
*fixup = *fixup + r + offset;
+
+   add = 2;
+   } else {
+   putc_ll('>');
+   puthex_ll(type);
+   putc_ll('\n');
+   /* We're doomed */
+   panic(NULL);
}
 
*dstart += offset;
-   dstart += 2;
+   dstart += add;
}
 
memset(dynsym, 0, (unsigned long)dynend - (unsigned long)dynsym);
diff --git a/arch/arm/cpu/setupc_64.S b/arch/arm/cpu/setupc_64.S
index 3515854784..88c7899205 100644
--- a/arch/arm/cpu/setupc_64.S
+++ b/arch/arm/cpu/setupc_64.S
@@ -16,3 +16,61 @@ ENTRY(setup_c)
mov x30, x15
ret
 ENDPROC(setup_c)
+
+/*
+ * void relocate_to_adr(unsigned long targetadr)
+ *
+ * Copy binary to targetadr, relocate code and continue
+ * executing at new address.
+ */
+.section .text.relocate_to_adr
+ENTRY(relocate_to_adr)
+   /* x0: target address */
+
+   stp x19, x20, [sp, #-16]!
+
+   mov x19, lr
+
+   mov x6, x0
+
+   bl  get_runtime_offset
+   mov x5, x0
+
+   ldr x0, =_text
+   mov x8, x0
+
+   add x1, x0, x5  /* x1: from address */
+
+   cmp x1, x6  /* already at correct address? */
+   beq 1f  /* yes, skip copy to new address */
+
+   ldr x2, =__bss_start
+
+   sub x2, x2, x0  /* x2: size */
+   mov x0, x6  /* x0: target */
+
+   /* adjust return address */
+   sub x19, x19, x1	/* sub address where we are actually running */
+   add x19, x19, x0	/* add address where we are going to run */
+
+   bl  memcpy  /* copy binary */
+
+#ifdef CONFIG_MMU
+   bl  arm_early_mmu_cache_flush
+#endif
+   mov x0,#0
+   ic  ivau, x0/* flush icache */
+
+   ldr x0,=1f
+   sub x0, x0, x8
+   add x0, x0, x6
+   br  x0  /* jump to relocated address */
+1:
+   bl  relocate_to_current_adr /* relocate binary */
+
+   mov lr, x19
+
+   ldp x19, x20, [sp], #16
+   ret
+
+ENDPROC(relocate_to_adr)
diff --git a/common/Kconfig b/common/Kconfig
index af71d6888a..b7000c4d73 100644
--- a/common/Kconfig
+++ b/common/Kconfig
@@ -344,7 +344,7 @@ config KALLSYMS
  This is useful to print a nice backtrace when an exception occurs.
 
 config RELOCATABLE
-   depends on PPC || (ARM 

[PATCH 27/78] ARM: move linker variable declarations to sections.h

2018-03-16 Thread Sascha Hauer
We collected most linker variable declarations in asm/sections.h, so
move __exceptions_start/__exceptions_stop there as well.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu.c | 1 +
 arch/arm/cpu/no-mmu.c  | 2 +-
 arch/arm/include/asm/barebox-arm.h | 2 --
 arch/arm/include/asm/sections.h| 2 ++
 4 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/arch/arm/cpu/mmu.c b/arch/arm/cpu/mmu.c
index 27d994d452..de0b631981 100644
--- a/arch/arm/cpu/mmu.c
+++ b/arch/arm/cpu/mmu.c
@@ -30,6 +30,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "mmu.h"
 
diff --git a/arch/arm/cpu/no-mmu.c b/arch/arm/cpu/no-mmu.c
index e227b457a1..7268fa9b9d 100644
--- a/arch/arm/cpu/no-mmu.c
+++ b/arch/arm/cpu/no-mmu.c
@@ -28,7 +28,7 @@
 #include 
 #include 
 #include 
-
+#include 
 
 #define __exceptions_size (__exceptions_stop - __exceptions_start)
 
diff --git a/arch/arm/include/asm/barebox-arm.h b/arch/arm/include/asm/barebox-arm.h
index 6c7507a9c7..fa673a63a7 100644
--- a/arch/arm/include/asm/barebox-arm.h
+++ b/arch/arm/include/asm/barebox-arm.h
@@ -32,8 +32,6 @@
 #include 
 #include 
 
-extern char __exceptions_start[], __exceptions_stop[];
-
 unsigned long get_runtime_offset(void);
 
/* global_variable_offset() - Access global variables when not running at link address
diff --git a/arch/arm/include/asm/sections.h b/arch/arm/include/asm/sections.h
index b3ada62ee8..6933c7032d 100644
--- a/arch/arm/include/asm/sections.h
+++ b/arch/arm/include/asm/sections.h
@@ -8,6 +8,8 @@ extern char __rel_dyn_start[];
 extern char __rel_dyn_end[];
 extern char __dynsym_start[];
 extern char __dynsym_end[];
+extern char __exceptions_start[];
+extern char __exceptions_stop[];
 
 #endif
 
-- 
2.16.1




[PATCH 61/78] ARM: aarch64: mmu: Make zero page faulting

2018-03-16 Thread Sascha Hauer
Make the zero page faulting, which allows us to catch NULL pointer dereferences.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu_64.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 20f6c387f3..094bc0ac62 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -316,6 +316,8 @@ static int mmu_init(void)
for_each_memory_bank(bank)
create_sections(bank->start, bank->start, bank->size, CACHED_MEM);
 
+   create_sections(0x0, 0x0, 0x1000, 0x0);
+
mmu_enable();
 
return 0;
-- 
2.16.1
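
With the zero page mapped this way, any access to the first 4KiB now
raises an exception instead of going through the former identity
mapping. A minimal sketch of the effect (hypothetical test code, not
part of the patch):

	/* hypothetical illustration */
	volatile int *p = NULL;

	(void)*p;	/* now traps in the data abort handler */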




[PATCH 63/78] ARM: aarch64: Add esr strings

2018-03-16 Thread Sascha Hauer
The Exception Syndrome Register (ESR) holds information about an
exception. This adds the strings necessary to decode and print this
information. Based on Linux code.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/interrupts_64.c |  46 +
 arch/arm/include/asm/esr.h   | 117 +++
 2 files changed, 163 insertions(+)
 create mode 100644 arch/arm/include/asm/esr.h

diff --git a/arch/arm/cpu/interrupts_64.c b/arch/arm/cpu/interrupts_64.c
index 9ed6ed9761..c32cd4f051 100644
--- a/arch/arm/cpu/interrupts_64.c
+++ b/arch/arm/cpu/interrupts_64.c
@@ -23,6 +23,52 @@
 #include 
 #include 
 #include 
+#include 
+
+static const char *esr_class_str[] = {
+   [0 ... ESR_ELx_EC_MAX]  = "UNRECOGNIZED EC",
+   [ESR_ELx_EC_UNKNOWN]= "Unknown/Uncategorized",
+   [ESR_ELx_EC_WFx]= "WFI/WFE",
+   [ESR_ELx_EC_CP15_32]= "CP15 MCR/MRC",
+   [ESR_ELx_EC_CP15_64]= "CP15 MCRR/MRRC",
+   [ESR_ELx_EC_CP14_MR]= "CP14 MCR/MRC",
+   [ESR_ELx_EC_CP14_LS]= "CP14 LDC/STC",
+   [ESR_ELx_EC_FP_ASIMD]   = "ASIMD",
+   [ESR_ELx_EC_CP10_ID]= "CP10 MRC/VMRS",
+   [ESR_ELx_EC_CP14_64]= "CP14 MCRR/MRRC",
+   [ESR_ELx_EC_ILL]= "PSTATE.IL",
+   [ESR_ELx_EC_SVC32]  = "SVC (AArch32)",
+   [ESR_ELx_EC_HVC32]  = "HVC (AArch32)",
+   [ESR_ELx_EC_SMC32]  = "SMC (AArch32)",
+   [ESR_ELx_EC_SVC64]  = "SVC (AArch64)",
+   [ESR_ELx_EC_HVC64]  = "HVC (AArch64)",
+   [ESR_ELx_EC_SMC64]  = "SMC (AArch64)",
+   [ESR_ELx_EC_SYS64]  = "MSR/MRS (AArch64)",
+   [ESR_ELx_EC_IMP_DEF]= "EL3 IMP DEF",
+   [ESR_ELx_EC_IABT_LOW]   = "IABT (lower EL)",
+   [ESR_ELx_EC_IABT_CUR]   = "IABT (current EL)",
+   [ESR_ELx_EC_PC_ALIGN]   = "PC Alignment",
+   [ESR_ELx_EC_DABT_LOW]   = "DABT (lower EL)",
+   [ESR_ELx_EC_DABT_CUR]   = "DABT (current EL)",
+   [ESR_ELx_EC_SP_ALIGN]   = "SP Alignment",
+   [ESR_ELx_EC_FP_EXC32]   = "FP (AArch32)",
+   [ESR_ELx_EC_FP_EXC64]   = "FP (AArch64)",
+   [ESR_ELx_EC_SERROR] = "SError",
+   [ESR_ELx_EC_BREAKPT_LOW]= "Breakpoint (lower EL)",
+   [ESR_ELx_EC_BREAKPT_CUR]= "Breakpoint (current EL)",
+   [ESR_ELx_EC_SOFTSTP_LOW]= "Software Step (lower EL)",
+   [ESR_ELx_EC_SOFTSTP_CUR]= "Software Step (current EL)",
+   [ESR_ELx_EC_WATCHPT_LOW]= "Watchpoint (lower EL)",
+   [ESR_ELx_EC_WATCHPT_CUR]= "Watchpoint (current EL)",
+   [ESR_ELx_EC_BKPT32] = "BKPT (AArch32)",
+   [ESR_ELx_EC_VECTOR32]   = "Vector catch (AArch32)",
+   [ESR_ELx_EC_BRK64]  = "BRK (AArch64)",
+};
+
+const char *esr_get_class_string(u32 esr)
+{
+   return esr_class_str[esr >> ESR_ELx_EC_SHIFT];
+}
 
 /**
  * Display current register set content
diff --git a/arch/arm/include/asm/esr.h b/arch/arm/include/asm/esr.h
new file mode 100644
index 00..77eeb2cc64
--- /dev/null
+++ b/arch/arm/include/asm/esr.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2013 - ARM Ltd
+ * Author: Marc Zyngier 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ESR_H
+#define __ASM_ESR_H
+
+#include 
+
+#define ESR_ELx_EC_UNKNOWN (0x00)
+#define ESR_ELx_EC_WFx (0x01)
+/* Unallocated EC: 0x02 */
+#define ESR_ELx_EC_CP15_32 (0x03)
+#define ESR_ELx_EC_CP15_64 (0x04)
+#define ESR_ELx_EC_CP14_MR (0x05)
+#define ESR_ELx_EC_CP14_LS (0x06)
+#define ESR_ELx_EC_FP_ASIMD(0x07)
+#define ESR_ELx_EC_CP10_ID (0x08)
+/* Unallocated EC: 0x09 - 0x0B */
+#define ESR_ELx_EC_CP14_64 (0x0C)
+/* Unallocated EC: 0x0d */
+#define ESR_ELx_EC_ILL (0x0E)
+/* Unallocated EC: 0x0F - 0x10 */
+#define ESR_ELx_EC_SVC32   (0x11)
+#define ESR_ELx_EC_HVC32   (0x12)
+#define ESR_ELx_EC_SMC32   (0x13)
+/* Unallocated EC: 0x14 */
+#define ESR_ELx_EC_SVC64   (0x15)
+#define ESR_ELx_EC_HVC64   (0x16)
+#define ESR_ELx_EC_SMC64   (0x17)
+#define ESR_ELx_EC_SYS64   (0x18)
+/* Unallocated EC: 0x19 - 0x1E */
+#define ESR_ELx_EC_IMP_DEF (0x1f)
+#define ESR_ELx_EC_IABT_LOW(0x20)
+#define 
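
esr_get_class_string() simply indexes the table above with the exception
class (EC) field, which occupies bits [31:26] of the ESR. A small usage
sketch (the ESR value below is made up for illustration):

	u32 esr = 0x96000045;	/* EC = 0x25, i.e. "DABT (lower EL)" */

	printf("exception class: %s\n", esr_get_class_string(esr));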

[PATCH 40/78] ARM: aarch64: mmu: use PTE_* definitions from U-Boot

2018-03-16 Thread Sascha Hauer
'PMD' (Page Middle Directory) is a Linuxism that is not really
helpful in the barebox MMU code. Use the U-Boot definitions,
which only use PTE_* and seem to be more consistent for our
use case.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu_64.c| 22 +++--
 arch/arm/include/asm/pgtable64.h | 67 +---
 2 files changed, 35 insertions(+), 54 deletions(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 695a73262d..baa443f9e2 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -34,8 +34,12 @@
 
 #include "mmu.h"
 
-#define CACHED_MEM  (PMD_ATTRINDX(MT_NORMAL) | PMD_SECT_S | PMD_SECT_AF | PMD_TYPE_SECT)
-#define UNCACHED_MEM(PMD_ATTRINDX(MT_DEVICE_nGnRnE) | PMD_SECT_AF)
+#define CACHED_MEM  (PTE_BLOCK_MEMTYPE(MT_NORMAL) | \
+PTE_BLOCK_OUTER_SHARE | \
+PTE_BLOCK_AF)
+#define UNCACHED_MEM(PTE_BLOCK_MEMTYPE(MT_DEVICE_nGnRnE) | \
+PTE_BLOCK_OUTER_SHARE | \
+PTE_BLOCK_AF)
 
 static uint64_t *ttb;
 
@@ -94,14 +98,14 @@ static uint64_t level2mask(int level)
 
 static int pte_type(uint64_t *pte)
 {
-   return *pte & PMD_TYPE_MASK;
+   return *pte & PTE_TYPE_MASK;
 }
 
 static void set_table(uint64_t *pt, uint64_t *table_addr)
 {
uint64_t val;
 
-   val = PMD_TYPE_TABLE | (uint64_t)table_addr;
+   val = PTE_TYPE_TABLE | (uint64_t)table_addr;
*pt = val;
 }
 
@@ -119,7 +123,7 @@ static uint64_t *get_level_table(uint64_t *pte)
 {
uint64_t *table = (uint64_t *)(*pte & XLAT_ADDR_MASK);
 
-   if (pte_type(pte) != PMD_TYPE_TABLE) {
+   if (pte_type(pte) != PTE_TYPE_TABLE) {
table = create_table();
set_table(pte, table);
}
@@ -141,7 +145,7 @@ static __maybe_unused uint64_t *find_pte(uint64_t addr)
idx = (addr & level2mask(i)) >> block_shift;
pte += idx;
 
-   if ((pte_type(pte) != PMD_TYPE_TABLE) || (block_shift <= GRANULE_SIZE_SHIFT))
+   if ((pte_type(pte) != PTE_TYPE_TABLE) || (block_shift <= GRANULE_SIZE_SHIFT))
break;
else
pte = (uint64_t *)(*pte & XLAT_ADDR_MASK);
@@ -165,21 +169,21 @@ static void map_region(uint64_t virt, uint64_t phys, uint64_t size, uint64_t att
 
addr = virt;
 
-   attr &= ~(PMD_TYPE_SECT);
+   attr &= ~PTE_TYPE_MASK;
 
while (size) {
table = ttb;
for (level = 1; level < 4; level++) {
block_shift = level2shift(level);
idx = (addr & level2mask(level)) >> block_shift;
-   block_size = (1 << block_shift);
+   block_size = (1ULL << block_shift);
 
pte = table + idx;
 
if (level == 3)
attr |= PTE_TYPE_PAGE;
else
-   attr |= PMD_TYPE_SECT;
+   attr |= PTE_TYPE_BLOCK;
 
if (size >= block_size && IS_ALIGNED(addr, block_size)) {
*pte = phys | attr;
diff --git a/arch/arm/include/asm/pgtable64.h b/arch/arm/include/asm/pgtable64.h
index 7f7efa10ca..f2888c3ccd 100644
--- a/arch/arm/include/asm/pgtable64.h
+++ b/arch/arm/include/asm/pgtable64.h
@@ -47,51 +47,28 @@
 
 #define GRANULE_MASK   GRANULE_SIZE
 
-
-/*
- * Level 2 descriptor (PMD).
- */
-#define PMD_TYPE_MASK  (3 << 0)
-#define PMD_TYPE_FAULT (0 << 0)
-#define PMD_TYPE_TABLE (3 << 0)
-#define PMD_TYPE_SECT  (1 << 0)
-#define PMD_TABLE_BIT  (1 << 1)
-
-/*
- * Section
- */
-#define PMD_SECT_VALID (1 << 0)
-#define PMD_SECT_USER  (1 << 6)/* AP[1] */
-#define PMD_SECT_RDONLY(1 << 7)/* AP[2] */
-#define PMD_SECT_S (3 << 8)
-#define PMD_SECT_AF(1 << 10)
-#define PMD_SECT_NG(1 << 11)
-#define PMD_SECT_CONT  (1 << 52)
-#define PMD_SECT_PXN   (1 << 53)
-#define PMD_SECT_UXN   (1 << 54)
-
-/*
- * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
- */
-#define PMD_ATTRINDX(t)((t) << 2)
-#define PMD_ATTRINDX_MASK  (7 << 2)
-
-/*
- * Level 3 descriptor (PTE).
- */
-#define PTE_TYPE_MASK  (3 << 0)
-#define PTE_TYPE_FAULT (0 << 0)
-#define PTE_TYPE_PAGE  (3 << 0)
-#define PTE_TABLE_BIT  (1 << 1)
-#define PTE_USER   (1 << 6)/* AP[1] */
-#define PTE_RDONLY (1 << 7)/* AP[2] */
-#define PTE_SHARED (3 << 8)/* SH[1:0], inner shareable */
-#define PTE_AF (1 << 10)   /* Access Flag */
-#define PTE_NG (1 << 11)   /* nG */
-#define PTE_DBM 

[PATCH 36/78] ARM: aarch64: mmu: by default map as device memory

2018-03-16 Thread Sascha Hauer
By default map the whole address space as unshared device memory
as this is what barebox drivers normally expect.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu_64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 357e244042..27b5acd6a7 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -35,7 +35,7 @@
 #include "mmu.h"
 
#define CACHED_MEM  (PMD_ATTRINDX(MT_NORMAL) | PMD_SECT_S | PMD_SECT_AF | PMD_TYPE_SECT)
-#define UNCACHED_MEM(PMD_ATTRINDX(MT_NORMAL_NC) | PMD_SECT_S | PMD_SECT_AF | PMD_TYPE_SECT)
+#define UNCACHED_MEM(PMD_ATTRINDX(MT_DEVICE_nGnRnE) | PMD_SECT_AF)
 
 static uint64_t *ttb;
 
-- 
2.16.1




[PATCH 76/78] ARM: create separate mmu_64.h file

2018-03-16 Thread Sascha Hauer
cpu/mmu.h has nothing in common between the 32bit and 64bit variants.
Split it into two separate files.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu.h| 47 ---
 arch/arm/cpu/mmu_64.c |  2 +-
 arch/arm/cpu/mmu_64.h | 37 +
 3 files changed, 38 insertions(+), 48 deletions(-)
 create mode 100644 arch/arm/cpu/mmu_64.h

diff --git a/arch/arm/cpu/mmu.h b/arch/arm/cpu/mmu.h
index 5803cb6a83..79ebc80d7d 100644
--- a/arch/arm/cpu/mmu.h
+++ b/arch/arm/cpu/mmu.h
@@ -1,53 +1,6 @@
 #ifndef __ARM_MMU_H
 #define __ARM_MMU_H
 
-#ifdef CONFIG_CPU_64v8
-
-#ifndef __ASSEMBLY__
-
-static inline void set_ttbr_tcr_mair(int el, uint64_t table, uint64_t tcr, uint64_t attr)
-{
-   asm volatile("dsb sy");
-   if (el == 1) {
-   asm volatile("msr ttbr0_el1, %0" : : "r" (table) : "memory");
-   asm volatile("msr tcr_el1, %0" : : "r" (tcr) : "memory");
-   asm volatile("msr mair_el1, %0" : : "r" (attr) : "memory");
-   } else if (el == 2) {
-   asm volatile("msr ttbr0_el2, %0" : : "r" (table) : "memory");
-   asm volatile("msr tcr_el2, %0" : : "r" (tcr) : "memory");
-   asm volatile("msr mair_el2, %0" : : "r" (attr) : "memory");
-   } else if (el == 3) {
-   asm volatile("msr ttbr0_el3, %0" : : "r" (table) : "memory");
-   asm volatile("msr tcr_el3, %0" : : "r" (tcr) : "memory");
-   asm volatile("msr mair_el3, %0" : : "r" (attr) : "memory");
-   } else {
-   hang();
-   }
-   asm volatile("isb");
-}
-
-static inline uint64_t get_ttbr(int el)
-{
-   uint64_t val;
-   if (el == 1) {
-   asm volatile("mrs %0, ttbr0_el1" : "=r" (val));
-   } else if (el == 2) {
-   asm volatile("mrs %0, ttbr0_el2" : "=r" (val));
-   } else if (el == 3) {
-   asm volatile("mrs %0, ttbr0_el3" : "=r" (val));
-   } else {
-   hang();
-   }
-
-   return val;
-}
-
-void mmu_early_enable(uint64_t membase, uint64_t memsize, uint64_t _ttb);
-
-#endif
-
-#endif /* CONFIG_CPU_64v8 */
-
 #ifdef CONFIG_MMU
 void __mmu_cache_on(void);
 void __mmu_cache_off(void);
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 094bc0ac62..7f29ae7623 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -32,7 +32,7 @@
 #include 
 #include 
 
-#include "mmu.h"
+#include "mmu_64.h"
 
 #define CACHED_MEM  (PTE_BLOCK_MEMTYPE(MT_NORMAL) | \
 PTE_BLOCK_OUTER_SHARE | \
diff --git a/arch/arm/cpu/mmu_64.h b/arch/arm/cpu/mmu_64.h
new file mode 100644
index 00..cc01db0db9
--- /dev/null
+++ b/arch/arm/cpu/mmu_64.h
@@ -0,0 +1,37 @@
+
+static inline void set_ttbr_tcr_mair(int el, uint64_t table, uint64_t tcr, uint64_t attr)
+{
+   asm volatile("dsb sy");
+   if (el == 1) {
+   asm volatile("msr ttbr0_el1, %0" : : "r" (table) : "memory");
+   asm volatile("msr tcr_el1, %0" : : "r" (tcr) : "memory");
+   asm volatile("msr mair_el1, %0" : : "r" (attr) : "memory");
+   } else if (el == 2) {
+   asm volatile("msr ttbr0_el2, %0" : : "r" (table) : "memory");
+   asm volatile("msr tcr_el2, %0" : : "r" (tcr) : "memory");
+   asm volatile("msr mair_el2, %0" : : "r" (attr) : "memory");
+   } else if (el == 3) {
+   asm volatile("msr ttbr0_el3, %0" : : "r" (table) : "memory");
+   asm volatile("msr tcr_el3, %0" : : "r" (tcr) : "memory");
+   asm volatile("msr mair_el3, %0" : : "r" (attr) : "memory");
+   } else {
+   hang();
+   }
+   asm volatile("isb");
+}
+
+static inline uint64_t get_ttbr(int el)
+{
+   uint64_t val;
+   if (el == 1) {
+   asm volatile("mrs %0, ttbr0_el1" : "=r" (val));
+   } else if (el == 2) {
+   asm volatile("mrs %0, ttbr0_el2" : "=r" (val));
+   } else if (el == 3) {
+   asm volatile("mrs %0, ttbr0_el3" : "=r" (val));
+   } else {
+   hang();
+   }
+
+   return val;
+}
-- 
2.16.1
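
For reference, the typical call sequence for these helpers (as used by
the MMU setup code later in this series) looks like this:

	int el = current_el();

	set_ttbr_tcr_mair(el, (uint64_t)ttb, calc_tcr(el), MEMORY_ATTRIBUTES);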




[PATCH 35/78] ARM: aarch64: mmu: remove unused map_io_sections()

2018-03-16 Thread Sascha Hauer
Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu_64.c | 10 --
 1 file changed, 10 deletions(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index fe1e5410f7..357e244042 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -202,16 +202,6 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size, uint64_
tlb_invalidate();
 }
 
-void *map_io_sections(unsigned long phys, void *_start, size_t size)
-{
-
-   map_region((uint64_t)_start, phys, (uint64_t)size, UNCACHED_MEM);
-
-   tlb_invalidate();
-   return _start;
-}
-
-
 int arch_remap_range(void *_start, size_t size, unsigned flags)
 {
map_region((uint64_t)_start, (uint64_t)_start, (uint64_t)size, flags);
-- 
2.16.1




[PATCH 41/78] ARM: aarch64: mmu: Fix adding additional page table levels

2018-03-16 Thread Sascha Hauer
When we create a higher level page table we have to initialize it
with the settings from the previous lower level page table so that
we do not modify unrelated mappings. split_block() is taken from
U-Boot code and does this job.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu_64.c | 39 ++-
 1 file changed, 34 insertions(+), 5 deletions(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index baa443f9e2..2934ad12cd 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -123,10 +123,8 @@ static uint64_t *get_level_table(uint64_t *pte)
 {
uint64_t *table = (uint64_t *)(*pte & XLAT_ADDR_MASK);
 
-   if (pte_type(pte) != PTE_TYPE_TABLE) {
-   table = create_table();
-   set_table(pte, table);
-   }
+   if (pte_type(pte) != PTE_TYPE_TABLE)
+   BUG();
 
return table;
 }
@@ -154,6 +152,36 @@ static __maybe_unused uint64_t *find_pte(uint64_t addr)
return pte;
 }
 
+#define MAX_PTE_ENTRIES 512
+
+/* Splits a block PTE into table with subpages spanning the old block */
+static void split_block(uint64_t *pte, int level)
+{
+   uint64_t old_pte = *pte;
+   uint64_t *new_table;
+   uint64_t i = 0;
+   int levelshift;
+
+   if ((*pte & PTE_TYPE_MASK) == PTE_TYPE_TABLE)
+   return;
+
+   /* level describes the parent level, we need the child ones */
+   levelshift = level2shift(level + 1);
+
+   new_table = create_table();
+
+   for (i = 0; i < MAX_PTE_ENTRIES; i++) {
+   new_table[i] = old_pte | (i << levelshift);
+
+   /* Level 3 block PTEs have the table type */
+   if ((level + 1) == 3)
+   new_table[i] |= PTE_TYPE_TABLE;
+   }
+
+   /* Set the new table into effect */
+   set_table(pte, new_table);
+}
+
static void map_region(uint64_t virt, uint64_t phys, uint64_t size, uint64_t attr)
 {
uint64_t block_size;
@@ -191,7 +219,8 @@ static void map_region(uint64_t virt, uint64_t phys, uint64_t size, uint64_t att
phys += block_size;
size -= block_size;
break;
-
+   } else {
+   split_block(pte, level);
}
 
table = get_level_table(pte);
-- 
2.16.1
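
As a concrete illustration of what split_block() produces (addresses and
attributes below are only an example): splitting a cached 1GiB level-1
block mapping 0x40000000 yields a level-2 table whose 512 entries each
map 2MiB and inherit the attribute bits of the old block entry:

	/* illustration only */
	uint64_t old_pte = 0x40000000ULL | CACHED_MEM | PTE_TYPE_BLOCK;
	uint64_t *new_table = create_table();
	int i;

	for (i = 0; i < 512; i++)
		new_table[i] = old_pte | ((uint64_t)i << 21);	/* 21 == level2shift(2) */

	/* new_table[1] maps 0x40200000, ..., new_table[511] maps 0x7fe00000 */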




[PATCH 69/78] common: Add functions to find free RAM

2018-03-16 Thread Sascha Hauer
The bootm code needs to put the Kernel image and initrd into free
RAM. Add some functions to find free RAM chunks to help this code.

Signed-off-by: Sascha Hauer 
---
 common/memory.c  | 51 +++
 include/memory.h |  5 +
 2 files changed, 56 insertions(+)

diff --git a/common/memory.c b/common/memory.c
index ff5bdc14e2..00fa7c50ff 100644
--- a/common/memory.c
+++ b/common/memory.c
@@ -171,6 +171,57 @@ int release_sdram_region(struct resource *res)
return release_region(res);
 }
 
+void memory_bank_find_space(struct memory_bank *bank, resource_size_t *retstart,
+  resource_size_t *retend)
+{
+   resource_size_t freeptr, size, maxfree = 0;
+   struct resource *last, *child;
+
+   if (list_empty(&bank->res->children)) {
+   /* No children - return the whole bank */
+   *retstart = bank->res->start;
+   *retend = bank->res->end;
+   return;
+   }
+
+   freeptr = bank->res->start;
+
+   list_for_each_entry(child, &bank->res->children, sibling) {
+   /* Check gaps between child resources */
+   size = child->start - freeptr;
+   if (size > maxfree) {
+   *retstart = freeptr;
+   *retend = child->start - 1;
+   maxfree = size;
+   }
+   freeptr = child->start + resource_size(child);
+   }
+
+   last = list_last_entry(&bank->res->children, struct resource, sibling);
+
+   /* Check gap between last child and end of memory bank */
+   freeptr = last->start + resource_size(last);
+   size = bank->res->start + resource_size(bank->res) - freeptr;
+
+   if (size > maxfree) {
+   *retstart = freeptr;
+   *retend = bank->res->end;
+   }
+}
+
+int memory_bank_first_find_space(resource_size_t *retstart,
+resource_size_t *retend)
+{
+   struct memory_bank *bank;
+
+   for_each_memory_bank(bank) {
+   memory_bank_find_space(bank, retstart, retend);
+   return 0;
+   }
+
+   return -ENOENT;
+}
+
 #ifdef CONFIG_OFTREE
 
 static int of_memory_fixup(struct device_node *node, void *unused)
diff --git a/include/memory.h b/include/memory.h
index 165d2dc52a..56d16d20c8 100644
--- a/include/memory.h
+++ b/include/memory.h
@@ -27,4 +27,9 @@ struct resource *request_sdram_region(const char *name, resource_size_t start,
resource_size_t size);
 int release_sdram_region(struct resource *res);
 
+void memory_bank_find_space(struct memory_bank *bank, resource_size_t *retstart,
+   resource_size_t *retend);
+int memory_bank_first_find_space(resource_size_t *retstart,
+resource_size_t *retend);
+
 #endif
-- 
2.16.1
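
A hedged sketch of how a caller (such as the bootm code mentioned in the
commit message) might use the new helpers; the printing is only
illustrative:

	resource_size_t start, end;
	int ret;

	ret = memory_bank_first_find_space(&start, &end);
	if (ret)
		return ret;	/* -ENOENT: no memory bank registered */

	printf("largest free area in first bank: 0x%08llx - 0x%08llx\n",
	       (unsigned long long)start, (unsigned long long)end);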




[PATCH 78/78] ARM: aarch64: Make early MMU support work

2018-03-16 Thread Sascha Hauer
Until now it was not possible to enable the MMU in the PBL because
create_sections() needs memory allocations which are not available there.
With this patch we move the early MMU support to a separate file and all
necessary auxiliary functions to mmu_64.h. create_sections() is
reimplemented for the early case to only create 1st level pagetables.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/Makefile   |   5 +-
 arch/arm/cpu/mmu-early_64.c |  88 +
 arch/arm/cpu/mmu_64.c   | 134 +++-
 arch/arm/cpu/mmu_64.h   |  84 +++
 4 files changed, 182 insertions(+), 129 deletions(-)
 create mode 100644 arch/arm/cpu/mmu-early_64.c

diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index f79cedd085..0316d251c0 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -3,10 +3,7 @@ obj-y += cpu.o
 obj-$(CONFIG_ARM_EXCEPTIONS) += exceptions$(S64).o interrupts$(S64).o
 obj-$(CONFIG_MMU) += mmu$(S64).o
 lwl-y += lowlevel$(S64).o
-
-ifeq ($(CONFIG_CPU_32), y)
-obj-pbl-$(CONFIG_MMU) += mmu-early.o
-endif
+obj-pbl-$(CONFIG_MMU) += mmu-early$(S64).o
 
 obj-y += start.o entry.o
 
diff --git a/arch/arm/cpu/mmu-early_64.c b/arch/arm/cpu/mmu-early_64.c
new file mode 100644
index 00..f07d107e0d
--- /dev/null
+++ b/arch/arm/cpu/mmu-early_64.c
@@ -0,0 +1,88 @@
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "mmu_64.h"
+
+static void create_sections(void *ttb, uint64_t virt, uint64_t phys,
+   uint64_t size, uint64_t attr)
+{
+   uint64_t block_size;
+   uint64_t block_shift;
+   uint64_t *pte;
+   uint64_t idx;
+   uint64_t addr;
+   uint64_t *table;
+
+   addr = virt;
+
+   attr &= ~PTE_TYPE_MASK;
+
+   table = ttb;
+
+   while (1) {
+   block_shift = level2shift(1);
+   idx = (addr & level2mask(1)) >> block_shift;
+   block_size = (1ULL << block_shift);
+
+   pte = table + idx;
+
+   *pte = phys | attr | PTE_TYPE_BLOCK;
+
+   if (size < block_size)
+   break;
+
+   addr += block_size;
+   phys += block_size;
+   size -= block_size;
+   }
+}
+
+void mmu_early_enable(unsigned long membase, unsigned long memsize,
+ unsigned long ttb)
+{
+   int el;
+
+   /*
+* For the early code we only create level 1 pagetables which only
+* allow for a 1GiB granularity. If our membase is not aligned to that
+* bail out without enabling the MMU.
+*/
+   if (membase & ((1ULL << level2shift(1)) - 1))
+   return;
+
+   memset((void *)ttb, 0, GRANULE_SIZE);
+
+   el = current_el();
+   set_ttbr_tcr_mair(el, ttb, calc_tcr(el), MEMORY_ATTRIBUTES);
+   create_sections((void *)ttb, 0, 0, 1UL << (BITS_PER_VA - 1), UNCACHED_MEM);
+   create_sections((void *)ttb, membase, membase, memsize, CACHED_MEM);
+   tlb_invalidate();
+   isb();
+   set_cr(get_cr() | CR_M);
+}
+
+void mmu_early_disable(void)
+{
+   unsigned int cr;
+
+   cr = get_cr();
+   cr &= ~(CR_M | CR_C);
+
+   set_cr(cr);
+   v8_flush_dcache_all();
+   tlb_invalidate();
+
+   dsb();
+   isb();
+}
\ No newline at end of file
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 31658de910..8355a4c6e8 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -34,13 +34,6 @@
 
 #include "mmu_64.h"
 
-#define CACHED_MEM  (PTE_BLOCK_MEMTYPE(MT_NORMAL) | \
-PTE_BLOCK_OUTER_SHARE | \
-PTE_BLOCK_AF)
-#define UNCACHED_MEM(PTE_BLOCK_MEMTYPE(MT_DEVICE_nGnRnE) | \
-PTE_BLOCK_OUTER_SHARE | \
-PTE_BLOCK_AF)
-
 static uint64_t *ttb;
 
 static void arm_mmu_not_initialized_error(void)
@@ -54,74 +47,6 @@ static void arm_mmu_not_initialized_error(void)
panic("MMU not initialized\n");
 }
 
-static uint64_t calc_tcr(int el)
-{
-   u64 ips, va_bits;
-   u64 tcr;
-
-   ips = 2;
-   va_bits = BITS_PER_VA;
-
-   if (el == 1)
-   tcr = (ips << 32) | TCR_EPD1_DISABLE;
-   else if (el == 2)
-   tcr = (ips << 16);
-   else
-   tcr = (ips << 16);
-
-   /* PTWs cacheable, inner/outer WBWA and inner shareable */
-   tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
-   tcr |= TCR_T0SZ(va_bits);
-
-   return tcr;
-}
-
-/*
- * Do it the simple way for now and invalidate the entire
- * tlb
- */
-static inline void tlb_invalidate(void)
-{
-   unsigned int el = current_el();
-
-   dsb();
-
-   if (el == 1)
-   __asm__ __volatile__("tlbi vmalle1\n\t" : : : "memory");
-   else if (el == 2)
-   __asm__ 
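
One detail worth noting in mmu-early_64.c above: with only level-1 page
tables each entry covers 1GiB, so the early MMU is silently skipped
unless membase is 1GiB aligned. For example (values made up):

	mmu_early_enable(0x40000000, 0x40000000, ttb);	/* aligned: MMU gets enabled */
	mmu_early_enable(0x48000000, 0x40000000, ttb);	/* unaligned: returns without enabling */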

[PATCH 49/78] dma: Add prototypes for dma mapping functions

2018-03-16 Thread Sascha Hauer
Right now we only have the dma_sync_single_* functions, but no functions
for actually mapping a pointer. The mapping functions become necessary
when casting a pointer to unsigned long to get a DMA address is not
enough. (I'm not going as far as adding IOMMU support, but on some
architectures we need a place where we can check whether a pointer is
DMA mappable at all.)

Signed-off-by: Sascha Hauer 
---
 include/dma.h | 12 
 1 file changed, 12 insertions(+)

diff --git a/include/dma.h b/include/dma.h
index 29d94c0a52..5fdcb1733c 100644
--- a/include/dma.h
+++ b/include/dma.h
@@ -30,6 +30,18 @@ static inline void dma_free(void *mem)
 }
 #endif
 
+dma_addr_t dma_map_single(struct device_d *dev, void *ptr, size_t size,
+ enum dma_data_direction dir);
+void dma_unmap_single(struct device_d *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir);
+
+#define DMA_ERROR_CODE  (~(dma_addr_t)0)
+
+static inline int dma_mapping_error(struct device_d *dev, dma_addr_t dma_addr)
+{
+   return dma_addr == DMA_ERROR_CODE;
+}
+
 /* streaming DMA - implement the below calls to support HAS_DMA */
 void dma_sync_single_for_cpu(dma_addr_t address, size_t size,
 enum dma_data_direction dir);
-- 
2.16.1
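
A hedged sketch of the intended usage in a driver (device, buffer and
length are illustrative):

	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -EFAULT;

	/* ... hand 'dma' to the hardware and wait for completion ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);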




[PATCH 77/78] ARM: change mmu_early_enable() prototype

2018-03-16 Thread Sascha Hauer
Change the arguments to type unsigned long, which is suitable for both
arm32 and arm64. While at it, move the prototype to arch/arm/include/.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu-early.c   | 3 ++-
 arch/arm/cpu/mmu-early.h   | 6 --
 arch/arm/cpu/mmu_64.c  | 3 ++-
 arch/arm/cpu/start-pbl.c   | 3 +--
 arch/arm/cpu/start.c   | 2 +-
 arch/arm/cpu/uncompress.c  | 3 +--
 arch/arm/include/asm/mmu.h | 3 +++
 7 files changed, 10 insertions(+), 13 deletions(-)
 delete mode 100644 arch/arm/cpu/mmu-early.h

diff --git a/arch/arm/cpu/mmu-early.c b/arch/arm/cpu/mmu-early.c
index 1549f08985..70cb5fe31b 100644
--- a/arch/arm/cpu/mmu-early.c
+++ b/arch/arm/cpu/mmu-early.c
@@ -30,7 +30,8 @@ static void map_cachable(unsigned long start, unsigned long size)
PMD_SECT_AP_READ | PMD_TYPE_SECT | PMD_SECT_WB);
 }
 
-void mmu_early_enable(uint32_t membase, uint32_t memsize, uint32_t _ttb)
+void mmu_early_enable(unsigned long membase, unsigned long memsize,
+ unsigned long _ttb)
 {
int i;
 
diff --git a/arch/arm/cpu/mmu-early.h b/arch/arm/cpu/mmu-early.h
deleted file mode 100644
index af21f52131..00
--- a/arch/arm/cpu/mmu-early.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARM_CPU_MMU_EARLY_H
-#define __ARM_CPU_MMU_EARLY_H
-
-void mmu_early_enable(uint32_t membase, uint32_t memsize, uint32_t ttb);
-
-#endif /* __ARM_CPU_MMU_EARLY_H */
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 7f29ae7623..31658de910 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -339,7 +339,8 @@ void mmu_disable(void)
isb();
 }
 
-void mmu_early_enable(uint64_t membase, uint64_t memsize, uint64_t _ttb)
+void mmu_early_enable(unsigned long membase, unsigned long memsize,
+ unsigned long _ttb)
 {
int el;
 
diff --git a/arch/arm/cpu/start-pbl.c b/arch/arm/cpu/start-pbl.c
index 25f91b02f3..16159d7f9d 100644
--- a/arch/arm/cpu/start-pbl.c
+++ b/arch/arm/cpu/start-pbl.c
@@ -27,10 +27,9 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
-#include "mmu-early.h"
-
 unsigned long free_mem_ptr;
 unsigned long free_mem_end_ptr;
 
diff --git a/arch/arm/cpu/start.c b/arch/arm/cpu/start.c
index 9f4213bfb9..68fff892e8 100644
--- a/arch/arm/cpu/start.c
+++ b/arch/arm/cpu/start.c
@@ -26,12 +26,12 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 
 #include 
-#include "mmu-early.h"
 
 unsigned long arm_stack_top;
 static unsigned long arm_barebox_size;
diff --git a/arch/arm/cpu/uncompress.c b/arch/arm/cpu/uncompress.c
index c721eef0ad..b07087e4cf 100644
--- a/arch/arm/cpu/uncompress.c
+++ b/arch/arm/cpu/uncompress.c
@@ -28,12 +28,11 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #include 
 
-#include "mmu-early.h"
-
 unsigned long free_mem_ptr;
 unsigned long free_mem_end_ptr;
 
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 840851858f..99833ac5b4 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -54,4 +54,7 @@ void __dma_clean_range(unsigned long, unsigned long);
 void __dma_flush_range(unsigned long, unsigned long);
 void __dma_inv_range(unsigned long, unsigned long);
 
+void mmu_early_enable(unsigned long membase, unsigned long memsize,
+ unsigned long ttb);
+
 #endif /* __ASM_MMU_H */
-- 
2.16.1




[PATCH 33/78] ARM: aarch64: mmu: create_sections() takes size in bytes

2018-03-16 Thread Sascha Hauer
create_sections() takes size in bytes, not in megabytes, so
drop the _m prefix from the size argument.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu_64.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index eb54097514..6e22da9a26 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -196,10 +196,9 @@ static void map_region(uint64_t virt, uint64_t phys, uint64_t size, uint64_t att
}
 }
 
-static void create_sections(uint64_t virt, uint64_t phys, uint64_t size_m, 
uint64_t flags)
+static void create_sections(uint64_t virt, uint64_t phys, uint64_t size, 
uint64_t flags)
 {
-
-   map_region(virt, phys, size_m, flags);
+   map_region(virt, phys, size, flags);
tlb_invalidate();
 }
 
-- 
2.16.1




[PATCH 12/78] ARM: aarch64: silence compiler warning

2018-03-16 Thread Sascha Hauer
find_pte is currently unused, nevertheless it's useful for debugging
purposes. Add a __maybe_unused to silence the compiler warning.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu_64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index fd41435746..4803a85e4c 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -130,7 +130,7 @@ static uint64_t *get_level_table(uint64_t *pte)
return table;
 }
 
-static uint64_t *find_pte(uint64_t addr)
+static __maybe_unused uint64_t *find_pte(uint64_t addr)
 {
uint64_t *pte;
uint64_t block_shift;
-- 
2.16.1




[PATCH 26/78] ARM: remove function prototypes from the past

2018-03-16 Thread Sascha Hauer
Several functions have not existed for a long time now. Remove their
prototypes.

Signed-off-by: Sascha Hauer 
---
 arch/arm/include/asm/barebox-arm.h | 8 
 arch/openrisc/cpu/cpu.c| 5 -
 2 files changed, 13 deletions(-)

diff --git a/arch/arm/include/asm/barebox-arm.h b/arch/arm/include/asm/barebox-arm.h
index 9fc8afbe7d..6c7507a9c7 100644
--- a/arch/arm/include/asm/barebox-arm.h
+++ b/arch/arm/include/asm/barebox-arm.h
@@ -32,16 +32,8 @@
 #include 
 #include 
 
-/* cpu/.../cpu.c */
-intcleanup_before_linux(void);
-
-/* arch/board(s)/.../... */
-intboard_init(void);
-intdram_init (void);
-
 extern char __exceptions_start[], __exceptions_stop[];
 
-void board_init_lowlevel(void);
 unsigned long get_runtime_offset(void);
 
/* global_variable_offset() - Access global variables when not running at link address
diff --git a/arch/openrisc/cpu/cpu.c b/arch/openrisc/cpu/cpu.c
index e7f944555e..cae42d41b8 100644
--- a/arch/openrisc/cpu/cpu.c
+++ b/arch/openrisc/cpu/cpu.c
@@ -23,11 +23,6 @@
 #include 
 #include 
 
-int cleanup_before_linux(void)
-{
-   return 0;
-}
-
 extern void __reset(void);
 
 static void __noreturn openrisc_restart_cpu(struct restart_handler *rst)
-- 
2.16.1




[PATCH 60/78] ARM: aarch64: implement stacktraces

2018-03-16 Thread Sascha Hauer
Implement stacktraces as a great debugging aid. On aarch64 this is cheap
enough to be enabled unconditionally. Unwinding code is taken from the
Kernel.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/interrupts_64.c  |  4 +-
 arch/arm/include/asm/barebox.h|  4 +-
 arch/arm/include/asm/stacktrace.h |  2 +
 arch/arm/lib64/Makefile   |  1 +
 arch/arm/lib64/stacktrace.c   | 86 +++
 5 files changed, 95 insertions(+), 2 deletions(-)
 create mode 100644 arch/arm/lib64/stacktrace.c

diff --git a/arch/arm/cpu/interrupts_64.c b/arch/arm/cpu/interrupts_64.c
index ffdb87af94..9ed6ed9761 100644
--- a/arch/arm/cpu/interrupts_64.c
+++ b/arch/arm/cpu/interrupts_64.c
@@ -44,7 +44,9 @@ static void __noreturn do_exception(struct pt_regs *pt_regs)
 {
show_regs(pt_regs);
 
-   panic("");
+   unwind_backtrace(pt_regs);
+
+   panic("panic: unhandled exception");
 }
 
 /**
diff --git a/arch/arm/include/asm/barebox.h b/arch/arm/include/asm/barebox.h
index 5a6622235b..4e89466593 100644
--- a/arch/arm/include/asm/barebox.h
+++ b/arch/arm/include/asm/barebox.h
@@ -2,9 +2,11 @@
 #define _BAREBOX_H_1
 
 #ifdef CONFIG_ARM_UNWIND
-#ifndef CONFIG_CPU_V8
 #define ARCH_HAS_STACK_DUMP
 #endif
+
+#ifdef CONFIG_CPU_V8
+#define ARCH_HAS_STACK_DUMP
 #endif
 
 #ifdef CONFIG_ARM_EXCEPTIONS
diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h
index 10f70e1675..602e79ced4 100644
--- a/arch/arm/include/asm/stacktrace.h
+++ b/arch/arm/include/asm/stacktrace.h
@@ -4,7 +4,9 @@
 struct stackframe {
unsigned long fp;
unsigned long sp;
+#ifdef CONFIG_CPU_32
unsigned long lr;
+#endif
unsigned long pc;
 };
 
diff --git a/arch/arm/lib64/Makefile b/arch/arm/lib64/Makefile
index 679ca556e5..77647128a5 100644
--- a/arch/arm/lib64/Makefile
+++ b/arch/arm/lib64/Makefile
@@ -1,3 +1,4 @@
+obj-y += stacktrace.o
 obj-$(CONFIG_ARM_LINUX)+= armlinux.o
 obj-y  += div0.o
 obj-$(CONFIG_ARM_OPTIMZED_STRING_FUNCTIONS)+= memcpy.o
diff --git a/arch/arm/lib64/stacktrace.c b/arch/arm/lib64/stacktrace.c
new file mode 100644
index 00..b8352c1454
--- /dev/null
+++ b/arch/arm/lib64/stacktrace.c
@@ -0,0 +1,86 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include 
+#include 
+
+#define THREAD_SIZE 16384
+
+/*
+ * AArch64 PCS assigns the frame pointer to x29.
+ *
+ * A simple function prologue looks like this:
+ * sub sp, sp, #0x10
+ * stp x29, x30, [sp]
+ * mov x29, sp
+ *
+ * A simple function epilogue looks like this:
+ * mov sp, x29
+ * ldp x29, x30, [sp]
+ * add sp, sp, #0x10
+ */
+int unwind_frame(struct stackframe *frame)
+{
+   unsigned long high, low;
+   unsigned long fp = frame->fp;
+
+   low  = frame->sp;
+   high = ALIGN(low, THREAD_SIZE);
+
+   if (fp < low || fp > high - 0x18 || fp & 0xf)
+   return -EINVAL;
+
+   frame->sp = fp + 0x10;
+   frame->fp = *(unsigned long *)(fp);
+   frame->pc = *(unsigned long *)(fp + 8);
+
+   return 0;
+}
+
+void dump_backtrace_entry(unsigned long where, unsigned long from)
+{
+#ifdef CONFIG_KALLSYMS
+   printf("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, 
from, (void *)from);
+#else
+   printf("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+#endif
+}
+
+void unwind_backtrace(struct pt_regs *regs)
+{
+struct stackframe frame = {};
+   register unsigned long current_sp asm ("sp");
+
+   if (regs) {
+   frame.fp = regs->regs[29];
+   frame.pc = regs->elr;
+   } else {
+   frame.fp = (unsigned long)__builtin_frame_address(0);
+   frame.sp = current_sp;
+   frame.pc = (unsigned long)unwind_backtrace;
+   }
+
+   printf("Call trace:\n");
+   while (1) {
+   unsigned long where = frame.pc;
+   int ret;
+
+   ret = unwind_frame(&frame);
+   if (ret < 0)
+   break;
+   dump_backtrace_entry(where, frame.pc);
+   }
+}
+
+void dump_stack(void)
+{
+   unwind_backtrace(NULL);
+}
-- 
2.16.1
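
Besides the exception path, the new dump_stack() can be called from C
code wherever a backtrace helps debugging; a small hedged example:

	if (!ptr) {
		printf("unexpected NULL pointer\n");
		dump_stack();	/* prints "Call trace:" followed by the unwound frames */
	}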




[PATCH 71/78] ARM: aarch64: disable 32bit boot commands

2018-03-16 Thread Sascha Hauer
The 32bit boot support is full of legacy cruft. While it should be
possible to merge the aarch64 support there, a fresh start looks more
promising.

Signed-off-by: Sascha Hauer 
---
 arch/arm/Makefile   | 1 -
 arch/arm/lib32/Makefile | 2 ++
 arch/arm/{lib => lib32}/bootm.c | 0
 arch/arm/{lib => lib32}/bootu.c | 0
 commands/Kconfig| 4 ++--
 5 files changed, 4 insertions(+), 3 deletions(-)
 rename arch/arm/{lib => lib32}/bootm.c (100%)
 rename arch/arm/{lib => lib32}/bootu.c (100%)

diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index cf84a9a1c1..ac97de1e83 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -298,7 +298,6 @@ endif
 
 common-y += $(BOARD) arch/arm/boards/ $(MACH)
 common-y += arch/arm/cpu/
-common-y += arch/arm/lib/
 
 ifeq ($(CONFIG_CPU_V8), y)
 common-y += arch/arm/lib64/
diff --git a/arch/arm/lib32/Makefile b/arch/arm/lib32/Makefile
index cdd07322cf..3c02a0bf96 100644
--- a/arch/arm/lib32/Makefile
+++ b/arch/arm/lib32/Makefile
@@ -1,5 +1,7 @@
 obj-$(CONFIG_ARM_LINUX)+= armlinux.o
 obj-$(CONFIG_CMD_BOOTZ)+= bootz.o
+obj-$(CONFIG_BOOTM) += bootm.o
+obj-$(CONFIG_CMD_BOOTU) += bootu.o
 obj-y  += div0.o
 obj-y  += findbit.o
 obj-y  += io.o
diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib32/bootm.c
similarity index 100%
rename from arch/arm/lib/bootm.c
rename to arch/arm/lib32/bootm.c
diff --git a/arch/arm/lib/bootu.c b/arch/arm/lib32/bootu.c
similarity index 100%
rename from arch/arm/lib/bootu.c
rename to arch/arm/lib32/bootu.c
diff --git a/commands/Kconfig b/commands/Kconfig
index 17bbe0f27a..951a86963e 100644
--- a/commands/Kconfig
+++ b/commands/Kconfig
@@ -348,7 +348,7 @@ config CMD_BOOTM
 config CMD_BOOTU
tristate
default y
-   depends on ARM
+   depends on ARM && !CPU_64v8
prompt "bootu"
help
Boot into already loaded Linux kernel, which must be raw (uncompressed).
@@ -357,7 +357,7 @@ config CMD_BOOTU
 
 config CMD_BOOTZ
tristate
-   depends on ARM
+   depends on ARM && !CPU_64v8
prompt "bootz"
help
  Boot Linux zImage
-- 
2.16.1




[PATCH 58/78] ARM: aarch64: hide some config options

2018-03-16 Thread Sascha Hauer
EABI and ATAGS have no meaning on aarch64, so hide the options from the
user.

Signed-off-by: Sascha Hauer 
---
 arch/arm/Kconfig | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 563475205d..37cde0c0c5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -286,6 +286,7 @@ config ARM_ASM_UNIFIED
 
 config AEABI
bool "Use the ARM EABI to compile barebox"
+   depends on !CPU_V8
help
  This option allows for barebox to be compiled using the latest
  ARM ABI (aka EABI).
@@ -308,7 +309,7 @@ config THUMB2_BAREBOX
 
 config ARM_BOARD_APPEND_ATAG
bool "Let board specific code to add ATAGs to be passed to the kernel"
-   depends on ARM_LINUX
+   depends on ARM_LINUX && !CPU_V8
help
  This option is purely to start some vendor provided kernels.
  ** DO NOT USE FOR YOUR OWN DESIGNS! **
-- 
2.16.1




[PATCH 22/78] ARM: aarch64: fix early cache flushing

2018-03-16 Thread Sascha Hauer
v8_dcache_all() should not be used directly, but only called from
v8_flush_dcache_all() and v8_invalidate_dcache_all(), which pass the
type of operation in x0. While at it, add the missing prototype
for v8_invalidate_dcache_all().

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/cache.c | 2 +-
 arch/arm/include/asm/cache.h | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/arm/cpu/cache.c b/arch/arm/cpu/cache.c
index 929c385df5..1a8f49d301 100644
--- a/arch/arm/cpu/cache.c
+++ b/arch/arm/cpu/cache.c
@@ -146,7 +146,7 @@ void arm_early_mmu_cache_flush(void)
 #endif
 #ifdef CONFIG_CPU_64v8
case CPU_ARCH_ARMv8:
-   v8_dcache_all();
+   v8_flush_dcache_all();
return;
 #endif
}
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index b5460a7876..406a9d5d99 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -3,7 +3,8 @@
 
 #ifdef CONFIG_CPU_64v8
 extern void v8_invalidate_icache_all(void);
-extern void v8_dcache_all(void);
+void v8_flush_dcache_all(void);
+void v8_invalidate_dcache_all(void);
 #endif
 
 static inline void icache_invalidate(void)
-- 
2.16.1




[PATCH 67/78] ARM: aarch64: Add barebox head support

2018-03-16 Thread Sascha Hauer
Allow aarch64 images to use the same image header as arm32 images.

Signed-off-by: Sascha Hauer 
---
 arch/arm/include/asm/barebox-arm-head.h | 13 +
 1 file changed, 13 insertions(+)

diff --git a/arch/arm/include/asm/barebox-arm-head.h b/arch/arm/include/asm/barebox-arm-head.h
index bd9c9b1c4f..4d0da6c491 100644
--- a/arch/arm/include/asm/barebox-arm-head.h
+++ b/arch/arm/include/asm/barebox-arm-head.h
@@ -21,6 +21,7 @@ void cortex_a7_lowlevel_init(void);
 static inline void __barebox_arm_head(void)
 {
__asm__ __volatile__ (
+#ifdef CONFIG_CPU_32
 #ifdef CONFIG_THUMB2_BAREBOX
".arm\n"
"adr r9, 1f + 1\n"
@@ -40,11 +41,23 @@ static inline void __barebox_arm_head(void)
"1: b 1b\n"
"1: b 1b\n"
"1: b 1b\n"
+#endif
+#else
+   "b 2f\n"
+   "nop\n"
+   "nop\n"
+   "nop\n"
+   "nop\n"
+   "nop\n"
 #endif
".asciz \"barebox\"\n"
+#ifdef CONFIG_CPU_32
".word _text\n" /* text base. If copied 
there,
 * barebox can skip 
relocation
 */
+#else
+   ".word 0x\n"
+#endif
".word _barebox_image_size\n"   /* image size to copy */
".rept 8\n"
".word 0x\n"
-- 
2.16.1




[PATCH 51/78] ARM: aarch64: implement dma operations

2018-03-16 Thread Sascha Hauer
For proper DMA support dma_alloc_coherent and DMA sync operations are
needed. Implement them.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu_64.c | 55 +++
 1 file changed, 55 insertions(+)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 6606405b21..20f6c387f3 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -365,3 +365,58 @@ void *phys_to_virt(unsigned long phys)
 {
return (void *)phys;
 }
+
+void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
+{
+   void *ret;
+
+   size = PAGE_ALIGN(size);
+   ret = xmemalign(PAGE_SIZE, size);
+   if (dma_handle)
+   *dma_handle = (dma_addr_t)ret;
+
+   map_region((unsigned long)ret, (unsigned long)ret, size, UNCACHED_MEM);
+   tlb_invalidate();
+
+   return ret;
+}
+
+void dma_free_coherent(void *mem, dma_addr_t dma_handle, size_t size)
+{
+   size = PAGE_ALIGN(size);
+
+   map_region((unsigned long)mem, (unsigned long)mem, size, CACHED_MEM);
+
+   free(mem);
+}
+
+void dma_sync_single_for_cpu(dma_addr_t address, size_t size,
+ enum dma_data_direction dir)
+{
+   if (dir != DMA_TO_DEVICE)
+   v8_inv_dcache_range(address, address + size - 1);
+}
+
+void dma_sync_single_for_device(dma_addr_t address, size_t size,
+enum dma_data_direction dir)
+{
+   if (dir == DMA_FROM_DEVICE)
+   v8_inv_dcache_range(address, address + size - 1);
+   v8_flush_dcache_range(address, address + size - 1);
+}
+
+dma_addr_t dma_map_single(struct device_d *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+   unsigned long addr = (unsigned long)ptr;
+
+   dma_sync_single_for_device(addr, size, dir);
+
+   return addr;
+}
+
+void dma_unmap_single(struct device_d *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+   dma_sync_single_for_cpu(addr, size, dir);
+}
-- 
2.16.1
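
A hedged usage sketch for the coherent allocation added here (the 4KiB
descriptor ring is only an example):

	dma_addr_t ring_phys;
	void *ring;

	ring = dma_alloc_coherent(4096, &ring_phys);	/* remapped uncached */

	/* ... program ring_phys into the controller, access 'ring' from the CPU ... */

	dma_free_coherent(ring, ring_phys, 4096);	/* remapped cached again and freed */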




[PATCH 50/78] ARM: implement dma mapping functions

2018-03-16 Thread Sascha Hauer
Implement basic dma mapping functions. For now just assume every address
is valid for dma mapping.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu.c | 16 
 1 file changed, 16 insertions(+)

diff --git a/arch/arm/cpu/mmu.c b/arch/arm/cpu/mmu.c
index fc71cf03cd..6ccd5893b4 100644
--- a/arch/arm/cpu/mmu.c
+++ b/arch/arm/cpu/mmu.c
@@ -598,3 +598,19 @@ void dma_sync_single_for_device(dma_addr_t address, size_t size,
outer_cache.clean_range(address, address + size);
}
 }
+
+dma_addr_t dma_map_single(struct device_d *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+   unsigned long addr = (unsigned long)ptr;
+
+   dma_sync_single_for_device(addr, size, dir);
+
+   return addr;
+}
+
+void dma_unmap_single(struct device_d *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+   dma_sync_single_for_cpu(addr, size, dir);
+}
-- 
2.16.1




[PATCH 28/78] ARM: relocate_to_current_adr: Use unsigned long for variables

2018-03-16 Thread Sascha Hauer
relocate_to_current_adr() fixes up pointers in the binary, so unsigned
long is a better match as it works on aarch64 as well.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/common.c | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/arm/cpu/common.c b/arch/arm/cpu/common.c
index 7c07d00c1b..3766116d97 100644
--- a/arch/arm/cpu/common.c
+++ b/arch/arm/cpu/common.c
@@ -30,8 +30,8 @@
  */
 void relocate_to_current_adr(void)
 {
-   uint32_t offset;
-   uint32_t *dstart, *dend, *dynsym, *dynend;
+   unsigned long offset;
+   unsigned long *dstart, *dend, *dynsym, *dynend;
 
/* Get offset between linked address and runtime address */
offset = get_runtime_offset();
@@ -43,14 +43,14 @@ void relocate_to_current_adr(void)
dynend = (void *)__dynsym_end + offset;
 
while (dstart < dend) {
-   uint32_t *fixup = (uint32_t *)(*dstart + offset);
-   uint32_t type = *(dstart + 1);
+   unsigned long *fixup = (unsigned long *)(*dstart + offset);
+   unsigned long type = *(dstart + 1);
 
if ((type & 0xff) == 0x17) {
*fixup = *fixup + offset;
} else {
int index = type >> 8;
-   uint32_t r = dynsym[index * 4 + 1];
+   unsigned long r = dynsym[index * 4 + 1];
 
*fixup = *fixup + r + offset;
}
-- 
2.16.1




[PATCH 47/78] ARM: Make some variables 64bit aware

2018-03-16 Thread Sascha Hauer
Use unsigned long as type for variables that are used as addresses.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/start-pbl.c  | 10 +-
 arch/arm/cpu/uncompress.c |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/arm/cpu/start-pbl.c b/arch/arm/cpu/start-pbl.c
index 73c27429da..25f91b02f3 100644
--- a/arch/arm/cpu/start-pbl.c
+++ b/arch/arm/cpu/start-pbl.c
@@ -48,10 +48,10 @@ extern void *input_data_end;
 __noreturn void barebox_single_pbl_start(unsigned long membase,
unsigned long memsize, void *boarddata)
 {
-   uint32_t offset;
-   uint32_t pg_start, pg_end, pg_len, uncompressed_len;
+   unsigned long offset;
+   unsigned long pg_start, pg_end, pg_len, uncompressed_len;
void __noreturn (*barebox)(unsigned long, unsigned long, void *);
-   uint32_t endmem = membase + memsize;
+   unsigned long endmem = membase + memsize;
unsigned long barebox_base;
 
if (IS_ENABLED(CONFIG_PBL_RELOCATABLE))
@@ -60,8 +60,8 @@ __noreturn void barebox_single_pbl_start(unsigned long membase,
/* Get offset between linked address and runtime address */
offset = get_runtime_offset();
 
-   pg_start = (uint32_t)&input_data + global_variable_offset();
-   pg_end = (uint32_t)&input_data_end + global_variable_offset();
+   pg_start = (unsigned long)&input_data + global_variable_offset();
+   pg_end = (unsigned long)&input_data_end + global_variable_offset();
pg_len = pg_end - pg_start;
uncompressed_len = get_unaligned((const u32 *)(pg_start + pg_len - 4));
 
diff --git a/arch/arm/cpu/uncompress.c b/arch/arm/cpu/uncompress.c
index 37845b2259..c721eef0ad 100644
--- a/arch/arm/cpu/uncompress.c
+++ b/arch/arm/cpu/uncompress.c
@@ -45,7 +45,7 @@ void __noreturn barebox_multi_pbl_start(unsigned long membase,
 {
uint32_t pg_len, uncompressed_len;
void __noreturn (*barebox)(unsigned long, unsigned long, void *);
-   uint32_t endmem = membase + memsize;
+   unsigned long endmem = membase + memsize;
unsigned long barebox_base;
uint32_t *image_end;
void *pg_start;
-- 
2.16.1




[PATCH 59/78] ARM: aarch64: implement show_regs()

2018-03-16 Thread Sascha Hauer
Do something useful in an exception and at least print the current
register contents.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/interrupts_64.c  |  8 
 arch/arm/include/asm/ptrace.h | 19 +++
 2 files changed, 27 insertions(+)

diff --git a/arch/arm/cpu/interrupts_64.c b/arch/arm/cpu/interrupts_64.c
index b3bd0aa5a4..ffdb87af94 100644
--- a/arch/arm/cpu/interrupts_64.c
+++ b/arch/arm/cpu/interrupts_64.c
@@ -30,6 +30,14 @@
  */
 void show_regs(struct pt_regs *regs)
 {
+   int i;
+
+   printf("elr: %016lx lr : %016lx\n", regs->elr, regs->regs[30]);
+
+   for (i = 0; i < 29; i += 2)
+   printf("x%-2d: %016lx x%-2d: %016lx\n",
+   i, regs->regs[i], i + 1, regs->regs[i + 1]);
+   printf("\n");
 }
 
 static void __noreturn do_exception(struct pt_regs *pt_regs)
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 6520a0a73a..7fbd8d9b6f 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -10,6 +10,23 @@
 #ifndef __ASM_ARM_PTRACE_H
 #define __ASM_ARM_PTRACE_H
 
+#ifdef CONFIG_CPU_64
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are stored
+ * on the stack during an exception.
+ */
+struct pt_regs {
+   unsigned long elr;
+   unsigned long regs[31];
+};
+
+#endif  /* __ASSEMBLY__ */
+
+#else   /* CONFIG_CPU_64 */
+
 #define PTRACE_GETREGS 12
 #define PTRACE_SETREGS 13
 #define PTRACE_GETFPREGS   14
@@ -141,4 +158,6 @@ extern void show_regs(struct pt_regs *);
 
 #endif /* __ASSEMBLY__ */
 
+#endif /* CONFIG_CPU_64 */
+
 #endif
-- 
2.16.1




[PATCH 39/78] ARM: aarch64: mmu: enable mmu in generic code

2018-03-16 Thread Sascha Hauer
Using board code to enable the MMU is not nice. Do it in generic
code. Since mmu_enable() is now done in mmu_64.c we no longer have
to export it.

Signed-off-by: Sascha Hauer 
---
 arch/arm/boards/qemu-virt64/init.c | 10 --
 arch/arm/cpu/mmu_64.c  | 20 
 arch/arm/include/asm/mmu.h |  1 -
 3 files changed, 8 insertions(+), 23 deletions(-)

diff --git a/arch/arm/boards/qemu-virt64/init.c b/arch/arm/boards/qemu-virt64/init.c
index 686231696e..19cfcae1f0 100644
--- a/arch/arm/boards/qemu-virt64/init.c
+++ b/arch/arm/boards/qemu-virt64/init.c
@@ -62,13 +62,3 @@ static int virt_core_init(void)
return 0;
 }
 postcore_initcall(virt_core_init);
-
-#ifdef CONFIG_MMU
-static int virt_mmu_enable(void)
-{
-   mmu_enable();
-
-   return 0;
-}
-postmmu_initcall(virt_mmu_enable);
-#endif
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 639aa6d189..695a73262d 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -210,6 +210,12 @@ int arch_remap_range(void *_start, size_t size, unsigned flags)
return 0;
 }
 
+static void mmu_enable(void)
+{
+   isb();
+   set_cr(get_cr() | CR_M | CR_C | CR_I);
+}
+
 /*
  * Prepare MMU for usage enable it.
  */
@@ -256,22 +262,12 @@ static int mmu_init(void)
for_each_memory_bank(bank)
create_sections(bank->start, bank->start, bank->size, CACHED_MEM);
 
+   mmu_enable();
+
return 0;
 }
 mmu_initcall(mmu_init);
 
-void mmu_enable(void)
-{
-   if (!ttb)
-   arm_mmu_not_initialized_error();
-
-   if (!(get_cr() & CR_M)) {
-
-   isb();
-   set_cr(get_cr() | CR_M | CR_C | CR_I);
-   }
-}
-
 void mmu_disable(void)
 {
unsigned int cr;
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 2a1daeafe3..840851858f 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -8,7 +8,6 @@
 
 struct arm_memory;
 
-void mmu_enable(void);
 void mmu_disable(void);
static inline void arm_create_section(unsigned long virt, unsigned long phys, int size_m,
unsigned int flags)
-- 
2.16.1




[PATCH 62/78] ARM: aarch64: Allow to leave exceptions

2018-03-16 Thread Sascha Hauer
So far exceptions can only be entered but never left. Add code to leave
exceptions, based on U-Boot commit 4c2cc7c4e (arm64: Allow exceptions to return).
This will be useful to implement ignoring data aborts for the 'md' command.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/exceptions_64.S | 34 ++
 1 file changed, 34 insertions(+)

diff --git a/arch/arm/cpu/exceptions_64.S b/arch/arm/cpu/exceptions_64.S
index a7069a8475..58ca50b709 100644
--- a/arch/arm/cpu/exceptions_64.S
+++ b/arch/arm/cpu/exceptions_64.S
@@ -80,34 +80,68 @@ vectors:
 _do_bad_sync:
exception_entry
bl  do_bad_sync
+   b   exception_exit
 
 _do_bad_irq:
exception_entry
bl  do_bad_irq
+   b   exception_exit
 
 _do_bad_fiq:
exception_entry
bl  do_bad_fiq
+   b   exception_exit
 
 _do_bad_error:
exception_entry
bl  do_bad_error
+   b   exception_exit
 
 _do_sync:
exception_entry
bl  do_sync
+   b   exception_exit
 
 _do_irq:
exception_entry
bl  do_irq
+   b   exception_exit
 
 _do_fiq:
exception_entry
bl  do_fiq
+   b   exception_exit
 
 _do_error:
exception_entry
bl  do_error
+   b   exception_exit
+
+exception_exit:
+   ldp x2, x0, [sp],#16
+   switch_el x11, 3f, 2f, 1f
+3: msr elr_el3, x2
+   b   0f
+2: msr elr_el2, x2
+   b   0f
+1: msr elr_el1, x2
+0:
+   ldp x1, x2, [sp],#16
+   ldp x3, x4, [sp],#16
+   ldp x5, x6, [sp],#16
+   ldp x7, x8, [sp],#16
+   ldp x9, x10, [sp],#16
+   ldp x11, x12, [sp],#16
+   ldp x13, x14, [sp],#16
+   ldp x15, x16, [sp],#16
+   ldp x17, x18, [sp],#16
+   ldp x19, x20, [sp],#16
+   ldp x21, x22, [sp],#16
+   ldp x23, x24, [sp],#16
+   ldp x25, x26, [sp],#16
+   ldp x27, x28, [sp],#16
+   ldp x29, x30, [sp],#16
+   eret
 
 .section .data
 .align 4
-- 
2.16.1
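
A sketch of what returning from exceptions makes possible (the names
below are purely hypothetical, not existing barebox API): a memory dump
could let the data abort handler record the fault and advance ELR past
the faulting instruction, then resume through exception_exit instead of
panicking:

	/* hypothetical sketch only */
	static volatile int data_abort_seen;

	static int try_read_u32(const volatile uint32_t *addr, uint32_t *val)
	{
		data_abort_seen = 0;
		*val = *addr;		/* may fault; the handler would set the flag */
		return data_abort_seen ? -EFAULT : 0;
	}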




[PATCH 54/78] ARM: aarch64: fix exception level mixup

2018-03-16 Thread Sascha Hauer
When entering an exception we currently jump to the code handling
EL1 when we are actually at EL3, and the other way round. Fix this by
introducing and using the switch_el macro from U-Boot.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/exceptions_64.S   | 10 ++
 arch/arm/include/asm/assembler64.h | 21 +
 2 files changed, 23 insertions(+), 8 deletions(-)
 create mode 100644 arch/arm/include/asm/assembler64.h

diff --git a/arch/arm/cpu/exceptions_64.S b/arch/arm/cpu/exceptions_64.S
index 58120253a1..a7069a8475 100644
--- a/arch/arm/cpu/exceptions_64.S
+++ b/arch/arm/cpu/exceptions_64.S
@@ -7,6 +7,7 @@
 
 #include 
 #include 
+#include 
 #include 
 
 /*
@@ -31,14 +32,7 @@
stp x3, x4, [sp, #-16]!
stp x1, x2, [sp, #-16]!
 
-   /* Could be running at EL3/EL2/EL1 */
-   mrs x11, CurrentEL
-   cmp x11, #0xC   /* Check EL3 state */
-   b.eq1f
-   cmp x11, #0x8   /* Check EL2 state */
-   b.eq2f
-   cmp x11, #0x4   /* Check EL1 state */
-   b.eq3f
+   switch_el x11, 3f, 2f, 1f
 3: mrs x1, esr_el3
mrs x2, elr_el3
b   0f
diff --git a/arch/arm/include/asm/assembler64.h b/arch/arm/include/asm/assembler64.h
new file mode 100644
index 00..26182aa5f6
--- /dev/null
+++ b/arch/arm/include/asm/assembler64.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_ARCH_ASSEMBLY_H
+#define __ASM_ARCH_ASSEMBLY_H
+
+#ifndef __ASSEMBLY__
+#error "Only include this from assembly code"
+#endif
+
+/*
+ * Branch according to exception level
+ */
+.macro  switch_el, xreg, el3_label, el2_label, el1_label
+   mrs \xreg, CurrentEL
+   cmp \xreg, 0xc
+   b.eq\el3_label
+   cmp \xreg, 0x8
+   b.eq\el2_label
+   cmp \xreg, 0x4
+   b.eq\el1_label
+.endm
+
+#endif /* __ASM_ARCH_ASSEMBLY_H */
\ No newline at end of file
-- 
2.16.1




[PATCH 29/78] clocksource: Add armv8 generic timer support

2018-03-16 Thread Sascha Hauer
armv8 has a generic timer used in many SoCs. Add support for it.

Signed-off-by: Sascha Hauer 
---
 drivers/clocksource/Kconfig   |  5 +++
 drivers/clocksource/Makefile  |  1 +
 drivers/clocksource/armv8-timer.c | 65 +++
 3 files changed, 71 insertions(+)
 create mode 100644 drivers/clocksource/armv8-timer.c

diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 23ad20afcf..3d63f7ff16 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -65,3 +65,8 @@ config CLOCKSOURCE_ROCKCHIP
 config CLOCKSOURCE_ATMEL_PIT
bool
depends on SOC_AT91SAM9 || SOC_SAMA5
+
+config CLOCKSOURCE_ARMV8_TIMER
+   bool
+   default y
+   depends on ARM && CPU_64v8
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index f774edee46..ea33fff502 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_CLOCKSOURCE_ORION)   += orion.o
 obj-$(CONFIG_CLOCKSOURCE_UEMD)+= uemd.o
 obj-$(CONFIG_CLOCKSOURCE_ROCKCHIP)+= rk_timer.o
 obj-$(CONFIG_CLOCKSOURCE_ATMEL_PIT) += timer-atmel-pit.o
+obj-$(CONFIG_CLOCKSOURCE_ARMV8_TIMER) += armv8-timer.o
diff --git a/drivers/clocksource/armv8-timer.c 
b/drivers/clocksource/armv8-timer.c
new file mode 100644
index 00..57b0b694c7
--- /dev/null
+++ b/drivers/clocksource/armv8-timer.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2018 Sascha Hauer 
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+uint64_t armv8_clocksource_read(void)
+{
+   unsigned long cntpct;
+
+   isb();
+   asm volatile("mrs %0, cntpct_el0" : "=r" (cntpct));
+
+   return cntpct;
+}
+
+static struct clocksource cs = {
+   .read   = armv8_clocksource_read,
+   .mask   = CLOCKSOURCE_MASK(64),
+   .shift  = 0,
+};
+
+static int armv8_timer_probe(struct device_d *dev)
+{
+   unsigned long cntfrq;
+
+   asm volatile("mrs %0, cntfrq_el0" : "=r" (cntfrq));
+
+   cs.mult = clocksource_hz2mult(cntfrq, cs.shift);
+
+   return init_clock();
+}
+
+static struct of_device_id armv8_timer_dt_ids[] = {
+   { .compatible = "arm,armv8-timer", },
+   { }
+};
+
+static struct driver_d armv8_timer_driver = {
+   .name = "armv8-timer",
+   .probe = armv8_timer_probe,
+   .of_compatible = DRV_OF_COMPAT(armv8_timer_dt_ids),
+};
+
+static int armv8_timer_init(void)
+{
+   return platform_driver_register(&armv8_timer_driver);
+}
+postcore_initcall(armv8_timer_init);
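For context, the mult/shift pair set up in the probe function above is what
the clocksource core uses to turn counter ticks into nanoseconds. A rough
sketch (example_ticks_to_ns() is a made-up helper and the 24 MHz counter
frequency is only an assumed example value for cntfrq_el0):

    static uint64_t example_ticks_to_ns(void)
    {
            uint64_t cycles = armv8_clocksource_read();
            uint32_t mult = clocksource_hz2mult(24000000, 0); /* shift == 0 */

            /* the clocksource core effectively computes cycles -> ns as */
            return (cycles * mult) >> 0;
    }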
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 56/78] ARM: aarch64: lowlevel: Use switch_el

2018-03-16 Thread Sascha Hauer
Use switch_el macro rather than open coded version. While at it rename the
labels so that the name matches the exception level.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/lowlevel_64.S | 13 -
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/arch/arm/cpu/lowlevel_64.S b/arch/arm/cpu/lowlevel_64.S
index a66556f1ad..af1cd8b5bc 100644
--- a/arch/arm/cpu/lowlevel_64.S
+++ b/arch/arm/cpu/lowlevel_64.S
@@ -1,18 +1,13 @@
 #include 
 #include 
 #include 
+#include 
 
 .section ".text_bare_init_","ax"
 ENTRY(arm_cpu_lowlevel_init)
-   mrs x1, CurrentEL
-   cmp x1, #0xC/* Check EL3 state */
-   b.eq1f
-   cmp x1, #0x8/* Check EL2 state */
-   b.eq2f
-   cmp x1, #0x4/* Check EL1 state */
-   b.eq3f
+   switch_el x1, 3f, 2f, 1f
 
-1:
+3:
mov x0, #1  /* Non-Secure EL0/1 */
orr x0, x0, #(1 << 10)  /* 64-bit EL2 */
msr scr_el3, x0
@@ -25,7 +20,7 @@ ENTRY(arm_cpu_lowlevel_init)
b   done
 
 
-3:
+1:
mov x0, #(3 << 20)  /* Enable FP/SIMD */
msr cpacr_el1, x0
b   done
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 37/78] ARM: aarch64: mmu: Fix mair register setting

2018-03-16 Thread Sascha Hauer
The memory attributes register contains the memory attribute settings
corresponding to the possible AttrIndx values in the page table
entries. Passing UNCACHED_MEM makes no sense here, pass the
desired attributes instead.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/mmu_64.c| 5 +++--
 arch/arm/include/asm/pgtable64.h | 7 +++
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 27b5acd6a7..639aa6d189 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -243,7 +243,8 @@ static int mmu_init(void)
 
memset(ttb, 0, GRANULE_SIZE);
 
-   set_ttbr_tcr_mair(current_el(), (uint64_t)ttb, TCR_FLAGS, 
UNCACHED_MEM);
+   set_ttbr_tcr_mair(current_el(), (uint64_t)ttb, TCR_FLAGS,
+ MEMORY_ATTRIBUTES);
}
 
pr_debug("ttb: 0x%p\n", ttb);
@@ -298,7 +299,7 @@ void mmu_early_enable(uint64_t membase, uint64_t memsize, 
uint64_t _ttb)
 
memset(ttb, 0, GRANULE_SIZE);
 
-   set_ttbr_tcr_mair(current_el(), (uint64_t)ttb, TCR_FLAGS, UNCACHED_MEM);
+   set_ttbr_tcr_mair(current_el(), (uint64_t)ttb, TCR_FLAGS, 
MEMORY_ATTRIBUTES);
 
create_sections(0, 0, 1UL << (BITS_PER_VA - 1), UNCACHED_MEM);
 
diff --git a/arch/arm/include/asm/pgtable64.h b/arch/arm/include/asm/pgtable64.h
index 20bea5b28a..7f7efa10ca 100644
--- a/arch/arm/include/asm/pgtable64.h
+++ b/arch/arm/include/asm/pgtable64.h
@@ -109,6 +109,13 @@
 #define MT_NORMAL  4
 #define MT_NORMAL_WT   5
 
+#define MEMORY_ATTRIBUTES ((0x00 << (MT_DEVICE_nGnRnE * 8))| \
+ (0x04 << (MT_DEVICE_nGnRE * 8))   | \
+ (0x0c << (MT_DEVICE_GRE * 8)) | \
+ (0x44 << (MT_NORMAL_NC * 8))  | \
+ (UL(0xff) << (MT_NORMAL * 8)) | \
+ (UL(0xbb) << (MT_NORMAL_WT * 8)))
+
 /*
  * TCR flags.
  */
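To illustrate the encoding: MAIR_ELx is an array of eight one-byte attribute
fields and the AttrIndx bits of a block/page descriptor select one of them.
A small sketch of how an index maps to an attribute byte with the macro above:

    /* MT_NORMAL (index 4) selects the 0xff normal write-back byte */
    unsigned long attr = (MEMORY_ATTRIBUTES >> (MT_NORMAL * 8)) & 0xff;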
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 13/78] ARM: aarch64: Add dummy naked attribute

2018-03-16 Thread Sascha Hauer
The naked attribute is not supported on aarch64. To silence the compiler
warning, add a dummy naked attribute.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/entry.c   |  2 +-
 arch/arm/cpu/start.c   |  4 ++--
 arch/arm/include/asm/barebox-arm.h | 17 +++--
 3 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/arch/arm/cpu/entry.c b/arch/arm/cpu/entry.c
index 33b1429d4a..b48c1ca11d 100644
--- a/arch/arm/cpu/entry.c
+++ b/arch/arm/cpu/entry.c
@@ -24,7 +24,7 @@
  * be fine.
  */
 
-void __naked __noreturn barebox_arm_entry(unsigned long membase,
+void NAKED __noreturn barebox_arm_entry(unsigned long membase,
  unsigned long memsize, void 
*boarddata)
 {
arm_setup_stack(arm_mem_stack_top(membase, membase + memsize) - 16);
diff --git a/arch/arm/cpu/start.c b/arch/arm/cpu/start.c
index 171e6ad0eb..9f4213bfb9 100644
--- a/arch/arm/cpu/start.c
+++ b/arch/arm/cpu/start.c
@@ -229,7 +229,7 @@ __noreturn void barebox_non_pbl_start(unsigned long membase,
 
 #ifndef CONFIG_PBL_IMAGE
 
-void __naked __section(.text_entry) start(void)
+void NAKED __section(.text_entry) start(void)
 {
barebox_arm_head();
 }
@@ -239,7 +239,7 @@ void __naked __section(.text_entry) start(void)
  * First function in the uncompressed image. We get here from
  * the pbl. The stack already has been set up by the pbl.
  */
-void __naked __section(.text_entry) start(unsigned long membase,
+void NAKED __section(.text_entry) start(unsigned long membase,
unsigned long memsize, void *boarddata)
 {
barebox_non_pbl_start(membase, memsize, boarddata);
diff --git a/arch/arm/include/asm/barebox-arm.h 
b/arch/arm/include/asm/barebox-arm.h
index 3aea2e070e..170839aaad 100644
--- a/arch/arm/include/asm/barebox-arm.h
+++ b/arch/arm/include/asm/barebox-arm.h
@@ -161,13 +161,13 @@ static inline unsigned long 
arm_mem_barebox_image(unsigned long membase,
 #define ENTRY_FUNCTION(name, arg0, arg1, arg2) \
static void __##name(uint32_t, uint32_t, uint32_t); \
\
-   void __naked __section(.text_head_entry_##name) name\
+   void NAKED __section(.text_head_entry_##name)   name\
(uint32_t r0, uint32_t r1, uint32_t r2) \
{   \
__barebox_arm_head();   \
__##name(r0, r1, r2);   \
}   \
-   static void __naked noinline __##name   \
+   static void NAKED noinline __##name \
(uint32_t arg0, uint32_t arg1, uint32_t arg2)
 
 /*
@@ -181,4 +181,17 @@ static inline unsigned long arm_mem_barebox_image(unsigned 
long membase,
 
 #define barebox_image_size (__image_end - __image_start)
 
+#ifdef CONFIG_CPU_32
+#define NAKED __naked
+#else
+/*
+ * There is no naked support for aarch64, so do not rely on it.
+ * This basically means we must have a stack configured when a
+ * function with the naked attribute is entered. On today's hardware
+ * the ROM should have some basic stack already. If not, set one
+ * up before jumping into the barebox entry functions.
+ */
+#define NAKED
+#endif
+
 #endif /* _BAREBOX_ARM_H_ */
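A hypothetical board entry illustrating the consequence described in the
comment above: since NAKED expands to nothing on aarch64, the compiler emits
a normal prologue, so a usable stack must already exist (assumed here to be
provided by the boot ROM). Board name and memory layout are made up:

    ENTRY_FUNCTION(start_virt_example, r0, r1, r2)
    {
            /* on aarch64 the compiler may push a stack frame right here */
            arm_cpu_lowlevel_init();
            barebox_arm_entry(0x40000000, 0x20000000, NULL);
    }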
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 53/78] ARM: aarch64: move aarch64 exception support to separate file

2018-03-16 Thread Sascha Hauer
The exception support for arm32 and aarch64 does not have much in
common. Move aarch64 exception support to a separate file to avoid
more ifdeffery.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/Makefile|   3 +-
 arch/arm/cpu/interrupts.c|  48 +-
 arch/arm/cpu/interrupts_64.c | 116 +++
 3 files changed, 118 insertions(+), 49 deletions(-)
 create mode 100644 arch/arm/cpu/interrupts_64.c

diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index b2fed2be51..eb783481ea 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -1,6 +1,6 @@
 obj-y += cpu.o
 
-obj-$(CONFIG_ARM_EXCEPTIONS) += exceptions$(S64).o
+obj-$(CONFIG_ARM_EXCEPTIONS) += exceptions$(S64).o interrupts$(S64).o
 obj-$(CONFIG_MMU) += mmu$(S64).o
 lwl-y += lowlevel$(S64).o
 
@@ -8,7 +8,6 @@ ifeq ($(CONFIG_CPU_32), y)
 obj-pbl-$(CONFIG_MMU) += mmu-early.o
 endif
 
-obj-$(CONFIG_ARM_EXCEPTIONS) += interrupts.o
 obj-y += start.o entry.o
 
 obj-pbl-y += setupc$(S64).o
diff --git a/arch/arm/cpu/interrupts.c b/arch/arm/cpu/interrupts.c
index c34108a4f8..73f023bd71 100644
--- a/arch/arm/cpu/interrupts.c
+++ b/arch/arm/cpu/interrupts.c
@@ -26,9 +26,8 @@
 #include 
 #include 
 #include 
+#include 
 
-
-#if __LINUX_ARM_ARCH__ <= 7
 /**
  * Display current register set content
  * @param[in] regs Guess what
@@ -72,13 +71,10 @@ void show_regs (struct pt_regs *regs)
unwind_backtrace(regs);
 #endif
 }
-#endif
 
 static void __noreturn do_exception(struct pt_regs *pt_regs)
 {
-#if __LINUX_ARM_ARCH__ <= 7
show_regs(pt_regs);
-#endif
 
panic("");
 }
@@ -126,8 +122,6 @@ void do_prefetch_abort (struct pt_regs *pt_regs)
  */
 void do_data_abort (struct pt_regs *pt_regs)
 {
-
-#if __LINUX_ARM_ARCH__ <= 7
u32 far;
 
asm volatile ("mrc p15, 0, %0, c6, c0, 0" : "=r" (far) : : "cc");
@@ -135,7 +129,6 @@ void do_data_abort (struct pt_regs *pt_regs)
printf("unable to handle %s at address 0x%08x\n",
far < PAGE_SIZE ? "NULL pointer dereference" :
"paging request", far);
-#endif
 
do_exception(pt_regs);
 }
@@ -164,45 +157,6 @@ void do_irq (struct pt_regs *pt_regs)
do_exception(pt_regs);
 }
 
-#ifdef CONFIG_CPU_64v8
-void do_bad_sync(struct pt_regs *pt_regs)
-{
-   printf("bad sync\n");
-   do_exception(pt_regs);
-}
-
-void do_bad_irq(struct pt_regs *pt_regs)
-{
-   printf("bad irq\n");
-   do_exception(pt_regs);
-}
-
-void do_bad_fiq(struct pt_regs *pt_regs)
-{
-   printf("bad fiq\n");
-   do_exception(pt_regs);
-}
-
-void do_bad_error(struct pt_regs *pt_regs)
-{
-   printf("bad error\n");
-   do_exception(pt_regs);
-}
-
-void do_sync(struct pt_regs *pt_regs)
-{
-   printf("sync exception\n");
-   do_exception(pt_regs);
-}
-
-
-void do_error(struct pt_regs *pt_regs)
-{
-   printf("error exception\n");
-   do_exception(pt_regs);
-}
-#endif
-
 extern volatile int arm_ignore_data_abort;
 extern volatile int arm_data_abort_occurred;
 
diff --git a/arch/arm/cpu/interrupts_64.c b/arch/arm/cpu/interrupts_64.c
new file mode 100644
index 00..81fd941cfa
--- /dev/null
+++ b/arch/arm/cpu/interrupts_64.c
@@ -0,0 +1,116 @@
+/*
+ * interrupts_64.c - Interrupt Support Routines
+ *
+ * Copyright (c) 2018 Sascha Hauer , Pengutronix
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+/**
+ * Display current register set content
+ * @param[in] regs Guess what
+ */
+void show_regs(struct pt_regs *regs)
+{
+}
+
+static void __noreturn do_exception(struct pt_regs *pt_regs)
+{
+   show_regs(pt_regs);
+
+   panic("");
+}
+
+/**
+ * The CPU catches a fast interrupt request.
+ * @param[in] pt_regs Register set content when the interrupt happens
+ *
+ * We never enable FIQs, so this should not happen
+ */
+void do_fiq(struct pt_regs *pt_regs)
+{
+   printf ("fast interrupt request\n");
+   do_exception(pt_regs);
+}
+
+/**
+ * The CPU catches a regular interrupt.
+ * @param[in] pt_regs Register set content when the interrupt happens
+ *
+ * We never enable interrupts, so this should not happen
+ */
+void do_irq(struct pt_regs *pt_regs)
+{
+   printf ("interrupt request\n");
+   do_exception(pt_regs);
+}
+
+void do_bad_sync(struct pt_regs *pt_regs)
+{
+   printf("bad sync\n");
+   do_exception(pt_regs);
+}
+
+void do_bad_irq(struct 

[PATCH 25/78] ARM: Add function to return offset to global variables

2018-03-16 Thread Sascha Hauer
ARM and aarch64 differ in the way global variables are addressed. This
adds a function which abstracts the differences.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/start-pbl.c   |  4 ++--
 arch/arm/cpu/uncompress.c  |  2 +-
 arch/arm/include/asm/barebox-arm.h | 14 ++
 3 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/arch/arm/cpu/start-pbl.c b/arch/arm/cpu/start-pbl.c
index 68b2bbf6fb..73c27429da 100644
--- a/arch/arm/cpu/start-pbl.c
+++ b/arch/arm/cpu/start-pbl.c
@@ -60,8 +60,8 @@ __noreturn void barebox_single_pbl_start(unsigned long 
membase,
/* Get offset between linked address and runtime address */
offset = get_runtime_offset();
 
-   pg_start = (uint32_t)_data + offset;
-   pg_end = (uint32_t)_data_end + offset;
+   pg_start = (uint32_t)_data + global_variable_offset();
+   pg_end = (uint32_t)_data_end + global_variable_offset();
pg_len = pg_end - pg_start;
uncompressed_len = get_unaligned((const u32 *)(pg_start + pg_len - 4));
 
diff --git a/arch/arm/cpu/uncompress.c b/arch/arm/cpu/uncompress.c
index b600c1e7ec..37845b2259 100644
--- a/arch/arm/cpu/uncompress.c
+++ b/arch/arm/cpu/uncompress.c
@@ -51,7 +51,7 @@ void __noreturn barebox_multi_pbl_start(unsigned long membase,
void *pg_start;
unsigned long pc = get_pc();
 
-   image_end = (void *)&image_end_marker + get_runtime_offset();
+   image_end = (void *)&image_end_marker + global_variable_offset();
 
if (IS_ENABLED(CONFIG_PBL_RELOCATABLE)) {
/*
diff --git a/arch/arm/include/asm/barebox-arm.h 
b/arch/arm/include/asm/barebox-arm.h
index 1dccb8965e..9fc8afbe7d 100644
--- a/arch/arm/include/asm/barebox-arm.h
+++ b/arch/arm/include/asm/barebox-arm.h
@@ -44,6 +44,20 @@ extern char __exceptions_start[], __exceptions_stop[];
 void board_init_lowlevel(void);
 unsigned long get_runtime_offset(void);
 
+/* global_variable_offset() - Access global variables when not running at link 
address
+ *
+ * Get the offset of global variables when not running at the address we are
+ * linked at. ARM uses absolute addresses, so we must add the runtime offset
+ * whereas aarch64 uses PC relative addresses, so nothing must be done here.
+ */
+static inline unsigned long global_variable_offset(void)
+{
+   if (IS_ENABLED(CONFIG_CPU_32))
+   return get_runtime_offset();
+   else
+   return 0;
+}
+
 void setup_c(void);
 void relocate_to_current_adr(void);
 void relocate_to_adr(unsigned long target);
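A short sketch of the intended usage pattern (some_global and
early_get_some_global() are made-up names): code running before relocation
adds global_variable_offset() to the address of a global, which applies the
runtime offset on ARM32 and is a no-op on aarch64:

    static int some_global;

    static int *early_get_some_global(void)
    {
            /* absolute address + runtime offset on ARM32, + 0 on aarch64 */
            return (int *)((unsigned long)&some_global +
                           global_variable_offset());
    }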
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 19/78] ARM: aarch64: Do not use 32bit optimized fls

2018-03-16 Thread Sascha Hauer
The clz operation only works with 32bit values, so use the generic
fls() variants on aarch64. With this tlsf_malloc works as expected.

Signed-off-by: Sascha Hauer 
---
 arch/arm/include/asm/bitops.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index cd356c5e3d..348a76b2c1 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -115,7 +115,7 @@ extern int _find_next_bit_be(const unsigned long *p, int 
size, int offset);
 
 #endif /* __ARMEB__ */
 
-#if defined(__LINUX_ARM_ARCH__) && (__LINUX_ARM_ARCH__ >= 5)
+#if defined (CONFIG_CPU_32) && defined(__LINUX_ARM_ARCH__) && 
(__LINUX_ARM_ARCH__ >= 5)
 static inline int constant_fls(int x)
 {
int r = 32;
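For comparison, the generic fls() that aarch64 now falls back to is a plain
shift-and-test loop roughly along these lines (a sketch modelled on the
asm-generic version, not the exact barebox code):

    static inline int generic_fls(int x)
    {
            int r = 32;

            if (!x)
                    return 0;
            if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
            if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
            if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
            if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
            if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
            return r;
    }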
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 38/78] ARM: aarch64: qemu board: remove unnecessary mapping

2018-03-16 Thread Sascha Hauer
Now that we do the initial flat mapping as device memory, all addresses
that are not SDRAM are already mapped as device memory, so we can drop
the mapping from the board file.

Signed-off-by: Sascha Hauer 
---
 arch/arm/boards/qemu-virt64/init.c | 5 -
 1 file changed, 5 deletions(-)

diff --git a/arch/arm/boards/qemu-virt64/init.c 
b/arch/arm/boards/qemu-virt64/init.c
index 18831755df..686231696e 100644
--- a/arch/arm/boards/qemu-virt64/init.c
+++ b/arch/arm/boards/qemu-virt64/init.c
@@ -66,11 +66,6 @@ postcore_initcall(virt_core_init);
 #ifdef CONFIG_MMU
 static int virt_mmu_enable(void)
 {
-   /* Mapping all periph and flash range */
-   arch_remap_range((void *)0x00000000, 0x40000000,
-PMD_ATTRINDX(MT_DEVICE_nGnRnE) | PMD_SECT_AF |
-PMD_TYPE_SECT);
-
mmu_enable();
 
return 0;
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 11/78] ARM: shutdown: Fix compiler warning

2018-03-16 Thread Sascha Hauer
On aarch64 we get an unused variable warning. Move the variable
declaration into the #ifdef where the variable is used.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/cpu.c | 28 +---
 1 file changed, 17 insertions(+), 11 deletions(-)

diff --git a/arch/arm/cpu/cpu.c b/arch/arm/cpu/cpu.c
index bf604fd60d..0e70a23260 100644
--- a/arch/arm/cpu/cpu.c
+++ b/arch/arm/cpu/cpu.c
@@ -89,6 +89,22 @@ void mmu_disable(void)
 }
 #endif
 
+static void disable_interrupts(void)
+{
+#if __LINUX_ARM_ARCH__ <= 7
+   uint32_t r;
+
+   /*
+* barebox normally does not use interrupts, but some functionalities
+* (eg. OMAP4_USBBOOT) require them enabled. So be sure interrupts are
+* disabled before exiting.
+*/
+   __asm__ __volatile__("mrs %0, cpsr" : "=r"(r));
+   r |= PSR_I_BIT;
+   __asm__ __volatile__("msr cpsr, %0" : : "r"(r));
+#endif
+}
+
 /**
  * Disable MMU and D-cache, flush caches
  * @return 0 (always)
@@ -98,23 +114,13 @@ void mmu_disable(void)
  */
 static void arch_shutdown(void)
 {
-   uint32_t r;
 
 #ifdef CONFIG_MMU
mmu_disable();
 #endif
icache_invalidate();
 
-#if __LINUX_ARM_ARCH__ <= 7
-   /*
-* barebox normally does not use interrupts, but some functionalities
-* (eg. OMAP4_USBBOOT) require them enabled. So be sure interrupts are
-* disabled before exiting.
-*/
-   __asm__ __volatile__("mrs %0, cpsr" : "=r"(r));
-   r |= PSR_I_BIT;
-   __asm__ __volatile__("msr cpsr, %0" : : "r"(r));
-#endif
+   disable_interrupts();
 }
 archshutdown_exitcall(arch_shutdown);
 
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 52/78] ARM: aarch64: compile with strict alignment

2018-03-16 Thread Sascha Hauer
barebox runs with MMU disabled at least during startup. We need
-mstrict-align for these parts to avoid alignment aborts.

Signed-off-by: Sascha Hauer 
---
 arch/arm/Makefile | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index da307343c7..cf84a9a1c1 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -23,6 +23,8 @@ endif
 # accesses
 ifneq ($(CONFIG_CPU_V8),y)
 CFLAGS += -mno-unaligned-access
+else
+CFLAGS += -mstrict-align
 endif
 
 
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 10/78] ARM: bootm: Fix wrong format specifier

2018-03-16 Thread Sascha Hauer
Signed-off-by: Sascha Hauer 
---
 arch/arm/lib/bootm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c
index 9c6174adfe..c8bf72f0e0 100644
--- a/arch/arm/lib/bootm.c
+++ b/arch/arm/lib/bootm.c
@@ -362,7 +362,7 @@ static int do_bootz_linux(struct image_data *data)
 
data->os_res = request_sdram_region("zimage", load_address, image_size);
if (!data->os_res) {
-   pr_err("bootm/zImage: failed to request memory at 0x%lx to 
0x%lx (%d).\n",
+   pr_err("bootm/zImage: failed to request memory at 0x%lx to 
0x%lx (%zu).\n",
   load_address, load_address + image_size, image_size);
ret = -ENOMEM;
goto err_out;
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 21/78] debug_ll: support 64bit longs

2018-03-16 Thread Sascha Hauer
puthex_ll outputs an unsigned long, so print all digits in case unsigned
long is 64bit.

Signed-off-by: Sascha Hauer 
---
 include/debug_ll.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/debug_ll.h b/include/debug_ll.h
index b0eb7cd7d9..5047516399 100644
--- a/include/debug_ll.h
+++ b/include/debug_ll.h
@@ -42,7 +42,7 @@ static inline void puthex_ll(unsigned long value)
 {
int i; unsigned char ch;
 
-   for (i = 8; i--; ) {
+   for (i = sizeof(unsigned long) * 2; i--; ) {
ch = ((value >> (i * 4)) & 0xf);
ch += (ch >= 10) ? 'a' - 10 : '0';
putc_ll(ch);
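A quick worked example of the effect, assuming the usual 8-byte unsigned
long on a 64bit build:

    puthex_ll(0x1234);  /* prints "0000000000001234" (16 nibbles) on 64bit,
                           "00001234" (8 nibbles) on a 32bit build */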
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 04/78] ARM: mmu: include pgtable header from where it's needed

2018-03-16 Thread Sascha Hauer
Instead of #ifdefing the correct pgtable header file to include,
include it where it's needed. Also, move the memory type attributes
into their consumers, namely the mmu.c files.

Signed-off-by: Sascha Hauer 
---
 arch/arm/boards/qemu-virt64/init.c |  5 -
 arch/arm/cpu/common.c  |  1 -
 arch/arm/cpu/mmu-early.c   |  1 +
 arch/arm/cpu/mmu.c |  4 
 arch/arm/cpu/mmu_64.c  |  4 
 arch/arm/cpu/start-pbl.c   |  1 -
 arch/arm/cpu/uncompress.c  |  1 -
 arch/arm/include/asm/mmu.h | 15 ---
 8 files changed, 13 insertions(+), 19 deletions(-)

diff --git a/arch/arm/boards/qemu-virt64/init.c 
b/arch/arm/boards/qemu-virt64/init.c
index a85bd84db7..18831755df 100644
--- a/arch/arm/boards/qemu-virt64/init.c
+++ b/arch/arm/boards/qemu-virt64/init.c
@@ -8,6 +8,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -66,7 +67,9 @@ postcore_initcall(virt_core_init);
 static int virt_mmu_enable(void)
 {
/* Mapping all periph and flash range */
-   arch_remap_range((void *)0x00000000, 0x40000000, DEV_MEM);
+   arch_remap_range((void *)0x00000000, 0x40000000,
+PMD_ATTRINDX(MT_DEVICE_nGnRnE) | PMD_SECT_AF |
+PMD_TYPE_SECT);
 
mmu_enable();
 
diff --git a/arch/arm/cpu/common.c b/arch/arm/cpu/common.c
index 6670f4b9f8..3c9864c0db 100644
--- a/arch/arm/cpu/common.c
+++ b/arch/arm/cpu/common.c
@@ -23,7 +23,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 
 /*
diff --git a/arch/arm/cpu/mmu-early.c b/arch/arm/cpu/mmu-early.c
index 2e4d316924..1549f08985 100644
--- a/arch/arm/cpu/mmu-early.c
+++ b/arch/arm/cpu/mmu-early.c
@@ -5,6 +5,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "mmu.h"
 
diff --git a/arch/arm/cpu/mmu.c b/arch/arm/cpu/mmu.c
index 459abe56ba..2c16579d79 100644
--- a/arch/arm/cpu/mmu.c
+++ b/arch/arm/cpu/mmu.c
@@ -27,11 +27,15 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
 #include "mmu.h"
 
+#define PMD_SECT_DEF_UNCACHED (PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | 
PMD_TYPE_SECT)
+#define PMD_SECT_DEF_CACHED (PMD_SECT_WB | PMD_SECT_DEF_UNCACHED)
+
 static unsigned long *ttb;
 
 static void create_sections(unsigned long virt, unsigned long phys, int size_m,
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index bfd80c0913..fd41435746 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -25,6 +25,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -33,6 +34,9 @@
 
 #include "mmu.h"
 
+#define CACHED_MEM  (PMD_ATTRINDX(MT_NORMAL) | PMD_SECT_S | PMD_SECT_AF | 
PMD_TYPE_SECT)
+#define UNCACHED_MEM(PMD_ATTRINDX(MT_NORMAL_NC) | PMD_SECT_S | PMD_SECT_AF 
| PMD_TYPE_SECT)
+
 static uint64_t *ttb;
 static int free_idx;
 
diff --git a/arch/arm/cpu/start-pbl.c b/arch/arm/cpu/start-pbl.c
index 865aa1b1c4..68b2bbf6fb 100644
--- a/arch/arm/cpu/start-pbl.c
+++ b/arch/arm/cpu/start-pbl.c
@@ -26,7 +26,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 
diff --git a/arch/arm/cpu/uncompress.c b/arch/arm/cpu/uncompress.c
index 5530919118..2588e84b66 100644
--- a/arch/arm/cpu/uncompress.c
+++ b/arch/arm/cpu/uncompress.c
@@ -27,7 +27,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index f68ab37143..2a1daeafe3 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,21 +6,6 @@
 #include 
 #include 
 
-#ifdef CONFIG_CPU_64v8
-#include 
-
-#define DEV_MEM(PMD_ATTRINDX(MT_DEVICE_nGnRnE) | PMD_SECT_AF | 
PMD_TYPE_SECT)
-#define CACHED_MEM (PMD_ATTRINDX(MT_NORMAL) | PMD_SECT_S | PMD_SECT_AF | 
PMD_TYPE_SECT)
-#define UNCACHED_MEM   (PMD_ATTRINDX(MT_NORMAL_NC) | PMD_SECT_S | PMD_SECT_AF 
| PMD_TYPE_SECT)
-#else
-#include 
-
-#define PMD_SECT_DEF_UNCACHED (PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | 
PMD_TYPE_SECT)
-#define PMD_SECT_DEF_CACHED (PMD_SECT_WB | PMD_SECT_DEF_UNCACHED)
-#endif
-
-
-
 struct arm_memory;
 
 void mmu_enable(void);
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 07/78] ARM: move away from ld_var

2018-03-16 Thread Sascha Hauer
The ld_var mechanism solves the issue that, when compiled with -pie, the
linker provided variables all read as 0x0. This mechanism, however, does
not compile for aarch64.

This patch replaces the ld_var mechanism with a nice little trick
learned from U-Boot: Instead of using linker provided variables
directly with "__bss_start = ." we put a zero size array into
a separate section and use the address of that array instead of
the linker variable. This properly works before relocation.

Signed-off-by: Sascha Hauer 
---
 arch/arm/boards/friendlyarm-tiny210/lowlevel.c |  4 ++--
 arch/arm/cpu/Makefile  |  2 +-
 arch/arm/cpu/common.c  | 11 ---
 arch/arm/cpu/sections.c| 11 +++
 arch/arm/cpu/setupc.S  |  9 -
 arch/arm/include/asm/sections.h|  5 +
 arch/arm/lib/pbl.lds.S | 22 +-
 arch/arm/lib32/barebox.lds.S   | 23 ++-
 arch/arm/lib64/barebox.lds.S   | 22 +-
 arch/arm/mach-imx/xload-common.c   |  4 ++--
 10 files changed, 57 insertions(+), 56 deletions(-)
 create mode 100644 arch/arm/cpu/sections.c

diff --git a/arch/arm/boards/friendlyarm-tiny210/lowlevel.c 
b/arch/arm/boards/friendlyarm-tiny210/lowlevel.c
index fea00ef503..4b9ba87d70 100644
--- a/arch/arm/boards/friendlyarm-tiny210/lowlevel.c
+++ b/arch/arm/boards/friendlyarm-tiny210/lowlevel.c
@@ -96,7 +96,7 @@ void __bare_init barebox_arm_reset_vector(void)
 
debug_led(1, 1);
 
-   if (! load_stage2((void*)(ld_var(_text) - 16),
+   if (! load_stage2((void*)(_text - 16),
barebox_image_size + 16)) {
debug_led(3, 1);
while (1) { } /* hang */
@@ -104,7 +104,7 @@ void __bare_init barebox_arm_reset_vector(void)
 
debug_led(2, 1);
 
-   jump_sdram(IRAM_CODE_BASE - ld_var(_text));
+   jump_sdram(IRAM_CODE_BASE - (unsigned long)_text);
 
debug_led(1, 0);
 
diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index ba729fb6e4..b2fed2be51 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -46,4 +46,4 @@ pbl-y += entry.o
 pbl-$(CONFIG_PBL_SINGLE_IMAGE) += start-pbl.o
 pbl-$(CONFIG_PBL_MULTI_IMAGES) += uncompress.o
 
-obj-pbl-y += common.o cache.o
+obj-pbl-y += common.o cache.o sections.o
diff --git a/arch/arm/cpu/common.c b/arch/arm/cpu/common.c
index 3c9864c0db..7c07d00c1b 100644
--- a/arch/arm/cpu/common.c
+++ b/arch/arm/cpu/common.c
@@ -36,11 +36,11 @@ void relocate_to_current_adr(void)
/* Get offset between linked address and runtime address */
offset = get_runtime_offset();
 
-   dstart = (void *)(ld_var(__rel_dyn_start) + offset);
-   dend = (void *)(ld_var(__rel_dyn_end) + offset);
+   dstart = (void *)__rel_dyn_start + offset;
+   dend = (void *)__rel_dyn_end + offset;
 
-   dynsym = (void *)(ld_var(__dynsym_start) + offset);
-   dynend = (void *)(ld_var(__dynsym_end) + offset);
+   dynsym = (void *)__dynsym_start + offset;
+   dynend = (void *)__dynsym_end + offset;
 
while (dstart < dend) {
uint32_t *fixup = (uint32_t *)(*dstart + offset);
@@ -77,6 +77,3 @@ int __pure cpu_architecture(void)
return __cpu_architecture;
 }
 #endif
-
-char __image_start[0] __attribute__((section(".__image_start")));
-char __image_end[0] __attribute__((section(".__image_end")));
\ No newline at end of file
diff --git a/arch/arm/cpu/sections.c b/arch/arm/cpu/sections.c
new file mode 100644
index 00..5874da2b82
--- /dev/null
+++ b/arch/arm/cpu/sections.c
@@ -0,0 +1,11 @@
+#include 
+
+char __rel_dyn_start[0] __attribute__((section(".__rel_dyn_start")));
+char __rel_dyn_end[0] __attribute__((section(".__rel_dyn_end")));
+char __dynsym_start[0] __attribute__((section(".__dynsym_start")));
+char __dynsym_end[0] __attribute__((section(".__dynsym_end")));
+char _text[0] __attribute__((section("._text")));
+char __bss_start[0] __attribute__((section(".__bss_start")));
+char __bss_stop[0] __attribute__((section(".__bss_stop")));
+char __image_start[0] __attribute__((section(".__image_start")));
+char __image_end[0] __attribute__((section(".__image_end")));
diff --git a/arch/arm/cpu/setupc.S b/arch/arm/cpu/setupc.S
index 30e88330e7..717500cfff 100644
--- a/arch/arm/cpu/setupc.S
+++ b/arch/arm/cpu/setupc.S
@@ -55,17 +55,16 @@ ENTRY(relocate_to_adr)
 
mov r5, r0
 
-   ld_var  _text, r0, r4
-   mov r8, r0
+   ldr r8, =_text
 
-   add r1, r0, r5  /* r1: from address */
+   add r1, r8, r5  /* r1: from address */
 
cmp r1, r6  /* already at correct address? */
beq 1f  /* yes, skip copy to new address */
 
-   ld_var  __bss_start, r2, r4
+   ldr r2, =__bss_start
 
-   
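For completeness, the linker script side of this trick pairs each zero-size
array's input section with the location the old symbol assignment used to
mark, roughly like this sketch (section names as in sections.c above, the
exact layout in the real lds files may differ):

    . = ALIGN(4);
    .__bss_start : { *(.__bss_start) }
    .bss : { *(.bss*) }
    .__bss_stop : { *(.__bss_stop) }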

[PATCH 05/78] ARM: For relocatable image force TEXT_BASE 0x0

2018-03-16 Thread Sascha Hauer
Nothing else should be used for the relocatable image case, so
force TEXT_BASE to 0x0 and do not show it in the menu.

Signed-off-by: Sascha Hauer 
---
 arch/arm/Kconfig | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e7edc2ad44..563475205d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2,7 +2,7 @@ config ARM
bool
select HAS_KALLSYMS
select HAS_CACHE
-   select HAVE_CONFIGURABLE_TEXT_BASE
+   select HAVE_CONFIGURABLE_TEXT_BASE if !RELOCATABLE
select HAVE_IMAGE_COMPRESSION
default y
 
@@ -19,6 +19,10 @@ config ARM_USE_COMPRESSED_DTB
select UNCOMPRESS
select LZO_DECOMPRESS
 
+config TEXT_BASE
+   hex
+   default 0x0
+
 menu "System Type"
 
 config BUILTIN_DTB
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 01/78] ARM: Use obj-pbl- where appropriate

2018-03-16 Thread Sascha Hauer
When sourcefiles are compiled for barebox and pbl we have obj-pbl-. Use
it where appropriate.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/Makefile | 28 ++--
 1 file changed, 10 insertions(+), 18 deletions(-)

diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index 13fe12c31f..94f7d1bf45 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -6,8 +6,8 @@ obj-$(CONFIG_MMU) += mmu_64.o
 lwl-y += lowlevel_64.o
 else
 obj-$(CONFIG_ARM_EXCEPTIONS) += exceptions.o
-obj-$(CONFIG_MMU) += mmu.o mmu-early.o
-pbl-$(CONFIG_MMU) += mmu-early.o
+obj-$(CONFIG_MMU) += mmu.o
+obj-pbl-$(CONFIG_MMU) += mmu-early.o
 lwl-y += lowlevel.o
 endif
 
@@ -15,11 +15,9 @@ obj-$(CONFIG_ARM_EXCEPTIONS) += interrupts.o
 obj-y += start.o entry.o
 
 ifeq ($(CONFIG_CPU_64v8), y)
-obj-y += setupc_64.o
-pbl-y += setupc_64.o
+obj-pbl-y += setupc_64.o
 else
-obj-y += setupc.o
-pbl-y += setupc.o
+obj-pbl-y += setupc.o
 endif
 
 #
@@ -40,25 +38,19 @@ AFLAGS_smccc-call.o :=-Wa,-march=armv7-a
 obj-$(CONFIG_ARM_SECURE_MONITOR) += sm.o sm_as.o
 AFLAGS_sm_as.o :=-Wa,-march=armv7-a
 
-obj-$(CONFIG_CPU_32v4T) += cache-armv4.o
-pbl-$(CONFIG_CPU_32v4T) += cache-armv4.o
-obj-$(CONFIG_CPU_32v5) += cache-armv5.o
-pbl-$(CONFIG_CPU_32v5) += cache-armv5.o
-obj-$(CONFIG_CPU_32v6) += cache-armv6.o
-pbl-$(CONFIG_CPU_32v6) += cache-armv6.o
+obj-pbl-$(CONFIG_CPU_32v4T) += cache-armv4.o
+obj-pbl-$(CONFIG_CPU_32v5) += cache-armv5.o
+obj-pbl-$(CONFIG_CPU_32v6) += cache-armv6.o
 AFLAGS_cache-armv7.o   :=-Wa,-march=armv7-a
-obj-$(CONFIG_CPU_32v7) += cache-armv7.o
+obj-pbl-$(CONFIG_CPU_32v7) += cache-armv7.o
 AFLAGS_pbl-cache-armv7.o   :=-Wa,-march=armv7-a
-pbl-$(CONFIG_CPU_32v7) += cache-armv7.o
 obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
 AFLAGS_cache-armv8.o   :=-Wa,-march=armv8-a
-obj-$(CONFIG_CPU_64v8) += cache-armv8.o
+obj-pbl-$(CONFIG_CPU_64v8) += cache-armv8.o
 AFLAGS_pbl-cache-armv8.o   :=-Wa,-march=armv8-a
-pbl-$(CONFIG_CPU_64v8) += cache-armv8.o
 
 pbl-y += entry.o
 pbl-$(CONFIG_PBL_SINGLE_IMAGE) += start-pbl.o
 pbl-$(CONFIG_PBL_MULTI_IMAGES) += uncompress.o
 
-obj-y += common.o cache.o
-pbl-y += common.o cache.o
+obj-pbl-y += common.o cache.o
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 06/78] ARM: scroll past image end without ld_var

2018-03-16 Thread Sascha Hauer
ld_var is going to be removed, so cope without it. In the PBL image
we want to find the location right after the binary, which is where
the compressed image is placed. To do this, put a variable at the
very end of the binary, get its location and add an offset.

Signed-off-by: Sascha Hauer 
---
 arch/arm/cpu/uncompress.c | 17 +
 arch/arm/lib/pbl.lds.S|  5 ++---
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/arm/cpu/uncompress.c b/arch/arm/cpu/uncompress.c
index 2588e84b66..b600c1e7ec 100644
--- a/arch/arm/cpu/uncompress.c
+++ b/arch/arm/cpu/uncompress.c
@@ -37,9 +37,8 @@
 unsigned long free_mem_ptr;
 unsigned long free_mem_end_ptr;
 
-static int __attribute__((__used__))
-   __attribute__((__section__(".image_end")))
-   __image_end_dummy = 0xdeadbeef;
+static int __attribute__((__section__(".image_end")))
+   image_end_marker = 0xdeadbeef;
 
 void __noreturn barebox_multi_pbl_start(unsigned long membase,
unsigned long memsize, void *boarddata)
@@ -52,7 +51,7 @@ void __noreturn barebox_multi_pbl_start(unsigned long membase,
void *pg_start;
unsigned long pc = get_pc();
 
-   image_end = (void *)ld_var(__image_end) + get_runtime_offset();
+   image_end = (void *)&image_end_marker + get_runtime_offset();
 
if (IS_ENABLED(CONFIG_PBL_RELOCATABLE)) {
/*
@@ -67,11 +66,13 @@ void __noreturn barebox_multi_pbl_start(unsigned long 
membase,
}
 
/*
-* image_end is the first location after the executable. It contains
-* the size of the appended compressed binary followed by the binary.
+* image_end is the image_end_marker defined above. It is the last 
location
+* in the executable. Right after the executable the build process adds
+* the size of the appended compressed binary followed by the compressed
+* binary itself.
 */
-   pg_start = image_end + 1;
-   pg_len = *(image_end);
+   pg_start = image_end + 2;
+   pg_len = *(image_end + 1);
uncompressed_len = get_unaligned((const u32 *)(pg_start + pg_len - 4));
 
if (IS_ENABLED(CONFIG_RELOCATABLE))
diff --git a/arch/arm/lib/pbl.lds.S b/arch/arm/lib/pbl.lds.S
index 73baff0ca5..7de7791b71 100644
--- a/arch/arm/lib/pbl.lds.S
+++ b/arch/arm/lib/pbl.lds.S
@@ -87,10 +87,9 @@ SECTIONS
__piggydata_end = .;
 
. = ALIGN(4);
-   .image_end : {
-   KEEP(*(.image_end))
-   }
+   .image_end : { *(.image_end) }
__image_end = .;
+
_barebox_image_size = __image_end - BASE;
_barebox_pbl_size = __bss_start - BASE;
 }
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 09/78] ARM: android image: Fix compiler warning on aarch64

2018-03-16 Thread Sascha Hauer
The android image format has a u32 value which tells the loader
where to put the ATAG list. Casting this value to a pointer directly
causes a warning on aarch64. Silence it by casting it to unsigned
long first. The code is of no use on aarch64, so no need to fix anything
really.

Signed-off-by: Sascha Hauer 
---
 arch/arm/lib/bootm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c
index 25efb42541..9c6174adfe 100644
--- a/arch/arm/lib/bootm.c
+++ b/arch/arm/lib/bootm.c
@@ -522,7 +522,7 @@ static int do_bootm_aimage(struct image_data *data)
linux_bootargs_overwrite(header->cmdline);
 
if (!getenv("aimage_noverwrite_tags"))
-   armlinux_set_bootparams((void*)header->tags_addr);
+   armlinux_set_bootparams((void *)(unsigned 
long)header->tags_addr);
 
cmp = &header->second_stage;
if (cmp->size) {
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox


[PATCH 02/78] ARM: Add 64bit compilation alternative

2018-03-16 Thread Sascha Hauer
Our 64bit file variants have the suffix "_64". This adds a make variable
that is either empty or contains "_64" and can be used to select
between the 32bit and 64bit variants when compiling.

Signed-off-by: Sascha Hauer 
---
 arch/arm/Makefile |  1 +
 arch/arm/cpu/Makefile | 19 ++-
 2 files changed, 7 insertions(+), 13 deletions(-)

diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 620a3ccb0b..da307343c7 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -65,6 +65,7 @@ endif
 ifeq ($(CONFIG_CPU_V8), y)
 CPPFLAGS += $(CFLAGS_ABI) $(arch-y) $(tune-y)
 AFLAGS   += -include asm/unified.h
+export S64 = _64
 else
 CPPFLAGS += $(CFLAGS_ABI) $(arch-y) $(tune-y) -msoft-float $(CFLAGS_THUMB2)
 AFLAGS   += -include asm/unified.h -msoft-float $(AFLAGS_THUMB2)
diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index 94f7d1bf45..ba729fb6e4 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -1,24 +1,17 @@
 obj-y += cpu.o
 
-ifeq ($(CONFIG_CPU_64v8), y)
-obj-$(CONFIG_ARM_EXCEPTIONS) += exceptions_64.o
-obj-$(CONFIG_MMU) += mmu_64.o
-lwl-y += lowlevel_64.o
-else
-obj-$(CONFIG_ARM_EXCEPTIONS) += exceptions.o
-obj-$(CONFIG_MMU) += mmu.o
+obj-$(CONFIG_ARM_EXCEPTIONS) += exceptions$(S64).o
+obj-$(CONFIG_MMU) += mmu$(S64).o
+lwl-y += lowlevel$(S64).o
+
+ifeq ($(CONFIG_CPU_32), y)
 obj-pbl-$(CONFIG_MMU) += mmu-early.o
-lwl-y += lowlevel.o
 endif
 
 obj-$(CONFIG_ARM_EXCEPTIONS) += interrupts.o
 obj-y += start.o entry.o
 
-ifeq ($(CONFIG_CPU_64v8), y)
-obj-pbl-y += setupc_64.o
-else
-obj-pbl-y += setupc.o
-endif
+obj-pbl-y += setupc$(S64).o
 
 #
 # Any variants can be called as start-armxyz.S
-- 
2.16.1


___
barebox mailing list
barebox@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/barebox