LS1046A-RDB platform support in Zephyr has been upstreamed to the Zephyr community.
Best Regards, Jiafei. > -----Original Message----- > From: liandao <[email protected]> > Sent: Friday, May 21, 2021 7:30 PM > To: Jiafei Pan <[email protected]> > Cc: [email protected] > Subject: [EXT] Re: [PATCH] nxp: ls1046ardb: add configure file for running > zephyr inmate > > Caution: EXT Email > > where is Zephyr demo source code for ls1046a-rdb? > > On 5/21/21, Jiafei Pan <[email protected]> wrote: > > 1. Use virt_addr to be same with phys_addr for memory 2. Loading > > inmate binary from start of virt_addr 3. Use 1M bytes memory in order > > to load Zephyr 4. Using CPU Core2 and Core3 to demo Zephyr SMP > > > > Signed-off-by: Jiafei Pan <[email protected]> > > --- > > configs/arm64/ls1046a-rdb-zephyr-demo.c | 141 > > ++++++++++++++++++++++++ > > 1 file changed, 141 insertions(+) > > create mode 100644 configs/arm64/ls1046a-rdb-zephyr-demo.c > > > > diff --git a/configs/arm64/ls1046a-rdb-zephyr-demo.c > > b/configs/arm64/ls1046a-rdb-zephyr-demo.c > > new file mode 100644 > > index 00000000..55c0e78f > > --- /dev/null > > +++ b/configs/arm64/ls1046a-rdb-zephyr-demo.c > > @@ -0,0 +1,141 @@ > > +/* > > + * ls1046a RDB - inmate demo > > + * > > + * Copyright NXP 2020 > > + * > > + * Authors: > > + * Jiafei Pan <[email protected]> > > + * > > + * This work is licensed under the terms of the GNU GPL, version 2. > > +See > > + * the COPYING file in the top-level directory. 
> > + */ > > + > > +#include <jailhouse/types.h> > > +#include <jailhouse/cell-config.h> > > + > > +struct { > > + struct jailhouse_cell_desc cell; > > + __u64 cpus[1]; > > + struct jailhouse_memory mem_regions[8]; > > + struct jailhouse_irqchip irqchips[2]; > > + struct jailhouse_pci_device pci_devices[1]; } > > +__attribute__((packed)) config = { > > + .cell = { > > + .signature = JAILHOUSE_CELL_DESC_SIGNATURE, > > + .revision = JAILHOUSE_CONFIG_REVISION, > > + .name = "inmate-demo", > > + .flags = JAILHOUSE_CELL_PASSIVE_COMMREG, > > + > > + .cpu_set_size = sizeof(config.cpus), > > + .num_memory_regions = > ARRAY_SIZE(config.mem_regions), > > + .num_irqchips = ARRAY_SIZE(config.irqchips), > > + .num_pci_devices = ARRAY_SIZE(config.pci_devices), > > + .vpci_irq_base = 60 - 32, /* vPCI INTx */ > > + > > + .cpu_reset_address = 0xc0000000, > > + > > + .console = { > > + .address = 0x21c0600, /* Uart1 in DUART1 > */ > > + .divider = 0xbd, /* baudrate: 115200 */ > > + .type = JAILHOUSE_CON_TYPE_8250, > > + .flags = JAILHOUSE_CON_ACCESS_MMIO | > > + JAILHOUSE_CON_REGDIST_1, > > + }, > > + }, > > + > > + .cpus = { > > + 0xc, > > + }, > > + > > + .mem_regions = { > > + /* IVSHMEM shared memory region for 00:00.0 */ { > > + .phys_start = 0xfb700000, > > + .virt_start = 0xfb700000, > > + .size = 0x1000, > > + .flags = JAILHOUSE_MEM_READ | > JAILHOUSE_MEM_ROOTSHARED, > > + }, > > + { > > + .phys_start = 0xfb701000, > > + .virt_start = 0xfb701000, > > + .size = 0x9000, > > + .flags = JAILHOUSE_MEM_READ | > JAILHOUSE_MEM_WRITE | > > + JAILHOUSE_MEM_ROOTSHARED, > > + }, > > + { > > + .phys_start = 0xfb70a000, > > + .virt_start = 0xfb70a000, > > + .size = 0x2000, > > + .flags = JAILHOUSE_MEM_READ | > JAILHOUSE_MEM_ROOTSHARED, > > + }, > > + { > > + .phys_start = 0xfb70c000, > > + .virt_start = 0xfb70c000, > > + .size = 0x2000, > > + .flags = JAILHOUSE_MEM_READ | > JAILHOUSE_MEM_WRITE | > > + JAILHOUSE_MEM_ROOTSHARED, > > + }, > > + { > > + .phys_start = 0xfb70e000, > > + 
.virt_start = 0xfb70e000, > > + .size = 0x2000, > > + .flags = JAILHOUSE_MEM_READ | > JAILHOUSE_MEM_ROOTSHARED, > > + }, > > + /* DUART1 */ { > > + .phys_start = 0x21c0000, > > + .virt_start = 0x21c0000, > > + .size = 0x10000, > > + .flags = JAILHOUSE_MEM_READ | > JAILHOUSE_MEM_WRITE | > > + JAILHOUSE_MEM_IO | > JAILHOUSE_MEM_ROOTSHARED, > > + }, > > + /* RAM: Top at 2GB DRAM1 Space */ { > > + .phys_start = 0xc0000000, > > + .virt_start = 0xc0000000, > > + .size = 0x00100000, > > + .flags = JAILHOUSE_MEM_READ | > JAILHOUSE_MEM_WRITE | > > + JAILHOUSE_MEM_EXECUTE | > JAILHOUSE_MEM_LOADABLE, > > + }, > > + /* communication region */ { > > + .virt_start = 0x80000000, > > + .size = 0x00001000, > > + .flags = JAILHOUSE_MEM_READ | > JAILHOUSE_MEM_WRITE | > > + JAILHOUSE_MEM_COMM_REGION, > > + }, > > + }, > > + > > + .irqchips = { > > + /* GIC-400 */ { > > + .address = 0x1410000, > > + .pin_base = 32, > > + .pin_bitmap = { > > + 1 << (60 - 32), /* vPCI */ > > + 0, > > + 0, > > + 0, > > + }, > > + }, > > + /* GIC-400 */ { > > + .address = 0x1410000, > > + .pin_base = 160, > > + .pin_bitmap = { > > + 0, > > + 0, > > + 0, > > + 0, > > + }, > > + }, > > + }, > > + > > + .pci_devices = { > > + { /* IVSHMEM 00:00.0 (demo) */ > > + .type = JAILHOUSE_PCI_TYPE_IVSHMEM, > > + .domain = 0, > > + .bdf = 0 << 3, > > + .bar_mask = > JAILHOUSE_IVSHMEM_BAR_MASK_INTX, > > + .shmem_regions_start = 0, > > + .shmem_dev_id = 1, > > + .shmem_peers = 1, > > + .shmem_protocol = > JAILHOUSE_SHMEM_PROTO_UNDEFINED, > > + }, > > + }, > > + > > +}; > > -- > > 2.17.1 > > > > -- > > You received this message because you are subscribed to the Google > > Groups "Jailhouse" group. > > To unsubscribe from this group and stop receiving emails from it, send > > an email to [email protected]. 
> > To view this discussion on the web visit > > > https://eur01.safelinks.protection.outlook.com/?url=https%3A%2F%2Fgroups > .google.com%2Fd%2Fmsgid%2Fjailhouse-dev%2F20210521062144.11659-1-Ji > afei.Pan%2540nxp.com&data=04%7C01%7CJiafei.Pan%40nxp.com%7C5 > ebb6ac421734ff1e81608d91c4bc0ea%7C686ea1d3bc2b4c6fa92cd99c5c3016 > 35%7C0%7C1%7C637571933949905985%7CUnknown%7CTWFpbGZsb3d8ey > JWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D > %7C3000&sdata=%2BenZUziveuQ2rXXZ26vosUTS29X3p7KXjgDjcLOX5dA > %3D&reserved=0. > > -- You received this message because you are subscribed to the Google Groups "Jailhouse" group. To unsubscribe from this group and stop receiving emails from it, send an email to [email protected]. To view this discussion on the web visit https://groups.google.com/d/msgid/jailhouse-dev/AS8PR04MB818293A8AD7EE178D696E0F38A299%40AS8PR04MB8182.eurprd04.prod.outlook.com.
