This is an automated email from Gerrit. "Erhan Kurubas <erhan.kuru...@espressif.com>" just uploaded a new patch set to Gerrit, which you can find at https://review.openocd.org/c/openocd/+/6991
-- gerrit commit 7da084523a0839bf1b382f1b9f66cb7a97d66e8a
Author: Erhan Kurubas <erhan.kuru...@espressif.com>
Date: Thu Apr 21 07:53:54 2022 +0200

target: add alternative work area config

During stub flasher operations, Espressif targets use two different working areas in SRAM (one for code, one for data). OpenOCD currently supports only a single work-area configuration, which is not suitable for Harvard-architecture CPUs. This patch adds another set of configuration commands (-alt-work-area-xxx) to specify a working area for the data space.

Signed-off-by: Erhan Kurubas <erhan.kuru...@espressif.com>
Change-Id: I50aab41a6405a353ac8ce6fdf70ca6f1198169f6

diff --git a/src/flash/nand/lpc3180.c b/src/flash/nand/lpc3180.c index bda7b87c32..b15065e818 100644 --- a/src/flash/nand/lpc3180.c +++ b/src/flash/nand/lpc3180.c @@ -593,22 +593,22 @@ static int lpc3180_write_page(struct nand_device *nand, return retval; /* allocate a working area */ - if (target->working_area_size < (uint32_t) nand->page_size + 0x200) { + if (target->working_area_cfg.size < (uint32_t) nand->page_size + 0x200) { LOG_ERROR("Reserve at least 0x%x physical target working area", nand->page_size + 0x200); return ERROR_FLASH_OPERATION_FAILED; } - if (target->working_area_phys%4) { + if (target->working_area_cfg.phys % 4) { LOG_ERROR( "Reserve the physical target working area at word boundary"); return ERROR_FLASH_OPERATION_FAILED; } - if (target_alloc_working_area(target, target->working_area_size, + if (target_alloc_working_area(target, target->working_area_cfg.size, &pworking_area) != ERROR_OK) { LOG_ERROR("no working area specified, can't read LPC internal flash"); return ERROR_FLASH_OPERATION_FAILED; } - target_mem_base = target->working_area_phys; + target_mem_base = target->working_area_cfg.phys; if (nand->page_size == 2048) page_buffer = malloc(2048); @@ -974,22 +974,22 @@ static int lpc3180_read_page(struct nand_device *nand, return retval; /* allocate a working area */ - if (target->working_area_size < (uint32_t) nand->page_size + 0x200) { + if (target->working_area_cfg.size < (uint32_t) nand->page_size + 0x200) { LOG_ERROR("Reserve at least 0x%x physical target working area", nand->page_size + 0x200); return ERROR_FLASH_OPERATION_FAILED; } - if (target->working_area_phys%4) { + if (target->working_area_cfg.phys % 4) { LOG_ERROR( "Reserve the physical target working area at word boundary"); return ERROR_FLASH_OPERATION_FAILED; } - if (target_alloc_working_area(target, target->working_area_size, + if (target_alloc_working_area(target, target->working_area_cfg.size, &pworking_area) != ERROR_OK) { LOG_ERROR("no working area specified, can't read LPC internal flash"); return ERROR_FLASH_OPERATION_FAILED; } - target_mem_base = target->working_area_phys; + target_mem_base = target->working_area_cfg.phys; if (nand->page_size == 2048) page_buffer = malloc(2048); diff --git a/src/flash/nor/fm3.c b/src/flash/nor/fm3.c index 831f342571..d5c1d011f7 100644 --- a/src/flash/nor/fm3.c +++ b/src/flash/nor/fm3.c @@ -362,8 +362,8 @@ static int fm3_write_block(struct flash_bank *bank, const uint8_t *buffer, uint32_t u32_flash_seq_address2; /* Increase buffer_size if needed */ - if (buffer_size < (target->working_area_size / 2)) - buffer_size = (target->working_area_size / 2); + if (buffer_size < target->working_area_cfg.size / 2) + buffer_size = target->working_area_cfg.size / 2; u32_flash_type = (uint32_t) fm3_info->flashtype; diff --git a/src/flash/nor/numicro.c b/src/flash/nor/numicro.c index cb7c1df836..58b85bbdfb 100644 --- a/src/flash/nor/numicro.c +++ b/src/flash/nor/numicro.c
@@ -1345,8 +1345,8 @@ static int numicro_writeblock(struct flash_bank *bank, const uint8_t *buffer, */ /* Increase buffer_size if needed */ - if (buffer_size < (target->working_area_size/2)) - buffer_size = (target->working_area_size/2); + if (buffer_size < target->working_area_cfg.size / 2) + buffer_size = target->working_area_cfg.size / 2; /* check code alignment */ if (offset & 0x1) { diff --git a/src/flash/nor/psoc5lp.c b/src/flash/nor/psoc5lp.c index f383213ba6..80a9db49a7 100644 --- a/src/flash/nor/psoc5lp.c +++ b/src/flash/nor/psoc5lp.c @@ -1195,7 +1195,7 @@ static int psoc5lp_write(struct flash_bank *bank, const uint8_t *buffer, LOG_DEBUG("Get_Temp: sign 0x%02" PRIx8 ", magnitude 0x%02" PRIx8, temp[0], temp[1]); - assert(target_get_working_area_avail(target) == target->working_area_size); + assert(target_get_working_area_avail(target) == target->working_area_cfg.size); retval = target_alloc_working_area(target, target_get_working_area_avail(target) / 2, &code_area); if (retval != ERROR_OK) { diff --git a/src/target/arm7_9_common.c b/src/target/arm7_9_common.c index da047c3d02..45daa1474b 100644 --- a/src/target/arm7_9_common.c +++ b/src/target/arm7_9_common.c @@ -2717,7 +2717,7 @@ int arm7_9_check_reset(struct target *target) LOG_WARNING( "NOTE! DCC downloads have not been enabled, defaulting to slow memory writes. Type 'help dcc'."); - if (get_target_reset_nag() && (target->working_area_size == 0)) + if (get_target_reset_nag() && (target->working_area_cfg.size == 0)) LOG_WARNING("NOTE! Severe performance degradation without working memory enabled."); if (get_target_reset_nag() && !arm7_9->fast_memory_access) diff --git a/src/target/target.c b/src/target/target.c index 25e58f11dc..3c90c04a7a 100644 --- a/src/target/target.c +++ b/src/target/target.c @@ -1968,9 +1968,9 @@ int64_t target_timer_next_event(void) } /* Prints the working area layout for debug purposes */ -static void print_wa_layout(struct target *target) +static void print_wa_layout(struct working_area_config *wa_cfg) { - struct working_area *c = target->working_areas; + struct working_area *c = wa_cfg->areas; while (c) { LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)", @@ -2011,9 +2011,9 @@ static void target_split_working_area(struct working_area *area, uint32_t size) } /* Merge all adjacent free areas into one */ -static void target_merge_working_areas(struct target *target) +static void target_merge_working_areas(struct working_area_config *wa_cfg) { - struct working_area *c = target->working_areas; + struct working_area *c = wa_cfg->areas; while (c && c->next) { assert(c->next->address == c->address + c->size); /* This is an invariant */ @@ -2039,10 +2039,14 @@ static void target_merge_working_areas(struct target *target) } } -int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area) +static int alloc_working_area_try_do( + struct target *target, + struct working_area_config *wa_cfg, + uint32_t size, + struct working_area **area) { /* Reevaluate working area address based on MMU state*/ - if (!target->working_areas) { + if (!wa_cfg->areas) { int retval; int enabled; @@ -2051,22 +2055,22 @@ int target_alloc_working_area_try(struct target *target, uint32_t size, struct w return retval; if (!enabled) { - if (target->working_area_phys_spec) { + if (wa_cfg->phys_spec) { LOG_DEBUG("MMU disabled, using physical " - "address for working memory " TARGET_ADDR_FMT, - target->working_area_phys); - target->working_area = target->working_area_phys; + "address 
for working memory "TARGET_ADDR_FMT, + wa_cfg->phys); + wa_cfg->area = wa_cfg->phys; } else { LOG_ERROR("No working memory available. " "Specify -work-area-phys to target."); return ERROR_TARGET_RESOURCE_NOT_AVAILABLE; } } else { - if (target->working_area_virt_spec) { + if (wa_cfg->virt_spec) { LOG_DEBUG("MMU enabled, using virtual " - "address for working memory " TARGET_ADDR_FMT, - target->working_area_virt); - target->working_area = target->working_area_virt; + "address for working memory "TARGET_ADDR_FMT, + wa_cfg->virt); + wa_cfg->area = wa_cfg->virt; } else { LOG_ERROR("No working memory available. " "Specify -work-area-virt to target."); @@ -2078,21 +2082,21 @@ int target_alloc_working_area_try(struct target *target, uint32_t size, struct w struct working_area *new_wa = malloc(sizeof(*new_wa)); if (new_wa) { new_wa->next = NULL; - new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */ - new_wa->address = target->working_area; + new_wa->size = ALIGN_DOWN(wa_cfg->size, 4); + new_wa->address = wa_cfg->area; new_wa->backup = NULL; new_wa->user = NULL; new_wa->free = true; } - target->working_areas = new_wa; + wa_cfg->areas = new_wa; } /* only allocate multiples of 4 byte */ if (size % 4) size = (size + 3) & (~3UL); - struct working_area *c = target->working_areas; + struct working_area *c = wa_cfg->areas; /* Find the first large enough working area */ while (c) { @@ -2110,7 +2114,7 @@ int target_alloc_working_area_try(struct target *target, uint32_t size, struct w LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT, size, c->address); - if (target->backup_working_area) { + if (wa_cfg->backup) { if (!c->backup) { c->backup = malloc(c->size); if (!c->backup) @@ -2129,27 +2133,49 @@ int target_alloc_working_area_try(struct target *target, uint32_t size, struct w /* user pointer */ c->user = area; - print_wa_layout(target); + print_wa_layout(wa_cfg); return ERROR_OK; } +int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area) +{ + return alloc_working_area_try_do(target, &target->working_area_cfg, size, area); +} + +int target_alloc_alt_working_area_try(struct target *target, uint32_t size, struct working_area **area) +{ + return alloc_working_area_try_do(target, &target->alt_working_area_cfg, size, area); +} + int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area) { int retval; retval = target_alloc_working_area_try(target, size, area); if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE) - LOG_WARNING("not enough working area available(requested %"PRIu32")", size); + LOG_WARNING("not enough working area available(requested %" PRIu32 ")", size); return retval; +} + +int target_alloc_alt_working_area(struct target *target, uint32_t size, struct working_area **area) +{ + int retval; + retval = target_alloc_alt_working_area_try(target, size, area); + if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE) + LOG_WARNING("not enough working area available(requested %" PRIu32 ")", size); + return retval; } -static int target_restore_working_area(struct target *target, struct working_area *area) +static int target_restore_working_area( + struct target *target, + struct working_area_config *wa_cfg, + struct working_area *area) { int retval = ERROR_OK; - if (target->backup_working_area && area->backup) { + if (wa_cfg->backup && area->backup) { retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup); if (retval != ERROR_OK) LOG_ERROR("failed to 
restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT, @@ -2160,14 +2186,18 @@ static int target_restore_working_area(struct target *target, struct working_are } /* Restore the area's backup memory, if any, and return the area to the allocation pool */ -static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore) +static int target_free_working_area_restore( + struct target *target, + struct working_area_config *wa_cfg, + struct working_area *area, + int restore) { if (!area || area->free) return ERROR_OK; int retval = ERROR_OK; if (restore) { - retval = target_restore_working_area(target, area); + retval = target_restore_working_area(target, wa_cfg, area); /* REVISIT: Perhaps the area should be freed even if restoring fails. */ if (retval != ERROR_OK) return retval; @@ -2185,24 +2215,32 @@ static int target_free_working_area_restore(struct target *target, struct workin *area->user = NULL; area->user = NULL; - target_merge_working_areas(target); + target_merge_working_areas(wa_cfg); - print_wa_layout(target); + print_wa_layout(wa_cfg); return retval; } int target_free_working_area(struct target *target, struct working_area *area) { - return target_free_working_area_restore(target, area, 1); + return target_free_working_area_restore(target, &target->working_area_cfg, area, 1); +} + +int target_free_alt_working_area(struct target *target, struct working_area *area) +{ + return target_free_working_area_restore(target, &target->alt_working_area_cfg, area, 1); } /* free resources and restore memory, if restoring memory fails, * free up resources anyway */ -static void target_free_all_working_areas_restore(struct target *target, int restore) +static void target_free_all_working_areas_restore( + struct target *target, + struct working_area_config *wa_cfg, + int restore) { - struct working_area *c = target->working_areas; + struct working_area *c = wa_cfg->areas; LOG_DEBUG("freeing all working areas"); @@ -2210,7 +2248,7 @@ static void target_free_all_working_areas_restore(struct target *target, int res while (c) { if (!c->free) { if (restore) - target_restore_working_area(target, c); + target_restore_working_area(target, wa_cfg, c); c->free = true; *c->user = NULL; /* Same as above */ c->user = NULL; @@ -2219,32 +2257,41 @@ static void target_free_all_working_areas_restore(struct target *target, int res } /* Run a merge pass to combine all areas into one */ - target_merge_working_areas(target); + target_merge_working_areas(wa_cfg); - print_wa_layout(target); + print_wa_layout(wa_cfg); } -void target_free_all_working_areas(struct target *target) +static void target_free_all_working_areas_do(struct target *target, struct working_area_config *wa_cfg) { - target_free_all_working_areas_restore(target, 1); - + target_free_all_working_areas_restore(target, wa_cfg, 1); /* Now we have none or only one working area marked as free */ - if (target->working_areas) { + if (wa_cfg->areas) { /* Free the last one to allow on-the-fly moving and resizing */ - free(target->working_areas->backup); - free(target->working_areas); - target->working_areas = NULL; + free(wa_cfg->areas->backup); + free(wa_cfg->areas); + wa_cfg->areas = NULL; } } +void target_free_all_working_areas(struct target *target) +{ + target_free_all_working_areas_do(target, &target->working_area_cfg); +} + +void target_free_all_alt_working_areas(struct target *target) +{ + target_free_all_working_areas_do(target, &target->alt_working_area_cfg); +} + /* Find the largest number of bytes 
that can be allocated */ -uint32_t target_get_working_area_avail(struct target *target) +static uint32_t get_working_area_avail_do(struct target *target, struct working_area_config *wa_cfg) { - struct working_area *c = target->working_areas; + struct working_area *c = wa_cfg->areas; uint32_t max_size = 0; if (!c) - return target->working_area_size; + return wa_cfg->size; while (c) { if (c->free && max_size < c->size) @@ -2256,6 +2303,16 @@ uint32_t target_get_working_area_avail(struct target *target) return max_size; } +uint32_t target_get_working_area_avail(struct target *target) +{ + return get_working_area_avail_do(target, &target->working_area_cfg); +} + +uint32_t target_get_alt_working_area_avail(struct target *target) +{ + return get_working_area_avail_do(target, &target->alt_working_area_cfg); +} + static void target_destroy(struct target *target) { if (target->type->deinit_target) @@ -5303,6 +5360,10 @@ enum target_cfg_param { TCFG_CHAIN_POSITION, TCFG_DBGBASE, TCFG_RTOS, + TCFG_ALT_WORK_AREA_VIRT, + TCFG_ALT_WORK_AREA_PHYS, + TCFG_ALT_WORK_AREA_SIZE, + TCFG_ALT_WORK_AREA_BACKUP, TCFG_DEFER_EXAMINE, TCFG_GDB_PORT, TCFG_GDB_MAX_CONNECTIONS, @@ -5320,6 +5381,10 @@ static struct jim_nvp nvp_config_opts[] = { { .name = "-chain-position", .value = TCFG_CHAIN_POSITION }, { .name = "-dbgbase", .value = TCFG_DBGBASE }, { .name = "-rtos", .value = TCFG_RTOS }, + { .name = "-alt-work-area-virt", .value = TCFG_ALT_WORK_AREA_VIRT }, + { .name = "-alt-work-area-phys", .value = TCFG_ALT_WORK_AREA_PHYS }, + { .name = "-alt-work-area-size", .value = TCFG_ALT_WORK_AREA_SIZE }, + { .name = "-alt-work-area-backup", .value = TCFG_ALT_WORK_AREA_BACKUP }, { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE }, { .name = "-gdb-port", .value = TCFG_GDB_PORT }, { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS }, @@ -5460,65 +5525,107 @@ no_params: break; case TCFG_WORK_AREA_VIRT: + case TCFG_ALT_WORK_AREA_VIRT: if (goi->isconfigure) { - target_free_all_working_areas(target); + target_free_all_working_areas_restore(target, n->value == TCFG_ALT_WORK_AREA_VIRT ? + &target->alt_working_area_cfg : &target->working_area_cfg, 1); e = jim_getopt_wide(goi, &w); if (e != JIM_OK) return e; - target->working_area_virt = w; - target->working_area_virt_spec = true; + if (n->value == TCFG_ALT_WORK_AREA_VIRT) { + target->alt_working_area_cfg.virt = w; + target->alt_working_area_cfg.virt_spec = true; + } else { + target->working_area_cfg.virt = w; + target->working_area_cfg.virt_spec = true; + } } else { if (goi->argc != 0) goto no_params; } - Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt)); + if (n->value == TCFG_ALT_WORK_AREA_VIRT) + Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->alt_working_area_cfg.virt)); + else + Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_cfg.virt)); + /* loop for more */ break; case TCFG_WORK_AREA_PHYS: + case TCFG_ALT_WORK_AREA_PHYS: if (goi->isconfigure) { - target_free_all_working_areas(target); + target_free_all_working_areas_restore(target, n->value == TCFG_ALT_WORK_AREA_VIRT ? 
+ &target->alt_working_area_cfg : &target->working_area_cfg, 1); e = jim_getopt_wide(goi, &w); if (e != JIM_OK) return e; - target->working_area_phys = w; - target->working_area_phys_spec = true; + if (n->value == TCFG_ALT_WORK_AREA_PHYS) { + target->alt_working_area_cfg.phys = w; + target->alt_working_area_cfg.phys_spec = true; + } else { + target->working_area_cfg.phys = w; + target->working_area_cfg.phys_spec = true; + } } else { if (goi->argc != 0) goto no_params; } - Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys)); + if (n->value == TCFG_ALT_WORK_AREA_PHYS) + Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->alt_working_area_cfg.phys)); + else + Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_cfg.phys)); + /* loop for more */ break; case TCFG_WORK_AREA_SIZE: + case TCFG_ALT_WORK_AREA_SIZE: if (goi->isconfigure) { - target_free_all_working_areas(target); + target_free_all_working_areas_restore(target, n->value == TCFG_ALT_WORK_AREA_VIRT ? + &target->alt_working_area_cfg : &target->working_area_cfg, 1); e = jim_getopt_wide(goi, &w); if (e != JIM_OK) return e; - target->working_area_size = w; + if (n->value == TCFG_ALT_WORK_AREA_SIZE) + target->alt_working_area_cfg.size = w; + else + target->working_area_cfg.size = w; + } else { if (goi->argc != 0) goto no_params; } - Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size)); + if (n->value == TCFG_ALT_WORK_AREA_SIZE) + Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->alt_working_area_cfg.size)); + else + Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_cfg.size)); + /* loop for more */ break; case TCFG_WORK_AREA_BACKUP: + case TCFG_ALT_WORK_AREA_BACKUP: if (goi->isconfigure) { - target_free_all_working_areas(target); + target_free_all_working_areas_restore(target, n->value == TCFG_ALT_WORK_AREA_VIRT ? + &target->alt_working_area_cfg : &target->working_area_cfg, 1); e = jim_getopt_wide(goi, &w); if (e != JIM_OK) return e; /* make this exactly 1 or 0 */ - target->backup_working_area = (!!w); + if (n->value == TCFG_ALT_WORK_AREA_BACKUP) + target->alt_working_area_cfg.backup = (!!w); + else + target->working_area_cfg.backup = (!!w); + } else { if (goi->argc != 0) goto no_params; } - Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area)); + if (n->value == TCFG_ALT_WORK_AREA_BACKUP) + Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->alt_working_area_cfg.backup)); + else + Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_cfg.backup)); + /* loop for more e*/ break; @@ -5570,6 +5677,7 @@ no_params: } target_free_all_working_areas(target); + target_free_all_alt_working_areas(target); e = jim_getopt_obj(goi, &o_t); if (e != JIM_OK) return e; @@ -5850,7 +5958,8 @@ static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv) /* determine if we should halt or not. */ target->reset_halt = (a != 0); /* When this happens - all workareas are invalid. 
*/ - target_free_all_working_areas_restore(target, 0); + target_free_all_working_areas_restore(target, &target->working_area_cfg, 0); + target_free_all_working_areas_restore(target, &target->alt_working_area_cfg, 0); /* do the assert */ if (n->value == NVP_ASSERT) @@ -6251,10 +6360,14 @@ static int target_create(struct jim_getopt_info *goi) /* default to first core, override with -coreid */ target->coreid = 0; - target->working_area = 0x0; - target->working_area_size = 0x0; - target->working_areas = NULL; - target->backup_working_area = 0; + target->working_area_cfg.area = 0x0; + target->working_area_cfg.size = 0x0; + target->working_area_cfg.areas = NULL; + target->working_area_cfg.backup = 0; + target->alt_working_area_cfg.area = 0x0; + target->alt_working_area_cfg.size = 0x0; + target->alt_working_area_cfg.areas = NULL; + target->alt_working_area_cfg.backup = 0; target->state = TARGET_UNKNOWN; target->debug_reason = DBG_REASON_UNDEFINED; diff --git a/src/target/target.h b/src/target/target.h index 1f1a354207..110ada50cc 100644 --- a/src/target/target.h +++ b/src/target/target.h @@ -127,6 +127,18 @@ enum target_register_class { REG_CLASS_GENERAL, }; +struct working_area_config { + target_addr_t area; /* working area (initialised RAM). Evaluated + * upon first allocation from virtual/physical address. */ + bool virt_spec; /* virtual address specified? */ + target_addr_t virt; /* virtual address */ + bool phys_spec; /* physical address specified? */ + target_addr_t phys; /* physical address */ + uint32_t size; /* size in bytes */ + uint32_t backup; /* whether the content of the working area has to be preserved */ + struct working_area *areas; /* list of allocated working areas */ +}; + /* target_type.h contains the full definition of struct target_type */ struct target { struct target_type *type; /* target type definition (name, access functions) */ @@ -158,15 +170,10 @@ struct target { struct target_event_action *event_action; bool reset_halt; /* attempt resetting the CPU into the halted mode? */ - target_addr_t working_area; /* working area (initialised RAM). Evaluated - * upon first allocation from virtual/physical address. */ - bool working_area_virt_spec; /* virtual address specified? */ - target_addr_t working_area_virt; /* virtual address */ - bool working_area_phys_spec; /* physical address specified? */ - target_addr_t working_area_phys; /* physical address */ - uint32_t working_area_size; /* size in bytes */ - uint32_t backup_working_area; /* whether the content of the working area has to be preserved */ - struct working_area *working_areas;/* list of allocated working areas */ + + struct working_area_config working_area_cfg; + struct working_area_config alt_working_area_cfg; + enum target_debug_reason debug_reason;/* reason why the target entered debug state */ enum target_endianness endianness; /* target endianness */ /* also see: target_state_name() */ @@ -729,6 +736,8 @@ const char *target_reset_mode_name(enum target_reset_mode reset_mode); */ int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area); +int target_alloc_alt_working_area(struct target *target, + uint32_t size, struct working_area **area); /* Same as target_alloc_working_area, except that no error is logged * when ERROR_TARGET_RESOURCE_NOT_AVAILABLE is returned. 
* @@ -737,6 +746,8 @@ int target_alloc_working_area(struct target *target, */ int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area); +int target_alloc_alt_working_area_try(struct target *target, + uint32_t size, struct working_area **area); /** * Free a working area. * Restore target data if area backup is configured. @@ -745,8 +756,10 @@ int target_alloc_working_area_try(struct target *target, * @returns ERROR_OK if successful; error code if restore failed */ int target_free_working_area(struct target *target, struct working_area *area); +int target_free_alt_working_area(struct target *target, struct working_area *area); void target_free_all_working_areas(struct target *target); uint32_t target_get_working_area_avail(struct target *target); +uint32_t target_get_alt_working_area_avail(struct target *target); /** * Free all the resources allocated by targets and the target layer --
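For context when reviewing: below is a minimal sketch (not part of the patch) of how a driver for a Harvard-architecture target could combine the two regions, assuming the helpers this change adds (target_alloc_alt_working_area(), target_free_alt_working_area()) together with the existing target_alloc_working_area() and target_write_buffer() API. The function name stub_load_sketch and its stub_code/stub_code_size/buf_size parameters are hypothetical and only illustrate the intended split between code and data areas.

#include <helper/log.h>
#include <target/target.h>

/* Illustrative only: stub code goes into the primary work area
 * (-work-area-*), the download buffer into the alternative work area
 * (-alt-work-area-*). */
static int stub_load_sketch(struct target *target,
	const uint8_t *stub_code, uint32_t stub_code_size, uint32_t buf_size)
{
	struct working_area *code_area = NULL;
	struct working_area *data_area = NULL;
	int retval;

	/* Code section: primary work area (instruction RAM). */
	retval = target_alloc_working_area(target, stub_code_size, &code_area);
	if (retval != ERROR_OK)
		return retval;

	/* Data/download buffer: alternative work area (data RAM). */
	retval = target_alloc_alt_working_area(target, buf_size, &data_area);
	if (retval != ERROR_OK) {
		target_free_working_area(target, code_area);
		return retval;
	}

	/* Download the stub into the code area. */
	retval = target_write_buffer(target, code_area->address,
		stub_code_size, stub_code);

	/* ... start the stub and stream data through data_area->address ... */

	target_free_alt_working_area(target, data_area);
	target_free_working_area(target, code_area);
	return retval;
}

In a board configuration this would pair the existing -work-area-phys/-work-area-size options (code RAM) with the new -alt-work-area-phys/-alt-work-area-size/-alt-work-area-backup options added to nvp_config_opts above (data RAM); concrete addresses are target specific, so none are suggested here.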