This is an automated email from Gerrit. "Name of user not set <cc...@nuvoton.com>" just uploaded a new patch set to Gerrit, which you can find at https://review.openocd.org/c/openocd/+/7655
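For readers wanting to try the new driver, the usage comment on km1m7xx_flash_bank_command() in the patch below suggests a board configuration along the following lines. This is only an illustrative sketch, not a config file included in the change; the 512 KB size and the $_CHIPNAME.cpu target name are assumptions for a KM1M7CFxxN part, while 0x00800000 is the APROM base address defined by the driver:

    # Hypothetical board cfg fragment (not part of this patch)
    set _FLASHNAME $_CHIPNAME.aprom
    # flash bank <name> km1m7xx <address> <size> <chip_width> <bus_width> <target>
    # chip_width/bus_width are unused by the driver; probe re-reads the real size from the part ID
    flash bank $_FLASHNAME km1m7xx 0x00800000 0x80000 0 0 $_CHIPNAME.cpu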
-- gerrit commit 1c2be1b04018fb757d95866f486b8a3b03c78bbb Author: ccli0 <cc...@nuvoton.com> Date: Wed May 3 17:57:34 2023 +0800 src/flash/nor, src/target: add support for Nuvoton km1m7xx M7 series. add support for km1m7xx M7 series. Change-Id: I682f927f8e4e6c6216c6d0b259b3c24b9f3f6342 Signed-off-by: CCLI0 <cc...@nuvoton.com> diff --git a/src/flash/nor/Makefile.am b/src/flash/nor/Makefile.am index f04f0d206a..cf129bfd52 100644 --- a/src/flash/nor/Makefile.am +++ b/src/flash/nor/Makefile.am @@ -36,6 +36,7 @@ NOR_DRIVERS = \ %D%/jtagspi.c \ %D%/kinetis.c \ %D%/kinetis_ke.c \ + %D%/km1m7xx.c \ %D%/lpc2000.c \ %D%/lpc288x.c \ %D%/lpc2900.c \ diff --git a/src/flash/nor/drivers.c b/src/flash/nor/drivers.c index b9353d8208..69191429fb 100644 --- a/src/flash/nor/drivers.c +++ b/src/flash/nor/drivers.c @@ -36,6 +36,7 @@ extern const struct flash_driver fespi_flash; extern const struct flash_driver jtagspi_flash; extern const struct flash_driver kinetis_flash; extern const struct flash_driver kinetis_ke_flash; +extern const struct flash_driver km1m7xx_flash; extern const struct flash_driver lpc2000_flash; extern const struct flash_driver lpc288x_flash; extern const struct flash_driver lpc2900_flash; @@ -112,6 +113,7 @@ static const struct flash_driver * const flash_drivers[] = { &jtagspi_flash, &kinetis_flash, &kinetis_ke_flash, + &km1m7xx_flash, &lpc2000_flash, &lpc288x_flash, &lpc2900_flash, diff --git a/src/flash/nor/km1m7xx.c b/src/flash/nor/km1m7xx.c new file mode 100644 index 0000000000..57d154e36c --- /dev/null +++ b/src/flash/nor/km1m7xx.c @@ -0,0 +1,854 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/*************************************************************************** + * Copyright (C) 2021 by Nuvoton Technology Corporation Japan * + * Yoshikazu Yamaguchi <yamaguchi.yoshik...@nuvoton.com> * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation; either version 2 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
* + ***************************************************************************/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include "imp.h" +#include "km1mxxx.h" +#include <helper/binarybuffer.h> +#include <helper/time_support.h> +#include <target/algorithm.h> +#include <target/armv7m.h> + +#include <target/image.h> + +/* Definition for Flash Memory Interface Register */ +#define FI_BASE_ADDRESS 0x4001C000 + +#define FEWEN 0x4001C000 +#define FEWEN_KEY_CODE 0x2900 +#define FEWEN_ENABLE 0x004B + +#define FISPROSTR 0x4001C018 +#define FISPROEND 0x4001C01C +#define FISPROSTR_KM1M7C 0x4001C020 +#define FISPROEND_KM1M7C 0x4001C024 + +#define FISPROSTR_ENABLE 0x00000000 +#define FISPROEND_ENABLE 0xFFFFFF00 + +#define FWCNT 0x4001C004 +#define FWCNT_ERASE 0x00000002 +#define FWCNT_START 0x00000001 + +#define FMON 0x4001C008 +#define FMON_ERROR 0x0000FF00 +#define FIFMON_ERROR 0x00FFFF00 +#define FMON_WBUSY 0x00000001 + +#define PEADR 0x4001C00C + +/* Definition for System Control Register */ +#define CCR 0xE000ED14 +#define CCR_IC 0x00020000 +#define CCR_DC 0x00010000 + +#define CCSIDR 0xE000ED80 +#define CCSIDR_SSOCIATIVITY_POS 3 +#define CCSIDR_SSOCIATIVITY_MASK ((uint32_t)0x3FF << CCSIDR_SSOCIATIVITY_POS) +#define CCSIDR_WAYS(cssidr) (((cssidr) & CCSIDR_SSOCIATIVITY_MASK) \ + >> CCSIDR_SSOCIATIVITY_POS) +#define CCSIDR_NUMSETS_POS 13 +#define CCSIDR_NUMSETS_MASK ((uint32_t)0x7FFF << CCSIDR_NUMSETS_POS) +#define CCSIDR_SETS(cssidr) (((cssidr) & CCSIDR_NUMSETS_MASK) \ + >> CCSIDR_NUMSETS_POS) + +#define CSSELR 0xE000ED84 +#define CSSELR_IND_DATA 0x00000000 +#define CSSELR_IND_INSTRUCTION 0x00000001 + +#define ICIALLU 0xE000EF50 +#define ICIALLU_INVALIDATE 0x00000000 + +#define DCCISW 0xE000EF74 +#define DCCISW_SET_POS 5 +#define DCCISW_SET_MASK ((uint32_t)0x1FF << DCCISW_SET_POS) +#define DCCISW_SET(set) (((set) << DCCISW_SET_POS) & DCCISW_SET_MASK) + +#define DCCISW_WAY_POS 30 +#define DCCISW_WAY_MASK ((uint32_t)0x00000003 << DCCISW_WAY_POS) +#define DCCISW_WAY(way) (((way) << DCCISW_WAY_POS) & DCCISW_WAY_MASK) + +/* Definition KM1M7XX Flash Memory Address */ +#define KM1M7XX_APROM_BASE 0x00800000 +#define KM1M7XX_DATA_BASE 0x10800000 +#define KM1M7XX_DATA0_BASE 0x00C04000 +#define KM1M7XX_DATA1_BASE 0x00E04000 + +/* Definition KM1M4X Flash Memory Type */ +#define KM1M7XX_FLASH_TYPE_KM1M7AB 0x00000000 +#define KM1M7XX_FLASH_TYPE_KM1M7C 0x00000001 + +#define KM1M7ABX_BANKS(aprom_size, d_flash_size) \ + .flash_type = KM1M7XX_FLASH_TYPE_KM1M7AB, \ + .n_banks = 2, \ + { {KM1M7XX_APROM_BASE, (aprom_size)}, {KM1M7XX_DATA_BASE, (d_flash_size)} } + +#define KM1M7CX_BANKS(aprom_size, d_flash0_size, d_flash1_size) \ + .flash_type = KM1M7XX_FLASH_TYPE_KM1M7C, \ + .n_banks = 3, \ + { {KM1M7XX_APROM_BASE, (aprom_size)}, {KM1M7XX_DATA0_BASE, (d_flash0_size)}, \ + {KM1M7XX_DATA1_BASE, (d_flash1_size)} } + +static const struct km1mxxx_cpu_type km1m7xx_parts_km1m7ab[] = { + /*PART NO*/ /*PART ID*/ /*Banks*/ + /* KM1M7A/B Series */ + {"KM1M7A/BFxxK", 0x00000000, KM1M7ABX_BANKS(256 * 1024, 64 * 1024)}, + {"KM1M7A/BFxxM", 0x00000000, KM1M7ABX_BANKS(384 * 1024, 64 * 1024)}, + {"KM1M7A/BFxxN", 0x00000000, KM1M7ABX_BANKS(512 * 1024, 64 * 1024)}, +}; + +static const struct km1mxxx_cpu_type km1m7xx_parts[] = { + /*PART NO*/ /*PART ID*/ /*Banks*/ + /* KM1M7C Series */ + {"KM1M7CF03N", 0x08700100, KM1M7CX_BANKS(512 * 1024, 16 * 1024, 16 * 1024)}, + {"KM1M7CF03K", 0x08700000, KM1M7CX_BANKS(256 * 1024, 16 * 1024, 16 * 1024)}, + {"KM1M7CF04N", 0x08700101, KM1M7CX_BANKS(512 * 1024, 16 * 1024, 16 * 1024)}, + 
{"KM1M7CF04K", 0x08700001, KM1M7CX_BANKS(256 * 1024, 16 * 1024, 16 * 1024)}, + {"KM1M7CF05N", 0x08700102, KM1M7CX_BANKS(512 * 1024, 16 * 1024, 16 * 1024)}, + {"KM1M7CF05K", 0x08700002, KM1M7CX_BANKS(256 * 1024, 16 * 1024, 16 * 1024)}, + {"KM1M7CF06N", 0x08700103, KM1M7CX_BANKS(512 * 1024, 16 * 1024, 16 * 1024)}, + {"KM1M7CF06K", 0x08700003, KM1M7CX_BANKS(256 * 1024, 16 * 1024, 16 * 1024)}, + {"KM1M7CF13N", 0x08701100, KM1M7CX_BANKS(512 * 1024, 16 * 1024, 16 * 1024)}, + {"KM1M7CF13K", 0x08701000, KM1M7CX_BANKS(256 * 1024, 16 * 1024, 16 * 1024)}, + {"KM1M7CF14N", 0x08701101, KM1M7CX_BANKS(512 * 1024, 16 * 1024, 16 * 1024)}, + {"KM1M7CF14K", 0x08701001, KM1M7CX_BANKS(256 * 1024, 16 * 1024, 16 * 1024)}, + {"KM1M7CF15N", 0x08701102, KM1M7CX_BANKS(512 * 1024, 16 * 1024, 16 * 1024)}, + {"KM1M7CF15K", 0x08701002, KM1M7CX_BANKS(256 * 1024, 16 * 1024, 16 * 1024)}, + {"KM1M7CF16N", 0x08701103, KM1M7CX_BANKS(512 * 1024, 16 * 1024, 16 * 1024)}, + {"KM1M7CF16K", 0x08701003, KM1M7CX_BANKS(256 * 1024, 16 * 1024, 16 * 1024)}, +}; + +/* Definition for static variable */ +static uint32_t backup_ccr; + +/* Definition for static functions */ +static int km1m7xx_get_cpu_type(struct target *target, const struct km1mxxx_cpu_type **cpu); +static int km1m7xx_get_flash_size(struct flash_bank *bank, const struct km1mxxx_cpu_type *cpu, uint32_t *flash_size); + +/* Cache control functions */ +static void enable_icache(struct flash_bank *bank); +static void disable_icache(struct flash_bank *bank); +static void enable_dcache(struct flash_bank *bank); +static void disable_dcache(struct flash_bank *bank); +static void invalidate_dcache(struct flash_bank *bank); + +static void enable_icache(struct flash_bank *bank) +{ + uint32_t read_ccr = 0; + + /* Do nothing if I-Cache is invalid before writing */ + if ((backup_ccr & CCR_IC) == 0) + return; + + /* Invalidate I-Cache */ + target_write_u32(bank->target, ICIALLU, ICIALLU_INVALIDATE); + + /* Enable I-Cache */ + target_read_u32(bank->target, CCR, &read_ccr); + target_write_u32(bank->target, CCR, (read_ccr | CCR_IC)); +} + +static void disable_icache(struct flash_bank *bank) +{ + uint32_t read_ccr = 0; + + /* Do nothing if I-Cache is disabeled */ + if ((backup_ccr & CCR_IC) == 0) + return; + + /* Disable I-Cache */ + target_read_u32(bank->target, CCR, &read_ccr); + target_write_u32(bank->target, CCR, (read_ccr & ~CCR_IC)); + target_write_u32(bank->target, ICIALLU, ICIALLU_INVALIDATE); +} + +static void enable_dcache(struct flash_bank *bank) +{ + uint32_t read_ccr = 0; + + /* Do nothing if D-Cache is invalid before writing */ + if ((backup_ccr & CCR_DC) == 0) + return; + + /* Invalidate D-Cache */ + invalidate_dcache(bank); + + /* Enable D-Cache */ + target_read_u32(bank->target, CCR, &read_ccr); + target_write_u32(bank->target, CCR, (read_ccr | CCR_DC)); +} + +static void disable_dcache(struct flash_bank *bank) +{ + uint32_t read_ccr = 0; + + /* Do nothing if D-Cache is disabeled */ + if ((backup_ccr & CCR_DC) == 0) + return; + + /* Disable D-Cache */ + target_read_u32(bank->target, CCR, &read_ccr); + target_write_u32(bank->target, CCR, (read_ccr & ~CCR_DC)); + + /* Invalidate D-Cache */ + invalidate_dcache(bank); +} + +static void invalidate_dcache(struct flash_bank *bank) +{ + uint32_t read_ccsidr; + uint32_t sets; + uint32_t ways; + + /* Select Level 1 data cache */ + target_write_u32(bank->target, CSSELR, CSSELR_IND_DATA); + + /* Invalidate D-Cache */ + target_read_u32(bank->target, CCSIDR, &read_ccsidr); + sets = CCSIDR_SETS(read_ccsidr); + do { + ways = 
CCSIDR_WAYS(read_ccsidr); + do { + target_write_u32(bank->target, DCCISW, DCCISW_SET(sets) | DCCISW_WAY(ways)); + } while (ways--); + } while (sets--); +} + +/** + * @brief "flash bank" Command + * @date October, 2018 + * @note [Usage] flash bank $_FLASHNAME km1m7xx + * <Address> <size> <ChipWidth> <BusWidth> <Target> <Type> + * <Address> : Flash memory base address + * <Size> : Flash memory size + * <ChipWidth> : Chip width in byte (Not use) + * <BusWidth> : Bus width in byte (Not use) + * <Target> : Target device (***.cpu) + * <Type> : Write control type + * @param + * @return int ERROR_OK or the non-zero + **/ +FLASH_BANK_COMMAND_HANDLER(km1m7xx_flash_bank_command) +{ + struct km1mxxx_flash_bank *flash_bank_info; + + flash_bank_info = malloc(sizeof(struct km1mxxx_flash_bank)); + if (!flash_bank_info) { + LOG_ERROR("NuMicro flash driver: Out of memory"); + return ERROR_FAIL; + } + + memset(flash_bank_info, 0, sizeof(struct km1mxxx_flash_bank)); + + bank->driver_priv = flash_bank_info; + flash_bank_info->probed = 0; + + return ERROR_OK; +} + +static int km1m7xx_erase(struct flash_bank *bank, unsigned int first, unsigned int last) +{ + uint32_t read_fwcnt = 0; + uint32_t read_fmon = 0; + uint64_t timeout = 0; + uint32_t sector_index = 0; + uint32_t address = 0; + uint32_t flash_type = KM1M7XX_FLASH_TYPE_KM1M7AB; + uint32_t cache_ctrl_flag = 0; + struct km1mxxx_flash_bank *flash_bank_info; + + /* Flash Memory type */ + flash_bank_info = bank->driver_priv; + if (flash_bank_info) { + flash_type = flash_bank_info->cpu->flash_type; + } else { + LOG_ERROR("NuMicro flash driver: Unknown flash type\n"); + return ERROR_FLASH_OPERATION_FAILED; + } + + /* Set flash type parameter */ + if (flash_type == KM1M7XX_FLASH_TYPE_KM1M7C) + cache_ctrl_flag = 1; + else + cache_ctrl_flag = 0; + + /* Flash Cache disable */ + if (cache_ctrl_flag) { + target_read_u32(bank->target, CCR, &backup_ccr); + disable_icache(bank); + disable_dcache(bank); + } + + /* Flash memory write enable */ + target_write_u32(bank->target, FEWEN, (FEWEN_KEY_CODE | FEWEN_ENABLE)); + if (flash_type == KM1M7XX_FLASH_TYPE_KM1M7C) { + target_write_u32(bank->target, FISPROSTR_KM1M7C, FISPROSTR_ENABLE); + target_write_u32(bank->target, FISPROEND_KM1M7C, FISPROEND_ENABLE); + } else { + target_write_u32(bank->target, FISPROSTR, FISPROSTR_ENABLE); + target_write_u32(bank->target, FISPROEND, FISPROEND_ENABLE); + } + + /* Erase specified sectors */ + for (sector_index = first; sector_index <= last; sector_index++) { + /* Get sector address */ + address = bank->base + bank->sectors[sector_index].offset; + LOG_INFO("Erase at 0x%08x (Index:%d) ", address, sector_index); + + /* Set parameter */ + target_write_u32(bank->target, PEADR, + (bank->base + bank->sectors[sector_index].offset)); + + /* Start erase */ + target_write_u32(bank->target, FWCNT, (FWCNT_ERASE | FWCNT_START)); + + /* Read FMON three times to wait for FMON.BUSY to be set. 
*/ + target_read_u32(bank->target, FMON, &read_fmon); + target_read_u32(bank->target, FMON, &read_fmon); + target_read_u32(bank->target, FMON, &read_fmon); + + /* Wait for erase completion */ + target_read_u32(bank->target, FMON, &read_fmon); + read_fmon &= 0xFFFF; + timeout = timeval_ms(); + while (1) { + /* Check for completion */ + target_read_u32(bank->target, FMON, &read_fmon); + if ((read_fmon & FMON_WBUSY) == 0x0000) + break; + + /* Check error */ + if ((read_fmon & FMON_ERROR) != 0) { + LOG_DEBUG("%s Error : FMON = %d\n", __func__, read_fmon); + return ERROR_FAIL; + } + + /* Check timeout */ + if ((timeval_ms() - timeout) > TIMEOUT_ERASE) { + LOG_DEBUG("%s timeout : FMON = %d\n", __func__, read_fmon); + /* Flash Cache disable */ + if (cache_ctrl_flag) { + enable_icache(bank); + enable_dcache(bank); + } + return ERROR_FAIL; + } + } + + /* Clear START bit of FWCNT */ + target_read_u32(bank->target, FWCNT, &read_fwcnt); + read_fwcnt &= ~(FWCNT_ERASE | FWCNT_START); + target_write_u32(bank->target, FWCNT, read_fwcnt); + + /* Check error */ + if ((read_fmon & FMON_ERROR) != 0) { + LOG_DEBUG("%s Error : FMON = %d\n", __func__, read_fmon); + /* Flash Cache disable */ + if (cache_ctrl_flag) { + enable_icache(bank); + enable_dcache(bank); + } + return ERROR_FAIL; + } + } + + /* Flash Cache disable */ + if (cache_ctrl_flag) { + enable_icache(bank); + enable_dcache(bank); + } + + return ERROR_OK; +} + +static int km1m7xx_write(struct flash_bank *bank, const uint8_t *buffer, uint32_t offset, uint32_t count) +{ + int result = ERROR_OK; + struct target *target = bank->target; + struct working_area *algorithm = NULL; + struct working_area *source = NULL; + struct armv7m_algorithm armv7m_info; + + struct reg_param reg_params[2]; + uint32_t mem_params32[5] = {0, 0, 0, 0, 0}; + uint8_t mem_params8[sizeof(mem_params32)]; + + uint32_t remain_size = 0; + uint32_t buffer_size = 0; + uint32_t write_address = 0; + uint32_t write_size = 0; + uint32_t program_unit = 0; + uint8_t *write_data = 0; + uint32_t status = 0; + uint32_t cache_ctrl_flag = 0; + + uint32_t flash_type = KM1M7XX_FLASH_TYPE_KM1M7AB; + struct km1mxxx_flash_bank *flash_bank_info; + + static const uint8_t write_code[] = { + 0xF0, 0xB5, 0x00, 0x22, 0x00, 0x23, 0x00, 0x24, + 0x00, 0x20, 0x00, 0x21, 0x00, 0x25, 0x28, 0x4E, + 0x4E, 0x44, 0x32, 0x68, 0x27, 0x4E, 0x4E, 0x44, + 0x33, 0x68, 0x27, 0x4E, 0x4E, 0x44, 0x34, 0x68, + 0x00, 0x26, 0x26, 0x4F, 0x4F, 0x44, 0x3E, 0x60, + 0x3C, 0xE0, 0x25, 0x4E, 0xF2, 0x60, 0x00, 0x20, + 0x06, 0xE0, 0x40, 0xCB, 0xDF, 0xF8, 0x88, 0xC0, + 0x0C, 0xEB, 0x80, 0x07, 0x3E, 0x61, 0x40, 0x1C, + 0x20, 0x4E, 0x4E, 0x44, 0x36, 0x68, 0xB0, 0xEB, + 0x96, 0x0F, 0xF2, 0xD3, 0x00, 0x26, 0x1C, 0x4F, + 0x3E, 0x71, 0x01, 0x26, 0x3E, 0x71, 0x3E, 0x46, + 0x31, 0x89, 0x31, 0x89, 0x31, 0x89, 0x1A, 0x4D, + 0x31, 0x89, 0x00, 0xBF, 0x2E, 0x1E, 0xA5, 0xF1, + 0x01, 0x05, 0x00, 0xD1, 0xF0, 0xBD, 0x14, 0x4E, + 0x31, 0x89, 0x01, 0xF0, 0x01, 0x06, 0x00, 0x2E, + 0xF4, 0xD1, 0x11, 0x4E, 0x36, 0x79, 0x26, 0xF0, + 0x01, 0x06, 0x0F, 0x4F, 0x3E, 0x71, 0x01, 0xF4, + 0x7F, 0x46, 0x1E, 0xB1, 0x0B, 0x4E, 0x4E, 0x44, + 0x31, 0x60, 0x09, 0xE0, 0x0B, 0x4E, 0x4E, 0x44, + 0x36, 0x68, 0x32, 0x44, 0x09, 0x4E, 0x4E, 0x44, + 0x36, 0x68, 0xA4, 0x1B, 0x00, 0x2C, 0xC0, 0xD1, + 0x00, 0xBF, 0x00, 0xBE, 0x00, 0xBF, 0xDD, 0xE7, + 0x44, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, + 0x4C, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00, 0x00, + 0x00, 0xC0, 0x01, 0x40, 0x50, 0x00, 0x00, 0x00, + 0xA0, 0x86, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 
0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + }; + + /* Get working area for code */ + result = target_alloc_working_area(target, + sizeof(write_code), + &algorithm); + if (result != ERROR_OK) { + LOG_DEBUG("target_alloc_working_area() = %d\n", result); + return ERROR_TARGET_RESOURCE_NOT_AVAILABLE; + } + + /* Transfer write program to RAM */ + result = target_write_buffer(target, + algorithm->address, + sizeof(write_code), + write_code); + if (result != ERROR_OK) { + LOG_DEBUG("target_write_buffer() = %d\n", result); + target_free_working_area(target, algorithm); + return result; + } + + /* Get working area for data */ + buffer_size = 4 * 1024; + result = ERROR_TARGET_RESOURCE_NOT_AVAILABLE; + while (result != ERROR_OK) { + result = target_alloc_working_area_try(target, buffer_size, &source); + if (result == ERROR_OK) + break; + + buffer_size /= 2; + if (buffer_size < 256) { + LOG_DEBUG("target_alloc_working_area_try() = %d\n", result); + target_free_working_area(target, algorithm); + return ERROR_TARGET_RESOURCE_NOT_AVAILABLE; + } + } + + /* Flash Memory type */ + flash_bank_info = bank->driver_priv; + if (flash_bank_info) { + flash_type = flash_bank_info->cpu->flash_type; + } else { + LOG_ERROR("NuMicro flash driver: Unknown flash type\n"); + return ERROR_FLASH_OPERATION_FAILED; + } + + /* Set flash type parameter */ + if (flash_type == KM1M7XX_FLASH_TYPE_KM1M7C) { + program_unit = 16; + cache_ctrl_flag = 1; + } else { + program_unit = 8; + cache_ctrl_flag = 0; + } + + /* Flash Cache disable */ + if (cache_ctrl_flag) { + target_read_u32(bank->target, CCR, &backup_ccr); + disable_icache(bank); + disable_dcache(bank); + } + + /* Flash memory write enable */ + target_write_u32(bank->target, FEWEN, (FEWEN_KEY_CODE | FEWEN_ENABLE)); + if (flash_type == KM1M7XX_FLASH_TYPE_KM1M7C) { + target_write_u32(bank->target, FISPROSTR_KM1M7C, FISPROSTR_ENABLE); + target_write_u32(bank->target, FISPROEND_KM1M7C, FISPROEND_ENABLE); + } else { + target_write_u32(bank->target, FISPROSTR, FISPROSTR_ENABLE); + target_write_u32(bank->target, FISPROEND, FISPROEND_ENABLE); + } + + /** + * Set parameter (Core Register) + * Offset from last address of write program + * SP : <- -0x18 : Stack Pointer + * r9 : <- -0x58 : .data Section + **/ + init_reg_param(®_params[0], "sp", 32, PARAM_OUT); + init_reg_param(®_params[1], "r9", 32, PARAM_OUT); + + buf_set_u32(reg_params[0].value, 0, 32, + (algorithm->address + sizeof(write_code) - 0x18)); + buf_set_u32(reg_params[1].value, 0, 32, + (algorithm->address + sizeof(write_code) - 0x58)); + + /** + * Set parameter + * Offset from last address of write program + * (-0x14 : -> Address) + * -0x10 : -> BufferAddress + * (-0x0C : -> ByteCount) + * -0x08 : -> Program Unit + * (-0x04 : <- Result) + **/ + mem_params32[1] = source->address; + mem_params32[3] = program_unit; + + /* Program in units */ + remain_size = count; + write_address = bank->base + offset; + write_data = (uint8_t *)buffer; + write_size = buffer_size; + + while (remain_size != 0) { + if (remain_size < buffer_size) + write_size = remain_size; + + LOG_INFO("Program at 0x%08x to 0x%08x", 
+ write_address, (write_address + write_size - 1)); + + /** + * Set parameter + * Offset from last address of write program + * -0x14 : -> Address + * (-0x10 : -> BufferAddress ) + * -0x0C : -> ByteCount + * (-0x08 : -> Program Unit) + * -0x04 : <- Result + **/ + mem_params32[0] = write_address; + mem_params32[2] = write_size; + mem_params32[4] = 0; + target_buffer_set_u32_array(target, + mem_params8, + ARRAY_SIZE(mem_params32), + mem_params32); + result = target_write_buffer(target, + algorithm->address + sizeof(write_code) - 0x14, + 16, + mem_params8); + if (result != ERROR_OK) { + LOG_DEBUG("target_write_buffer() = %d\n", result); + break; + } + + /* Set parameter (Write data) */ + result = target_write_buffer(target, + source->address, + write_size, + write_data); + if (result != ERROR_OK) { + LOG_DEBUG("target_write_buffer() = %d\n", result); + break; + } + + /* Run program */ + armv7m_info.common_magic = ARMV7M_COMMON_MAGIC; + armv7m_info.core_mode = ARM_MODE_THREAD; + result = target_run_algorithm(target, + 0, NULL, + ARRAY_SIZE(reg_params), reg_params, + algorithm->address, + 0, + 1000, + &armv7m_info); + if (result != ERROR_OK) { + LOG_DEBUG("target_run_algorithm() = %d\n", result); + result = ERROR_FLASH_OPERATION_FAILED; + break; + } + + /* Get status */ + result = target_read_u32(target, + algorithm->address + sizeof(write_code) - 4, + &status); + if (result != ERROR_OK) { + LOG_DEBUG("target_read_u32() = %d\n", result); + break; + } + + /* Next */ + remain_size -= write_size; + write_address += write_size; + write_data += write_size; + } + + /* Flash Cache disable */ + if (cache_ctrl_flag) { + enable_icache(bank); + enable_dcache(bank); + } + + /* Free allocated area */ + target_free_working_area(target, algorithm); + target_free_working_area(target, source); + destroy_reg_param(®_params[0]); + destroy_reg_param(®_params[1]); + + return result; +} + +static int km1m7xx_get_cpu_type(struct target *target, const struct km1mxxx_cpu_type **cpu) +{ + uint32_t part_id; + int retval = ERROR_OK; + + /* Read PartID */ + retval = target_read_u32(target, KM1MXXX_SYS_BASE, &part_id); + if (retval != ERROR_OK) { + LOG_ERROR("NuMicro flash driver: Failed to Get PartID\n"); + return ERROR_FLASH_OPERATION_FAILED; + } + + LOG_INFO("Device ID: 0x%08" PRIx32 "", part_id); + /* search part numbers */ + for (size_t i = 0; i < ARRAY_SIZE(km1m7xx_parts); i++) { + if (part_id == km1m7xx_parts[i].partid) { + *cpu = &km1m7xx_parts[i]; + LOG_INFO("Device Name: %s", (*cpu)->partname); + return ERROR_OK; + } + } + + return ERROR_FAIL; +} + +static int km1m7xx_get_flash_size(struct flash_bank *bank, const struct km1mxxx_cpu_type *cpu, uint32_t *flash_size) +{ + for (size_t i = 0; i < cpu->n_banks; i++) { + if (bank->base == cpu->bank[i].base) { + *flash_size = cpu->bank[i].size; + LOG_INFO("bank base = " TARGET_ADDR_FMT ", size = 0x%08" + PRIx32, bank->base, *flash_size); + return ERROR_OK; + } + } + return ERROR_FLASH_OPERATION_FAILED; +} + +static int km1m7xx_get_cpu_type_km1m7ab(struct target *target, const struct km1mxxx_cpu_type **cpu) +{ + int retval = ERROR_OK; + uint32_t opt_reg00; + uint32_t iflash_size; + + /* Read Option register */ + retval = target_read_u32(target, 0x4001C160, &opt_reg00); + if (retval != ERROR_OK) + return ERROR_FAIL; + + iflash_size = ((opt_reg00 & 0x00FF0000) >> 4); + + /* Search cpu type */ + for (size_t i = 0; i < ARRAY_SIZE(km1m7xx_parts_km1m7ab); i++) { + /* Size comparison with I-Flash(bank0) */ + if (iflash_size == km1m7xx_parts_km1m7ab[i].bank[0].size) { + *cpu = 
&km1m7xx_parts_km1m7ab[i]; + LOG_INFO("Device Name: %s", (*cpu)->partname); + return ERROR_OK; + } + } + + return ERROR_FAIL; +} + +static int km1m7xx_probe(struct flash_bank *bank) +{ + int cnt; + uint32_t part_id = 0x00000000; + uint32_t flash_size, offset = 0; + uint32_t flash_sector_size = FLASH_SECTOR_SIZE_4K; + const struct km1mxxx_cpu_type *cpu; + struct target *target = bank->target; + int retval = ERROR_OK; + + /* Read PartID */ + retval = target_read_u32(target, KM1MXXX_SYS_BASE, &part_id); + if (retval != ERROR_OK || part_id == 0x00000000) { + /** + * Run km1mxxx_probe() again later + * by leaving flash_bank_info->probed=0. + **/ + return ERROR_OK; + } + + if (part_id == 0x00000001 || part_id == 0x00000003) { + /* For KM1M7A/B, read the initial value(0x00000001 or 0x00000003) + of CHIPCKCTR(0x40000000). */ + retval = km1m7xx_get_cpu_type_km1m7ab(target, &cpu); + } else { + /* Reads CPUID (except for KM1M7A/B) */ + retval = km1m7xx_get_cpu_type(target, &cpu); + } + if (retval != ERROR_OK) { + LOG_ERROR("NuMicro flash driver: Failed to detect a known part\n"); + return ERROR_FLASH_OPERATION_FAILED; + } + + retval = km1m7xx_get_flash_size(bank, cpu, &flash_size); + if (retval != ERROR_OK) { + LOG_ERROR("NuMicro flash driver: Failed to detect flash size\n"); + return ERROR_FLASH_OPERATION_FAILED; + } + if (cpu->flash_type == KM1M7XX_FLASH_TYPE_KM1M7C) + flash_sector_size = FLASH_SECTOR_SIZE_8K; + + bank->size = flash_size; + bank->num_sectors = bank->size / flash_sector_size; + bank->sectors = malloc(sizeof(struct flash_sector) * bank->num_sectors); + + offset = 0; + for (cnt = 0; cnt < (int)(bank->num_sectors); cnt++) { + bank->sectors[cnt].offset = offset; + bank->sectors[cnt].size = flash_sector_size; + bank->sectors[cnt].is_erased = -1; + bank->sectors[cnt].is_protected = -1; + offset += flash_sector_size; + } + + struct km1mxxx_flash_bank *flash_bank_info; + flash_bank_info = bank->driver_priv; + flash_bank_info->probed = 1; + flash_bank_info->cpu = cpu; + + return ERROR_OK; +} + +static int km1m7xx_protect(struct flash_bank *bank, int set, unsigned int first, unsigned int last) +{ + LOG_INFO("protect function is unsupported\n"); + return ERROR_FLASH_OPER_UNSUPPORTED; +} + +static int km1m7xx_erase_check(struct flash_bank *bank) +{ + LOG_INFO("erase_check function is unsupported\n"); + return ERROR_FLASH_OPER_UNSUPPORTED; +} + +static int km1m7xx_protect_check(struct flash_bank *bank) +{ + LOG_INFO("protect_check function is unsupported\n"); + return ERROR_OK; +} + +static int km1m7xx_info(struct flash_bank *bank, struct command_invocation *cmd) +{ + return ERROR_OK; +} + +static int km1m7xx_auto_probe(struct flash_bank *bank) +{ + struct km1mxxx_flash_bank *flash_bank_info = bank->driver_priv; + + if (flash_bank_info->probed) + return ERROR_OK; + + return km1m7xx_probe(bank); +} + +COMMAND_HANDLER(km1m7xx_handle_erase_all_sectors_command) +{ + struct flash_bank *bank; + int result; + + /* Erase all sectors of each bank */ + for (bank = flash_bank_list(); bank; bank = bank->next) { + /* Get bank information */ + get_flash_bank_by_name(bank->name, &bank); + + /* Erase all sectors */ + result = km1m7xx_erase(bank, 0, (bank->num_sectors - 1)); + if (result != ERROR_OK) + return result; + } + + return ERROR_OK; +} + +static const struct command_registration km1m7xx_subcommand_handlers[] = { + { + .name = "erase_all_sectors", + .handler = km1m7xx_handle_erase_all_sectors_command, + .mode = COMMAND_EXEC, + .usage = "", + .help = "Erase all sectors", + }, + COMMAND_REGISTRATION_DONE 
+}; + +static const struct command_registration km1m7xx_command_handlers[] = { + { + .name = "km1m7xx", + .mode = COMMAND_ANY, + .help = "km1m7xx command group", + .usage = "", + .chain = km1m7xx_subcommand_handlers, + }, + COMMAND_REGISTRATION_DONE +}; + +struct flash_driver km1m7xx_flash = { + .name = "km1m7xx", + .usage = "", + .commands = km1m7xx_command_handlers, + .flash_bank_command = km1m7xx_flash_bank_command, + .erase = km1m7xx_erase, + .protect = km1m7xx_protect, + .write = km1m7xx_write, + .read = default_flash_read, + .probe = km1m7xx_probe, + .auto_probe = km1m7xx_auto_probe, + .erase_check = km1m7xx_erase_check, + .protect_check = km1m7xx_protect_check, + .info = km1m7xx_info, + .free_driver_priv = default_flash_free_driver_priv, +}; diff --git a/src/flash/nor/km1mxxx.h b/src/flash/nor/km1mxxx.h new file mode 100644 index 0000000000..8505e415b3 --- /dev/null +++ b/src/flash/nor/km1mxxx.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/*************************************************************************** + * Copyright (C) 2022 by Nuvoton Technology Corporation Japan * + * Naotoshi Izumi <izumi.naoto...@nuvoton.com> * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation; either version 2 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
* + ***************************************************************************/ + +#ifndef OPENOCD_FLASH_NOR_KM1MXXX_H +#define OPENOCD_FLASH_NOR_KM1MXXX_H + +/* Nuvoton KM1Mxxx Series register locations */ +#define KM1MXXX_SYS_BASE 0x40000000 + +/* Definition for Erase timeout */ +#define TIMEOUT_ERASE 100000 + +/* Definition for Flash Memory */ +#define FLASH_SECTOR_SIZE_4K 0x00001000 +#define FLASH_SECTOR_SIZE_8K 0x00002000 + + +/* flash MAX banks */ +#define KM1MXXX_MAX_FLASH_BANKS 6 + +/* flash bank structs */ +struct km1mxxx_flash_bank_type { + uint32_t base; + uint32_t size; +}; + +/* part structs */ +struct km1mxxx_cpu_type { + char *partname; + uint32_t partid; + unsigned int flash_type; + unsigned int n_banks; + struct km1mxxx_flash_bank_type bank[KM1MXXX_MAX_FLASH_BANKS]; +}; + +struct km1mxxx_flash_bank { + int probed; + const struct km1mxxx_cpu_type *cpu; +}; + +#endif /* OPENOCD_FLASH_NOR_KM1MXXX_H */ diff --git a/src/target/Makefile.am b/src/target/Makefile.am index 2084de65e6..e8c115e3ac 100644 --- a/src/target/Makefile.am +++ b/src/target/Makefile.am @@ -23,6 +23,7 @@ noinst_LTLIBRARIES += %D%/libtarget.la $(INTEL_IA32_SRC) \ $(ESIRISC_SRC) \ $(ARC_SRC) \ + $(KM1Mx_SRC) \ %D%/avrt.c \ %D%/dsp563xx.c \ %D%/dsp563xx_once.c \ @@ -153,6 +154,9 @@ ARC_SRC = \ %D%/arc_jtag.c \ %D%/arc_mem.c +KM1Mx_SRC = \ + %D%/km1m7xx.c + %C%_libtarget_la_SOURCES += \ %D%/algorithm.h \ %D%/arm.h \ diff --git a/src/target/km1m7xx.c b/src/target/km1m7xx.c new file mode 100644 index 0000000000..627db19273 --- /dev/null +++ b/src/target/km1m7xx.c @@ -0,0 +1,2297 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/*************************************************************************** + * Copyright (C) 2005 by Dominic Rath * + * dominic.r...@gmx.de * + * * + * Copyright (C) 2006 by Magnus Lundin * + * lun...@mlu.mine.nu * + * * + * Copyright (C) 2008 by Spencer Oliver * + * s...@spen-soft.co.uk * + * * + * * + * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) * + * * + *-------------------------------------------------------------------------* + * * + * This file is based on cortex_m.c and adds functionality for the * + * Nuvoton KM1M7 series.This file was created based on cortex_m.c. * + * * + * Copyright (C) 2021 by Nuvoton Technology Corporation Japan * + * Yoshikazu Yamaguchi <yamaguchi.yoshik...@nuvoton.com> * + * * + ***************************************************************************/ +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include "jtag/interface.h" +#include "breakpoints.h" +#include "cortex_m.h" +#include "target_request.h" +#include "target_type.h" +#include "arm_adi_v5.h" +#include "arm_disassembler.h" +#include "register.h" +#include "arm_opcodes.h" +#include "arm_semihosting.h" +#include "smp.h" +#include <helper/time_support.h> +#include <rtt/rtt.h> + +#include "image.h" + +/* NOTE: most of this should work fine for the Cortex-M1 and + * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M. + * Some differences: M0/M1 doesn't have FPB remapping or the + * DWT tracing/profiling support. (So the cycle counter will + * not be usable; the other stuff isn't currently used here.) + * + * Although there are some workarounds for errata seen only in r0p0 + * silicon, such old parts are hard to find and thus not much tested + * any longer. 
+ */ + +/* Timeout for register r/w */ +#define DHCSR_S_REGRDY_TIMEOUT (500) + +/* definition for security authentication */ +static uint32_t km1m7xx_key_set; +static uint32_t km1m7xx_key_data[4] = { 0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff}; + +/* forward declarations */ +static int cortex_m_store_core_reg_u32(struct target *target, + uint32_t num, uint32_t value); + +/** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared + * on a read. Call this helper function each time DHCSR is read + * to preserve S_RESET_ST state in case of a reset event was detected. + */ +static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m, + uint32_t dhcsr) +{ + cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr; +} + +/** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate + * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky + */ +static int cortex_m_read_dhcsr_atomic_sticky(struct target *target) +{ + struct cortex_m_common *cortex_m = target_to_cm(target); + struct armv7m_common *armv7m = target_to_armv7m(target); + + int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, + &cortex_m->dcb_dhcsr); + if (retval != ERROR_OK) + return retval; + + cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr); + return ERROR_OK; +} + +static int cortex_m_load_core_reg_u32(struct target *target, + uint32_t regsel, uint32_t *value) +{ + struct cortex_m_common *cortex_m = target_to_cm(target); + struct armv7m_common *armv7m = target_to_armv7m(target); + int retval; + uint32_t dcrdr, tmp_value; + int64_t then; + + /* because the DCB_DCRDR is used for the emulated dcc channel + * we have to save/restore the DCB_DCRDR when used */ + if (target->dbg_msg_enabled) { + retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr); + if (retval != ERROR_OK) + return retval; + } + + retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel); + if (retval != ERROR_OK) + return retval; + + /* check if value from register is ready and pre-read it */ + then = timeval_ms(); + while (1) { + retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, + &cortex_m->dcb_dhcsr); + if (retval != ERROR_OK) + return retval; + retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR, + &tmp_value); + if (retval != ERROR_OK) + return retval; + cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr); + if (cortex_m->dcb_dhcsr & S_REGRDY) + break; + cortex_m->slow_register_read = true; /* Polling (still) needed. */ + if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) { + LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready"); + return ERROR_TIMEOUT_REACHED; + } + keep_alive(); + } + + *value = tmp_value; + + if (target->dbg_msg_enabled) { + /* restore DCB_DCRDR - this needs to be in a separate + * transaction otherwise the emulated DCC channel breaks */ + if (retval == ERROR_OK) + retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr); + } + + return retval; +} + +static int cortex_m_slow_read_all_regs(struct target *target) +{ + struct cortex_m_common *cortex_m = target_to_cm(target); + struct armv7m_common *armv7m = target_to_armv7m(target); + const unsigned int num_regs = armv7m->arm.core_cache->num_regs; + + /* Opportunistically restore fast read, it'll revert to slow + * if any register needed polling in cortex_m_load_core_reg_u32(). 
*/ + cortex_m->slow_register_read = false; + + for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) { + struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id]; + if (r->exist) { + int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY); + if (retval != ERROR_OK) + return retval; + } + } + + if (!cortex_m->slow_register_read) + LOG_TARGET_DEBUG(target, "Switching back to fast register reads"); + + return ERROR_OK; +} + +static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel, + uint32_t *reg_value, uint32_t *dhcsr) +{ + struct armv7m_common *armv7m = target_to_armv7m(target); + int retval; + + retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel); + if (retval != ERROR_OK) + return retval; + + retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr); + if (retval != ERROR_OK) + return retval; + + return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value); +} + +static int cortex_m_fast_read_all_regs(struct target *target) +{ + struct cortex_m_common *cortex_m = target_to_cm(target); + struct armv7m_common *armv7m = target_to_armv7m(target); + int retval; + uint32_t dcrdr; + + /* because the DCB_DCRDR is used for the emulated dcc channel + * we have to save/restore the DCB_DCRDR when used */ + if (target->dbg_msg_enabled) { + retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr); + if (retval != ERROR_OK) + return retval; + } + + const unsigned int num_regs = armv7m->arm.core_cache->num_regs; + const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1 + + ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1; + /* we need one 32-bit word for each register except FP D0..D15, which + * need two words */ + uint32_t r_vals[n_r32]; + uint32_t dhcsr[n_r32]; + + unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */ + unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */ + for (reg_id = 0; reg_id < num_regs; reg_id++) { + struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id]; + if (!r->exist) + continue; /* skip non existent registers */ + + if (r->size <= 8) { + /* Any 8-bit or shorter register is unpacked from a 32-bit + * container register. Skip it now. */ + continue; + } + + uint32_t regsel = armv7m_map_id_to_regsel(reg_id); + retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi], + &dhcsr[wi]); + if (retval != ERROR_OK) + return retval; + wi++; + + assert(r->size == 32 || r->size == 64); + if (r->size == 32) + continue; /* done with 32-bit register */ + + assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG); + /* the odd part of FP register (S1, S3...) 
*/ + retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi], + &dhcsr[wi]); + if (retval != ERROR_OK) + return retval; + wi++; + } + + assert(wi <= n_r32); + + retval = dap_run(armv7m->debug_ap->dap); + if (retval != ERROR_OK) + return retval; + + if (target->dbg_msg_enabled) { + /* restore DCB_DCRDR - this needs to be in a separate + * transaction otherwise the emulated DCC channel breaks */ + retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr); + if (retval != ERROR_OK) + return retval; + } + + bool not_ready = false; + for (unsigned int i = 0; i < wi; i++) { + if ((dhcsr[i] & S_REGRDY) == 0) { + not_ready = true; + LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i); + } + cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]); + } + + if (not_ready) { + /* Any register was not ready, + * fall back to slow read with S_REGRDY polling */ + return ERROR_TIMEOUT_REACHED; + } + + LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi); + + unsigned int ri = 0; /* read index from r_vals array */ + for (reg_id = 0; reg_id < num_regs; reg_id++) { + struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id]; + if (!r->exist) + continue; /* skip non existent registers */ + + r->dirty = false; + + unsigned int reg32_id; + uint32_t offset; + if (armv7m_map_reg_packing(reg_id, ®32_id, &offset)) { + /* Unpack a partial register from 32-bit container register */ + struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id]; + + /* The container register ought to precede all regs unpacked + * from it in the reg_list. So the value should be ready + * to unpack */ + assert(r32->valid); + buf_cpy(r32->value + offset, r->value, r->size); + + } else { + assert(r->size == 32 || r->size == 64); + buf_set_u32(r->value, 0, 32, r_vals[ri++]); + + if (r->size == 64) { + assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG); + /* the odd part of FP register (S1, S3...) 
*/ + buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]); + } + } + r->valid = true; + } + assert(ri == wi); + + return retval; +} + +static int cortex_m_store_core_reg_u32(struct target *target, + uint32_t regsel, uint32_t value) +{ + struct cortex_m_common *cortex_m = target_to_cm(target); + struct armv7m_common *armv7m = target_to_armv7m(target); + int retval; + uint32_t dcrdr; + int64_t then; + + /* because the DCB_DCRDR is used for the emulated dcc channel + * we have to save/restore the DCB_DCRDR when used */ + if (target->dbg_msg_enabled) { + retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr); + if (retval != ERROR_OK) + return retval; + } + + retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value); + if (retval != ERROR_OK) + return retval; + + retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR); + if (retval != ERROR_OK) + return retval; + + /* check if value is written into register */ + then = timeval_ms(); + while (1) { + retval = cortex_m_read_dhcsr_atomic_sticky(target); + if (retval != ERROR_OK) + return retval; + if (cortex_m->dcb_dhcsr & S_REGRDY) + break; + if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) { + LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready"); + return ERROR_TIMEOUT_REACHED; + } + keep_alive(); + } + + if (target->dbg_msg_enabled) { + /* restore DCB_DCRDR - this needs to be in a separate + * transaction otherwise the emulated DCC channel breaks */ + if (retval == ERROR_OK) + retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr); + } + + return retval; +} + +static int cortex_m_write_debug_halt_mask(struct target *target, + uint32_t mask_on, uint32_t mask_off) +{ + struct cortex_m_common *cortex_m = target_to_cm(target); + struct armv7m_common *armv7m = &cortex_m->armv7m; + + /* mask off status bits */ + cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off); + /* create new register mask */ + cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on; + + return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr); +} + +static int cortex_m_set_maskints(struct target *target, bool mask) +{ + struct cortex_m_common *cortex_m = target_to_cm(target); + if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask) + return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 
0 : C_MASKINTS); + else + return ERROR_OK; +} + +static int cortex_m_set_maskints_for_halt(struct target *target) +{ + struct cortex_m_common *cortex_m = target_to_cm(target); + switch (cortex_m->isrmasking_mode) { + case CORTEX_M_ISRMASK_AUTO: + /* interrupts taken at resume, whether for step or run -> no mask */ + return cortex_m_set_maskints(target, false); + + case CORTEX_M_ISRMASK_OFF: + /* interrupts never masked */ + return cortex_m_set_maskints(target, false); + + case CORTEX_M_ISRMASK_ON: + /* interrupts always masked */ + return cortex_m_set_maskints(target, true); + + case CORTEX_M_ISRMASK_STEPONLY: + /* interrupts masked for single step only -> mask now if MASKINTS + * erratum, otherwise only mask before stepping */ + return cortex_m_set_maskints(target, cortex_m->maskints_erratum); + } + return ERROR_OK; +} + +static int cortex_m_set_maskints_for_run(struct target *target) +{ + switch (target_to_cm(target)->isrmasking_mode) { + case CORTEX_M_ISRMASK_AUTO: + /* interrupts taken at resume, whether for step or run -> no mask */ + return cortex_m_set_maskints(target, false); + + case CORTEX_M_ISRMASK_OFF: + /* interrupts never masked */ + return cortex_m_set_maskints(target, false); + + case CORTEX_M_ISRMASK_ON: + /* interrupts always masked */ + return cortex_m_set_maskints(target, true); + + case CORTEX_M_ISRMASK_STEPONLY: + /* interrupts masked for single step only -> no mask */ + return cortex_m_set_maskints(target, false); + } + return ERROR_OK; +} + +static int cortex_m_set_maskints_for_step(struct target *target) +{ + switch (target_to_cm(target)->isrmasking_mode) { + case CORTEX_M_ISRMASK_AUTO: + /* the auto-interrupt should already be done -> mask */ + return cortex_m_set_maskints(target, true); + + case CORTEX_M_ISRMASK_OFF: + /* interrupts never masked */ + return cortex_m_set_maskints(target, false); + + case CORTEX_M_ISRMASK_ON: + /* interrupts always masked */ + return cortex_m_set_maskints(target, true); + + case CORTEX_M_ISRMASK_STEPONLY: + /* interrupts masked for single step only -> mask */ + return cortex_m_set_maskints(target, true); + } + return ERROR_OK; +} + +static int cortex_m_clear_halt(struct target *target) +{ + struct cortex_m_common *cortex_m = target_to_cm(target); + struct armv7m_common *armv7m = &cortex_m->armv7m; + int retval; + + /* clear step if any */ + cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP); + + /* Read Debug Fault Status Register */ + retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr); + if (retval != ERROR_OK) + return retval; + + /* Clear Debug Fault Status */ + retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr); + if (retval != ERROR_OK) + return retval; + LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr); + + return ERROR_OK; +} + +static int cortex_m_single_step_core(struct target *target) +{ + struct cortex_m_common *cortex_m = target_to_cm(target); + int retval; + + /* Mask interrupts before clearing halt, if not done already. This avoids + * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing + * HALT can put the core into an unknown state. 
+ */ + if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) { + retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0); + if (retval != ERROR_OK) + return retval; + } + retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT); + if (retval != ERROR_OK) + return retval; + LOG_TARGET_DEBUG(target, "single step"); + + /* restore dhcsr reg */ + cortex_m_clear_halt(target); + + return ERROR_OK; +} + +static int cortex_m_enable_fpb(struct target *target) +{ + int retval = target_write_u32(target, FP_CTRL, 3); + if (retval != ERROR_OK) + return retval; + + /* check the fpb is actually enabled */ + uint32_t fpctrl; + retval = target_read_u32(target, FP_CTRL, &fpctrl); + if (retval != ERROR_OK) + return retval; + + if (fpctrl & 1) + return ERROR_OK; + + return ERROR_FAIL; +} + +static int cortex_m_endreset_event(struct target *target) +{ + int retval; + uint32_t dcb_demcr; + struct cortex_m_common *cortex_m = target_to_cm(target); + struct armv7m_common *armv7m = &cortex_m->armv7m; + struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap; + struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list; + struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list; + + /* REVISIT The four debug monitor bits are currently ignored... */ + retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr); + if (retval != ERROR_OK) + return retval; + LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr); + + /* this register is used for emulated dcc channel */ + retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0); + if (retval != ERROR_OK) + return retval; + + retval = cortex_m_read_dhcsr_atomic_sticky(target); + if (retval != ERROR_OK) + return retval; + + if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) { + /* Enable debug requests */ + retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS); + if (retval != ERROR_OK) + return retval; + } + + /* Restore proper interrupt masking setting for running CPU. */ + cortex_m_set_maskints_for_run(target); + + /* Enable features controlled by ITM and DWT blocks, and catch only + * the vectors we were told to pay attention to. + * + * Target firmware is responsible for all fault handling policy + * choices *EXCEPT* explicitly scripted overrides like "vector_catch" + * or manual updates to the NVIC SHCSR and CCR registers. + */ + retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr); + if (retval != ERROR_OK) + return retval; + + /* Paranoia: evidently some (early?) chips don't preserve all the + * debug state (including FPB, DWT, etc) across reset... 
+ */ + + /* Enable FPB */ + retval = cortex_m_enable_fpb(target); + if (retval != ERROR_OK) { + LOG_TARGET_ERROR(target, "Failed to enable the FPB"); + return retval; + } + + cortex_m->fpb_enabled = true; + + /* Restore FPB registers */ + for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) { + retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value); + if (retval != ERROR_OK) + return retval; + } + + /* Restore DWT registers */ + for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) { + retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0, + dwt_list[i].comp); + if (retval != ERROR_OK) + return retval; + retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4, + dwt_list[i].mask); + if (retval != ERROR_OK) + return retval; + retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8, + dwt_list[i].function); + if (retval != ERROR_OK) + return retval; + } + retval = dap_run(swjdp); + if (retval != ERROR_OK) + return retval; + + register_cache_invalidate(armv7m->arm.core_cache); + + /* TODO: invalidate also working areas (needed in the case of detected reset). + * Doing so will require flash drivers to test if working area + * is still valid in all target algo calling loops. + */ + + /* make sure we have latest dhcsr flags */ + retval = cortex_m_read_dhcsr_atomic_sticky(target); + if (retval != ERROR_OK) + return retval; + + return retval; +} + +static int cortex_m_examine_debug_reason(struct target *target) +{ + struct cortex_m_common *cortex_m = target_to_cm(target); + + /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason + * only check the debug reason if we don't know it already */ + + if (target->debug_reason != DBG_REASON_DBGRQ + && target->debug_reason != DBG_REASON_SINGLESTEP) { + if (cortex_m->nvic_dfsr & DFSR_BKPT) { + target->debug_reason = DBG_REASON_BREAKPOINT; + if (cortex_m->nvic_dfsr & DFSR_DWTTRAP) + target->debug_reason = DBG_REASON_WPTANDBKPT; + } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP) { + target->debug_reason = DBG_REASON_WATCHPOINT; + } else if (cortex_m->nvic_dfsr & DFSR_VCATCH) { + target->debug_reason = DBG_REASON_BREAKPOINT; + } else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL) { + target->debug_reason = DBG_REASON_DBGRQ; + } else { /* HALTED */ + target->debug_reason = DBG_REASON_UNDEFINED; + } + } + + return ERROR_OK; +} + +static int cortex_m_examine_exception_reason(struct target *target) +{ + uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1; + struct armv7m_common *armv7m = target_to_armv7m(target); + struct adiv5_dap *swjdp = armv7m->arm.dap; + int retval; + + retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr); + if (retval != ERROR_OK) + return retval; + switch (armv7m->exception_number) { + case 2: /* NMI */ + break; + case 3: /* Hard Fault */ + retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr); + if (retval != ERROR_OK) + return retval; + if (except_sr & 0x40000000) { + retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr); + if (retval != ERROR_OK) + return retval; + } + break; + case 4: /* Memory Management */ + retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr); + if (retval != ERROR_OK) + return retval; + retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar); + if (retval != ERROR_OK) + return retval; + break; + case 5: /* Bus Fault */ + retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr); + if (retval != ERROR_OK) + return 
retval; + retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar); + if (retval != ERROR_OK) + return retval; + break; + case 6: /* Usage Fault */ + retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr); + if (retval != ERROR_OK) + return retval; + break; + case 7: /* Secure Fault */ + retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr); + if (retval != ERROR_OK) + return retval; + retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar); + if (retval != ERROR_OK) + return retval; + break; + case 11: /* SVCall */ + break; + case 12: /* Debug Monitor */ + retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr); + if (retval != ERROR_OK) + return retval; + break; + case 14: /* PendSV */ + break; + case 15: /* SysTick */ + break; + default: + except_sr = 0; + break; + } + retval = dap_run(swjdp); + if (retval == ERROR_OK) + LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32 + ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32, + armv7m_exception_string(armv7m->exception_number), + shcsr, except_sr, cfsr, except_ar); + return retval; +} + +static int cortex_m_debug_entry(struct target *target) +{ + uint32_t xpsr; + int retval; + struct cortex_m_common *cortex_m = target_to_cm(target); + struct armv7m_common *armv7m = &cortex_m->armv7m; + struct arm *arm = &armv7m->arm; + struct reg *r; + + LOG_TARGET_DEBUG(target, " "); + + /* Do this really early to minimize the window where the MASKINTS erratum + * can pile up pending interrupts. */ + cortex_m_set_maskints_for_halt(target); + + cortex_m_clear_halt(target); + + retval = cortex_m_read_dhcsr_atomic_sticky(target); + if (retval != ERROR_OK) + return retval; + + retval = armv7m->examine_debug_reason(target); + if (retval != ERROR_OK) + return retval; + + /* examine PE security state */ + bool secure_state = false; + if (armv7m->arm.arch == ARM_ARCH_V8M) { + uint32_t dscsr; + + retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr); + if (retval != ERROR_OK) + return retval; + + secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS; + } + + /* Load all registers to arm.core_cache */ + if (!cortex_m->slow_register_read) { + retval = cortex_m_fast_read_all_regs(target); + if (retval == ERROR_TIMEOUT_REACHED) { + cortex_m->slow_register_read = true; + LOG_TARGET_DEBUG(target, "Switched to slow register read"); + } + } + + if (cortex_m->slow_register_read) + retval = cortex_m_slow_read_all_regs(target); + + if (retval != ERROR_OK) + return retval; + + r = arm->cpsr; + xpsr = buf_get_u32(r->value, 0, 32); + + /* Are we in an exception handler */ + if (xpsr & 0x1FF) { + armv7m->exception_number = (xpsr & 0x1FF); + + arm->core_mode = ARM_MODE_HANDLER; + arm->map = armv7m_msp_reg_map; + } else { + unsigned int control = buf_get_u32(arm->core_cache + ->reg_list[ARMV7M_CONTROL].value, 0, 3); + + /* is this thread privileged? */ + arm->core_mode = control & 1 + ? ARM_MODE_USER_THREAD + : ARM_MODE_THREAD; + + /* which stack is it using? */ + if (control & 2) + arm->map = armv7m_psp_reg_map; + else + arm->map = armv7m_msp_reg_map; + + armv7m->exception_number = 0; + } + + if (armv7m->exception_number) + cortex_m_examine_exception_reason(target); + + LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32 + ", cpu in %s state, target->state: %s", + arm_mode_name(arm->core_mode), + buf_get_u32(arm->pc->value, 0, 32), + secure_state ? 
"Secure" : "Non-Secure", + target_state_name(target)); + + if (armv7m->post_debug_entry) { + retval = armv7m->post_debug_entry(target); + if (retval != ERROR_OK) + return retval; + } + + return ERROR_OK; +} + +static int cortex_m_poll_one(struct target *target) +{ + int detected_failure = ERROR_OK; + int retval = ERROR_OK; + enum target_state prev_target_state = target->state; + struct cortex_m_common *cortex_m = target_to_cm(target); + struct armv7m_common *armv7m = &cortex_m->armv7m; + + /* Read from Debug Halting Control and Status Register */ + retval = cortex_m_read_dhcsr_atomic_sticky(target); + if (retval != ERROR_OK) { + target->state = TARGET_UNKNOWN; + return retval; + } + + /* Recover from lockup. See ARMv7-M architecture spec, + * section B1.5.15 "Unrecoverable exception cases". + */ + if (cortex_m->dcb_dhcsr & S_LOCKUP) { + LOG_TARGET_ERROR(target, "clearing lockup after double fault"); + cortex_m_write_debug_halt_mask(target, C_HALT, 0); + target->debug_reason = DBG_REASON_DBGRQ; + + /* We have to execute the rest (the "finally" equivalent, but + * still throw this exception again). + */ + detected_failure = ERROR_FAIL; + + /* refresh status bits */ + retval = cortex_m_read_dhcsr_atomic_sticky(target); + if (retval != ERROR_OK) + return retval; + } + + if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) { + cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST; + if (target->state != TARGET_RESET) { + target->state = TARGET_RESET; + LOG_TARGET_INFO(target, "external reset detected"); + } + return ERROR_OK; + } + + if (target->state == TARGET_RESET) { + /* Cannot switch context while running so endreset is + * called with target->state == TARGET_RESET + */ + LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32, + cortex_m->dcb_dhcsr); + retval = cortex_m_endreset_event(target); + if (retval != ERROR_OK) { + target->state = TARGET_UNKNOWN; + return retval; + } + target->state = TARGET_RUNNING; + prev_target_state = TARGET_RUNNING; + } + + if (cortex_m->dcb_dhcsr & S_HALT) { + target->state = TARGET_HALTED; + + if (prev_target_state == TARGET_RUNNING || prev_target_state == TARGET_RESET) { + retval = cortex_m_debug_entry(target); + + /* arm_semihosting needs to know registers, don't run if debug entry returned error */ + if (retval == ERROR_OK && arm_semihosting(target, &retval) != 0) + return retval; + + if (target->smp) { + LOG_TARGET_DEBUG(target, "postpone target event 'halted'"); + target->smp_halt_event_postponed = true; + } else { + /* regardless of errors returned in previous code update state */ + target_call_event_callbacks(target, TARGET_EVENT_HALTED); + } + } + if (prev_target_state == TARGET_DEBUG_RUNNING) { + retval = cortex_m_debug_entry(target); + + target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED); + } + if (retval != ERROR_OK) + return retval; + } + + if (target->state == TARGET_UNKNOWN) { + /* Check if processor is retiring instructions or sleeping. + * Unlike S_RESET_ST here we test if the target *is* running now, + * not if it has been running (possibly in the past). Instructions are + * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST + * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky. 
+ */ + if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) { + target->state = TARGET_RUNNING; + retval = ERROR_OK; + } + } + + /* Check that target is truly halted, since the target could be resumed externally */ + if (prev_target_state == TARGET_HALTED && !(cortex_m->dcb_dhcsr & S_HALT)) { + /* registers are now invalid */ + register_cache_invalidate(armv7m->arm.core_cache); + + target->state = TARGET_RUNNING; + LOG_TARGET_WARNING(target, "external resume detected"); + target_call_event_callbacks(target, TARGET_EVENT_RESUMED); + retval = ERROR_OK; + } + + /* Did we detect a failure condition that we cleared? */ + if (detected_failure != ERROR_OK) + retval = detected_failure; + return retval; +} + +static int cortex_m_halt_one(struct target *target); + +static int cortex_m_smp_halt_all(struct list_head *smp_targets) +{ + int retval = ERROR_OK; + struct target_list *head; + + foreach_smp_target(head, smp_targets) { + struct target *curr = head->target; + if (!target_was_examined(curr)) + continue; + if (curr->state == TARGET_HALTED) + continue; + + int ret2 = cortex_m_halt_one(curr); + if (retval == ERROR_OK) + retval = ret2; /* store the first error code ignore others */ + } + return retval; +} + +static int cortex_m_smp_post_halt_poll(struct list_head *smp_targets) +{ + int retval = ERROR_OK; + struct target_list *head; + + foreach_smp_target(head, smp_targets) { + struct target *curr = head->target; + if (!target_was_examined(curr)) + continue; + /* skip targets that were already halted */ + if (curr->state == TARGET_HALTED) + continue; + + int ret2 = cortex_m_poll_one(curr); + if (retval == ERROR_OK) + retval = ret2; /* store the first error code ignore others */ + } + return retval; +} + +static int cortex_m_poll_smp(struct list_head *smp_targets) +{ + int retval = ERROR_OK; + struct target_list *head; + bool halted = false; + + foreach_smp_target(head, smp_targets) { + struct target *curr = head->target; + if (curr->smp_halt_event_postponed) { + halted = true; + break; + } + } + + if (halted) { + retval = cortex_m_smp_halt_all(smp_targets); + + int ret2 = cortex_m_smp_post_halt_poll(smp_targets); + if (retval == ERROR_OK) + retval = ret2; /* store the first error code ignore others */ + + foreach_smp_target(head, smp_targets) { + struct target *curr = head->target; + if (!curr->smp_halt_event_postponed) + continue; + + curr->smp_halt_event_postponed = false; + if (curr->state == TARGET_HALTED) { + LOG_TARGET_DEBUG(curr, "sending postponed target event 'halted'"); + target_call_event_callbacks(curr, TARGET_EVENT_HALTED); + } + } + /* There is no need to set gdb_service->target + * as hwthread_update_threads() selects an interesting thread + * by its own + */ + } + return retval; +} + +static int cortex_m_poll(struct target *target) +{ + int retval = cortex_m_poll_one(target); + + if (target->smp) { + struct target_list *last; + last = list_last_entry(target->smp_targets, struct target_list, lh); + if (target == last->target) + /* After the last target in SMP group has been polled + * check for postponed halted events and eventually halt and re-poll + * other targets */ + cortex_m_poll_smp(target->smp_targets); + } + return retval; +} + +static int cortex_m_halt_one(struct target *target) +{ + LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target)); + + if (target->state == TARGET_HALTED) { + LOG_TARGET_DEBUG(target, "target was already halted"); + return ERROR_OK; + } + + if (target->state == TARGET_UNKNOWN) + LOG_TARGET_WARNING(target, "target was 
in unknown state when halt was requested"); + + if (target->state == TARGET_RESET) { + if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) { + LOG_TARGET_ERROR(target, "can't request a halt while in reset if nSRST pulls nTRST"); + return ERROR_TARGET_FAILURE; + } else { + /* we came here in a reset_halt or reset_init sequence + * debug entry was already prepared in cortex_m3_assert_reset() + */ + target->debug_reason = DBG_REASON_DBGRQ; + + return ERROR_OK; + } + } + + /* Write to Debug Halting Control and Status Register */ + cortex_m_write_debug_halt_mask(target, C_HALT, 0); + + /* Do this really early to minimize the window where the MASKINTS erratum + * can pile up pending interrupts. */ + cortex_m_set_maskints_for_halt(target); + + target->debug_reason = DBG_REASON_DBGRQ; + + return ERROR_OK; +} + +static int cortex_m_halt(struct target *target) +{ + if (target->smp) + return cortex_m_smp_halt_all(target->smp_targets); + else + return cortex_m_halt_one(target); +} + +static int cortex_m_soft_reset_halt(struct target *target) +{ + struct cortex_m_common *cortex_m = target_to_cm(target); + struct armv7m_common *armv7m = &cortex_m->armv7m; + int retval, timeout = 0; + + /* on single cortex_m MCU soft_reset_halt should be avoided as same functionality + * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'. + * As this reset only uses VC_CORERESET it would only ever reset the cortex_m + * core, not the peripherals */ + LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead."); + + if (!cortex_m->vectreset_supported) { + LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core"); + return ERROR_FAIL; + } + + /* Set C_DEBUGEN */ + retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS); + if (retval != ERROR_OK) + return retval; + + /* Enter debug state on reset; restore DEMCR in endreset_event() */ + retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, + TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET); + if (retval != ERROR_OK) + return retval; + + /* Request a core-only reset */ + retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, + AIRCR_VECTKEY | AIRCR_VECTRESET); + if (retval != ERROR_OK) + return retval; + target->state = TARGET_RESET; + + /* registers are now invalid */ + register_cache_invalidate(cortex_m->armv7m.arm.core_cache); + + while (timeout < 100) { + retval = cortex_m_read_dhcsr_atomic_sticky(target); + if (retval == ERROR_OK) { + retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, + &cortex_m->nvic_dfsr); + if (retval != ERROR_OK) + return retval; + if ((cortex_m->dcb_dhcsr & S_HALT) + && (cortex_m->nvic_dfsr & DFSR_VCATCH)) { + LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32, + cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr); + cortex_m_poll(target); + /* FIXME restore user's vector catch config */ + return ERROR_OK; + } else { + LOG_TARGET_DEBUG(target, "waiting for system reset-halt, " + "DHCSR 0x%08" PRIx32 ", %d ms", + cortex_m->dcb_dhcsr, timeout); + } + } + timeout++; + alive_sleep(1); + } + + return ERROR_OK; +} + +static int cortex_m_restore_one(struct target *target, bool current, + target_addr_t *address, bool handle_breakpoints, bool debug_execution) +{ + struct armv7m_common *armv7m = target_to_armv7m(target); + struct breakpoint *breakpoint = NULL; + uint32_t resume_pc; + struct reg *r; + + if (target->state != TARGET_HALTED) { + LOG_TARGET_ERROR(target, "target not halted"); + return 
ERROR_TARGET_NOT_HALTED; + } + + if (!debug_execution) { + target_free_all_working_areas(target); + cortex_m_enable_breakpoints(target); + cortex_m_enable_watchpoints(target); + } + + if (debug_execution) { + r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK; + + /* Disable interrupts */ + /* We disable interrupts in the PRIMASK register instead of + * masking with C_MASKINTS. This is probably the same issue + * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS + * in parallel with disabled interrupts can cause local faults + * to not be taken. + * + * This breaks non-debug (application) execution if not + * called from armv7m_start_algorithm() which saves registers. + */ + buf_set_u32(r->value, 0, 1, 1); + r->dirty = true; + r->valid = true; + + /* Make sure we are in Thumb mode, set xPSR.T bit */ + /* armv7m_start_algorithm() initializes entire xPSR register. + * This duplicity handles the case when cortex_m_resume() + * is used with the debug_execution flag directly, + * not called through armv7m_start_algorithm(). + */ + r = armv7m->arm.cpsr; + buf_set_u32(r->value, 24, 1, 1); + r->dirty = true; + r->valid = true; + } + + /* current = 1: continue on current pc, otherwise continue at <address> */ + r = armv7m->arm.pc; + if (!current) { + buf_set_u32(r->value, 0, 32, *address); + r->dirty = true; + r->valid = true; + } + + /* if we halted last time due to a bkpt instruction + * then we have to manually step over it, otherwise + * the core will break again */ + + if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32)) + && !debug_execution) + armv7m_maybe_skip_bkpt_inst(target, NULL); + + resume_pc = buf_get_u32(r->value, 0, 32); + if (current) + *address = resume_pc; + + int retval = armv7m_restore_context(target); + if (retval != ERROR_OK) + return retval; + + /* the front-end may request us not to handle breakpoints */ + if (handle_breakpoints) { + /* Single step past breakpoint at current address */ + breakpoint = breakpoint_find(target, resume_pc); + if (breakpoint) { + LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")", + breakpoint->address, + breakpoint->unique_id); + retval = cortex_m_unset_breakpoint(target, breakpoint); + if (retval == ERROR_OK) + retval = cortex_m_single_step_core(target); + int ret2 = cortex_m_set_breakpoint(target, breakpoint); + if (retval != ERROR_OK) + return retval; + if (ret2 != ERROR_OK) + return ret2; + } + } + + return ERROR_OK; +} + +static int cortex_m_restart_one(struct target *target, bool debug_execution) +{ + struct armv7m_common *armv7m = target_to_armv7m(target); + + /* Restart core */ + cortex_m_set_maskints_for_run(target); + cortex_m_write_debug_halt_mask(target, 0, C_HALT); + + target->debug_reason = DBG_REASON_NOTHALTED; + /* registers are now invalid */ + register_cache_invalidate(armv7m->arm.core_cache); + + if (!debug_execution) { + target->state = TARGET_RUNNING; + target_call_event_callbacks(target, TARGET_EVENT_RESUMED); + } else { + target->state = TARGET_DEBUG_RUNNING; + target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED); + } + + return ERROR_OK; +} + +static int cortex_m_restore_smp(struct target *target, bool handle_breakpoints) +{ + struct target_list *head; + target_addr_t address; + foreach_smp_target(head, target->smp_targets) { + struct target *curr = head->target; + /* skip calling target */ + if (curr == target) + continue; + if (!target_was_examined(curr)) + continue; + /* skip running targets */ + if (curr->state == TARGET_RUNNING) + continue; + + int retval = 
cortex_m_restore_one(curr, true, &address, + handle_breakpoints, false); + if (retval != ERROR_OK) + return retval; + + retval = cortex_m_restart_one(curr, false); + if (retval != ERROR_OK) + return retval; + + LOG_TARGET_DEBUG(curr, "SMP resumed at " TARGET_ADDR_FMT, address); + } + return ERROR_OK; +} + +static int cortex_m_resume(struct target *target, int current, + target_addr_t address, int handle_breakpoints, int debug_execution) +{ + int retval = cortex_m_restore_one(target, !!current, &address, !!handle_breakpoints, !!debug_execution); + if (retval != ERROR_OK) { + LOG_TARGET_ERROR(target, "context restore failed, aborting resume"); + return retval; + } + + if (target->smp && !debug_execution) { + retval = cortex_m_restore_smp(target, !!handle_breakpoints); + if (retval != ERROR_OK) + LOG_WARNING("resume of a SMP target failed, trying to resume current one"); + } + + cortex_m_restart_one(target, !!debug_execution); + if (retval != ERROR_OK) { + LOG_TARGET_ERROR(target, "resume failed"); + return retval; + } + + LOG_TARGET_DEBUG(target, "%sresumed at " TARGET_ADDR_FMT, + debug_execution ? "debug " : "", address); + + return ERROR_OK; +} + +/* int irqstepcount = 0; */ +static int cortex_m_step(struct target *target, int current, + target_addr_t address, int handle_breakpoints) +{ + struct cortex_m_common *cortex_m = target_to_cm(target); + struct armv7m_common *armv7m = &cortex_m->armv7m; + struct breakpoint *breakpoint = NULL; + struct reg *pc = armv7m->arm.pc; + bool bkpt_inst_found = false; + int retval; + bool isr_timed_out = false; + + if (target->state != TARGET_HALTED) { + LOG_TARGET_WARNING(target, "target not halted"); + return ERROR_TARGET_NOT_HALTED; + } + + /* Just one of SMP cores will step. Set the gdb control + * target to current one or gdb miss gdb-end event */ + if (target->smp && target->gdb_service) + target->gdb_service->target = target; + + /* current = 1: continue on current pc, otherwise continue at <address> */ + if (!current) { + buf_set_u32(pc->value, 0, 32, address); + pc->dirty = true; + pc->valid = true; + } + + uint32_t pc_value = buf_get_u32(pc->value, 0, 32); + + /* the front-end may request us not to handle breakpoints */ + if (handle_breakpoints) { + breakpoint = breakpoint_find(target, pc_value); + if (breakpoint) + cortex_m_unset_breakpoint(target, breakpoint); + } + + armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found); + + target->debug_reason = DBG_REASON_SINGLESTEP; + + armv7m_restore_context(target); + + target_call_event_callbacks(target, TARGET_EVENT_RESUMED); + + /* if no bkpt instruction is found at pc then we can perform + * a normal step, otherwise we have to manually step over the bkpt + * instruction - as such simulate a step */ + if (!bkpt_inst_found) { + if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) { + /* Automatic ISR masking mode off: Just step over the next + * instruction, with interrupts on or off as appropriate. */ + cortex_m_set_maskints_for_step(target); + cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT); + } else { + /* Process interrupts during stepping in a way they don't interfere + * debugging. + * + * Principle: + * + * Set a temporary break point at the current pc and let the core run + * with interrupts enabled. Pending interrupts get served and we run + * into the breakpoint again afterwards. Then we step over the next + * instruction with interrupts disabled. + * + * If the pending interrupts don't complete within time, we leave the + * core running. 
This may happen if the interrupts trigger faster + * than the core can process them or the handler doesn't return. + * + * If no more breakpoints are available we simply do a step with + * interrupts enabled. + * + */ + + /* 2012-09-29 ph + * + * If a break point is already set on the lower half word then a break point on + * the upper half word will not break again when the core is restarted. So we + * just step over the instruction with interrupts disabled. + * + * The documentation has no information about this, it was found by observation + * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to + * suffer from this problem. + * + * To add some confusion: pc_value has bit 0 always set, while the breakpoint + * address has it always cleared. The former is done to indicate thumb mode + * to gdb. + * + */ + if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) { + LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled"); + cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0); + cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT); + /* Re-enable interrupts if appropriate */ + cortex_m_write_debug_halt_mask(target, C_HALT, 0); + cortex_m_set_maskints_for_halt(target); + } else { + /* Set a temporary break point */ + if (breakpoint) { + retval = cortex_m_set_breakpoint(target, breakpoint); + } else { + enum breakpoint_type type = BKPT_HARD; + if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) { + /* FPB rev.1 cannot handle such addr, try BKPT instr */ + type = BKPT_SOFT; + } + retval = breakpoint_add(target, pc_value, 2, type); + } + + bool tmp_bp_set = (retval == ERROR_OK); + + /* No more breakpoints left, just do a step */ + if (!tmp_bp_set) { + cortex_m_set_maskints_for_step(target); + cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT); + /* Re-enable interrupts if appropriate */ + cortex_m_write_debug_halt_mask(target, C_HALT, 0); + cortex_m_set_maskints_for_halt(target); + } else { + /* Start the core */ + LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts"); + int64_t t_start = timeval_ms(); + cortex_m_set_maskints_for_run(target); + cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP); + + /* Wait for pending handlers to complete or timeout */ + do { + retval = cortex_m_read_dhcsr_atomic_sticky(target); + if (retval != ERROR_OK) { + target->state = TARGET_UNKNOWN; + return retval; + } + isr_timed_out = ((timeval_ms() - t_start) > 500); + } while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out)); + + /* only remove breakpoint if we created it */ + if (breakpoint) { + cortex_m_unset_breakpoint(target, breakpoint); + } else { + /* Remove the temporary breakpoint */ + breakpoint_remove(target, pc_value); + } + + if (isr_timed_out) { + LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, " + "leaving target running"); + } else { + /* Step over next instruction with interrupts disabled */ + cortex_m_set_maskints_for_step(target); + cortex_m_write_debug_halt_mask(target, + C_HALT | C_MASKINTS, + 0); + cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT); + /* Re-enable interrupts if appropriate */ + cortex_m_write_debug_halt_mask(target, C_HALT, 0); + cortex_m_set_maskints_for_halt(target); + } + } + } + } + } + + retval = cortex_m_read_dhcsr_atomic_sticky(target); + if (retval != ERROR_OK) + return retval; + + /* registers are now invalid */ + register_cache_invalidate(armv7m->arm.core_cache); + + if (breakpoint) + cortex_m_set_breakpoint(target, breakpoint); + + 
if (isr_timed_out) { + /* Leave the core running. The user has to stop execution manually. */ + target->debug_reason = DBG_REASON_NOTHALTED; + target->state = TARGET_RUNNING; + return ERROR_OK; + } + + LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32 + " nvic_icsr = 0x%" PRIx32, + cortex_m->dcb_dhcsr, cortex_m->nvic_icsr); + + retval = cortex_m_debug_entry(target); + if (retval != ERROR_OK) + return retval; + target_call_event_callbacks(target, TARGET_EVENT_HALTED); + + LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32 + " nvic_icsr = 0x%" PRIx32, + cortex_m->dcb_dhcsr, cortex_m->nvic_icsr); + + return ERROR_OK; +} + +static int km1m7xx_m_assert_reset(struct target *target) +{ + struct cortex_m_common *cortex_m = target_to_cm(target); + struct armv7m_common *armv7m = &cortex_m->armv7m; + enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config; + + LOG_TARGET_DEBUG(target, "target->state: %s,%s examined", + target_state_name(target), + target_was_examined(target) ? "" : " not"); + + enum reset_types jtag_reset_config = jtag_get_reset_config(); + + if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) { + /* allow scripts to override the reset event */ + + target_handle_event(target, TARGET_EVENT_RESET_ASSERT); + register_cache_invalidate(cortex_m->armv7m.arm.core_cache); + target->state = TARGET_RESET; + + return ERROR_OK; + } + + /* some cores support connecting while srst is asserted + * use that mode is it has been configured */ + + bool srst_asserted = false; + + if ((jtag_reset_config & RESET_HAS_SRST) && + ((jtag_reset_config & RESET_SRST_NO_GATING) || !armv7m->debug_ap)) { + /* If we have no debug_ap, asserting SRST is the only thing + * we can do now */ + adapter_assert_reset(); + srst_asserted = true; + } + + /* TODO: replace the hack calling target_examine_one() + * as soon as a better reset framework is available */ + if (!target_was_examined(target) && !target->defer_examine + && srst_asserted && (jtag_reset_config & RESET_SRST_NO_GATING)) { + LOG_TARGET_DEBUG(target, "Trying to re-examine under reset"); + target_examine_one(target); + } + + /* We need at least debug_ap to go further. + * Inform user and bail out if we don't have one. */ + if (!armv7m->debug_ap) { + if (srst_asserted) { + if (target->reset_halt) + LOG_TARGET_ERROR(target, "Debug AP not available, will not halt after reset!"); + + /* Do not propagate error: reset was asserted, proceed to deassert! */ + target->state = TARGET_RESET; + register_cache_invalidate(cortex_m->armv7m.arm.core_cache); + return ERROR_OK; + + } else { + LOG_TARGET_ERROR(target, "Debug AP not available, reset NOT asserted!"); + return ERROR_FAIL; + } + } + + /* Start of original procedure for km1m7xx series */ + uint32_t optreg0 = 0; + uint32_t cpuid = 0; + int ret = 0; + uint32_t optreg0_key = 0x672c0000; + + /* Disable WDT */ + ret = target_read_u32(target, 0xf0102010, &optreg0); + if (ret != ERROR_OK) + return ret; + + ret = target_write_u32(target, 0xf0102010, ((optreg0 & 0xffff) | optreg0_key | 0x00000004)); + if (ret != ERROR_OK) + return ret; + + /* When CPUID is 0x00000000, it may be security locked. 
*/ + ret = target_read_u32(target, 0xe000ed00, &cpuid); + if (ret != ERROR_OK) + return ret; + + LOG_INFO("CPUID = 0x%08x\n", cpuid); + if (cpuid == 0 && km1m7xx_key_set == 1) { + /* Unlock DAP */ + target_write_u32(target, 0xf0102000, km1m7xx_key_data[0]); + target_write_u32(target, 0xf0102004, km1m7xx_key_data[1]); + target_write_u32(target, 0xf0102008, km1m7xx_key_data[2]); + target_write_u32(target, 0xf010200c, km1m7xx_key_data[3]); + + /* Still if the CPUID is 0x00000000, the security can not be unlocked */ + ret = target_read_u32(target, 0xe000ed00, &cpuid); + if (ret != ERROR_OK) + return ret; + + LOG_INFO("CPUID = 0x%08x\n", cpuid); + if (cpuid == 0x00000000) { + LOG_ERROR("Cannot unlock security"); + return ERROR_FAIL; + } + } + /* End of original procedure for km1m7xx series */ + + /* Enable debug requests */ + int retval = cortex_m_read_dhcsr_atomic_sticky(target); + + /* Store important errors instead of failing and proceed to reset assert */ + + if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN)) + retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS); + + /* If the processor is sleeping in a WFI or WFE instruction, the + * C_HALT bit must be asserted to regain control */ + if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP)) + retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0); + + mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0); + /* Ignore less important errors */ + + if (!target->reset_halt) { + /* Set/Clear C_MASKINTS in a separate operation */ + cortex_m_set_maskints_for_run(target); + + /* clear any debug flags before resuming */ + cortex_m_clear_halt(target); + + /* clear C_HALT in dhcsr reg */ + cortex_m_write_debug_halt_mask(target, 0, C_HALT); + } else { + /* Halt in debug on reset; endreset_event() restores DEMCR. + * + * REVISIT catching BUSERR presumably helps to defend against + * bad vector table entries. Should this include MMERR or + * other flags too? + */ + int retval2; + retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR, + TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET); + if (retval != ERROR_OK || retval2 != ERROR_OK) + LOG_TARGET_INFO(target, "AP write error, reset will not halt"); + } + + if (jtag_reset_config & RESET_HAS_SRST) { + /* default to asserting srst */ + if (!srst_asserted) + adapter_assert_reset(); + + /* srst is asserted, ignore AP access errors */ + retval = ERROR_OK; + } else { + /* Use a standard Cortex-M3 software reset mechanism. + * We default to using VECTRESET as it is supported on all current cores + * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!) + * This has the disadvantage of not resetting the peripherals, so a + * reset-init event handler is needed to perform any peripheral resets. + */ + if (!cortex_m->vectreset_supported + && reset_config == CORTEX_M_RESET_VECTRESET) { + reset_config = CORTEX_M_RESET_SYSRESETREQ; + LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead."); + LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'."); + } + + LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ) + ? 
"SYSRESETREQ" : "VECTRESET"); + + if (reset_config == CORTEX_M_RESET_VECTRESET) { + LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event " + "handler to reset any peripherals or configure hardware srst support."); + } + + int retval3; + retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, + AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ) + ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET)); + if (retval3 != ERROR_OK) + LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset"); + + retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap); + if (retval3 != ERROR_OK) { + LOG_TARGET_ERROR(target, "DP initialisation failed"); + /* The error return value must not be propagated in this case. + * SYSRESETREQ or VECTRESET have been possibly triggered + * so reset processing should continue */ + } else { + /* I do not know why this is necessary, but it + * fixes strange effects (step/resume cause NMI + * after reset) on LM3S6918 -- Michael Schwingen + */ + uint32_t tmp; + mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp); + } + } + + target->state = TARGET_RESET; + jtag_sleep(50000); + + register_cache_invalidate(cortex_m->armv7m.arm.core_cache); + + /* now return stored error code if any */ + if (retval != ERROR_OK) + return retval; + + if (target->reset_halt && target_was_examined(target)) { + retval = target_halt(target); + if (retval != ERROR_OK) + return retval; + } + + return ERROR_OK; +} + +static int cortex_m_deassert_reset(struct target *target) +{ + struct armv7m_common *armv7m = &target_to_cm(target)->armv7m; + + LOG_TARGET_DEBUG(target, "target->state: %s,%s examined", + target_state_name(target), + target_was_examined(target) ? "" : " not"); + + /* deassert reset lines */ + adapter_deassert_reset(); + + enum reset_types jtag_reset_config = jtag_get_reset_config(); + + if ((jtag_reset_config & RESET_HAS_SRST) && + !(jtag_reset_config & RESET_SRST_NO_GATING) && + armv7m->debug_ap) { + + int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap); + if (retval != ERROR_OK) { + LOG_TARGET_ERROR(target, "DP initialisation failed"); + return retval; + } + } + + return ERROR_OK; +} + +static int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint) +{ + if (target->debug_reason != DBG_REASON_WATCHPOINT) + return ERROR_FAIL; + + struct cortex_m_common *cortex_m = target_to_cm(target); + + for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) { + if (!wp->is_set) + continue; + + unsigned int dwt_num = wp->number; + struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num; + + uint32_t dwt_function; + int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function); + if (retval != ERROR_OK) + return ERROR_FAIL; + + /* check the MATCHED bit */ + if (dwt_function & BIT(24)) { + *hit_watchpoint = wp; + return ERROR_OK; + } + } + + return ERROR_FAIL; +} + +static int cortex_m_read_memory(struct target *target, target_addr_t address, + uint32_t size, uint32_t count, uint8_t *buffer) +{ + struct armv7m_common *armv7m = target_to_armv7m(target); + + if (armv7m->arm.arch == ARM_ARCH_V6M) { + /* armv6m does not handle unaligned memory access */ + if ((size == 4 && (address & 0x3u)) || (size == 2 && (address & 0x1u))) + return ERROR_TARGET_UNALIGNED_ACCESS; + } + + return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address); +} + +static int cortex_m_write_memory(struct target *target, target_addr_t address, + uint32_t size, 
uint32_t count, const uint8_t *buffer) +{ + struct armv7m_common *armv7m = target_to_armv7m(target); + + if (armv7m->arm.arch == ARM_ARCH_V6M) { + /* armv6m does not handle unaligned memory access */ + if ((size == 4 && (address & 0x3u)) || (size == 2 && (address & 0x1u))) + return ERROR_TARGET_UNALIGNED_ACCESS; + } + + return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address); +} + +static int cortex_m_init_target(struct command_context *cmd_ctx, + struct target *target) +{ + armv7m_build_reg_cache(target); + arm_semihosting_init(target); + return ERROR_OK; +} + +static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl) +{ + struct armv7m_common *armv7m = target_to_armv7m(target); + uint16_t dcrdr; + uint8_t buf[2]; + int retval; + + retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR); + if (retval != ERROR_OK) + return retval; + + dcrdr = target_buffer_get_u16(target, buf); + *ctrl = (uint8_t)dcrdr; + *value = (uint8_t)(dcrdr >> 8); + + LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl); + + /* write ack back to software dcc register + * signify we have read data */ + if (dcrdr & (1 << 0)) { + target_buffer_set_u16(target, buf, 0); + retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR); + if (retval != ERROR_OK) + return retval; + } + + return ERROR_OK; +} + +static int cortex_m_target_request_data(struct target *target, + uint32_t size, uint8_t *buffer) +{ + uint8_t data; + uint8_t ctrl; + uint32_t i; + + for (i = 0; i < (size * 4); i++) { + int retval = cortex_m_dcc_read(target, &data, &ctrl); + if (retval != ERROR_OK) + return retval; + buffer[i] = data; + } + + return ERROR_OK; +} + +static int cortex_m_handle_target_request(void *priv) +{ + struct target *target = priv; + if (!target_was_examined(target)) + return ERROR_OK; + + if (!target->dbg_msg_enabled) + return ERROR_OK; + + if (target->state == TARGET_RUNNING) { + uint8_t data; + uint8_t ctrl; + int retval; + + retval = cortex_m_dcc_read(target, &data, &ctrl); + if (retval != ERROR_OK) + return retval; + + /* check if we have data */ + if (ctrl & (1 << 0)) { + uint32_t request; + + /* we assume target is quick enough */ + request = data; + for (int i = 1; i <= 3; i++) { + retval = cortex_m_dcc_read(target, &data, &ctrl); + if (retval != ERROR_OK) + return retval; + request |= ((uint32_t)data << (i * 8)); + } + target_request(target, request); + } + } + + return ERROR_OK; +} + +static int cortex_m_init_arch_info(struct target *target, + struct cortex_m_common *cortex_m, struct adiv5_dap *dap) +{ + struct armv7m_common *armv7m = &cortex_m->armv7m; + + armv7m_init_arch_info(target, armv7m); + + /* default reset mode is to use srst if fitted + * if not it will use CORTEX_M3_RESET_VECTRESET */ + cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET; + + armv7m->arm.dap = dap; + + /* register arch-specific functions */ + armv7m->examine_debug_reason = cortex_m_examine_debug_reason; + + armv7m->post_debug_entry = NULL; + + armv7m->pre_restore_context = NULL; + + armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32; + armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32; + + target_register_timer_callback(cortex_m_handle_target_request, 1, + TARGET_TIMER_TYPE_PERIODIC, target); + + return ERROR_OK; +} + +static int cortex_m_target_create(struct target *target, Jim_Interp *interp) +{ + struct adiv5_private_config *pc; + + pc = (struct adiv5_private_config *)target->private_config; + if (adiv5_verify_config(pc) != ERROR_OK) + return 
ERROR_FAIL; + + struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common)); + if (!cortex_m) { + LOG_TARGET_ERROR(target, "No memory creating target"); + return ERROR_FAIL; + } + + cortex_m->common_magic = CORTEX_M_COMMON_MAGIC; + cortex_m->apsel = pc->ap_num; + + cortex_m_init_arch_info(target, cortex_m, pc->dap); + + return ERROR_OK; +} + +/*--------------------------------------------------------------------------*/ + +static int cortex_m_verify_pointer(struct command_invocation *cmd, + struct cortex_m_common *cm) +{ + if (!is_cortex_m_with_dap_access(cm)) { + command_print(cmd, "target is not a Cortex-M"); + return ERROR_TARGET_INVALID; + } + return ERROR_OK; +} + +/* + * Only stuff below this line should need to verify that its target + * is a Cortex-M3. Everything else should have indirected through the + * cortexm3_target structure, which is only used with CM3 targets. + */ + +COMMAND_HANDLER(handle_cortex_m_vector_catch_command) +{ + struct target *target = get_current_target(CMD_CTX); + struct cortex_m_common *cortex_m = target_to_cm(target); + struct armv7m_common *armv7m = &cortex_m->armv7m; + uint32_t demcr = 0; + int retval; + + static const struct { + char name[10]; + unsigned int mask; + } vec_ids[] = { + { "hard_err", VC_HARDERR, }, + { "int_err", VC_INTERR, }, + { "bus_err", VC_BUSERR, }, + { "state_err", VC_STATERR, }, + { "chk_err", VC_CHKERR, }, + { "nocp_err", VC_NOCPERR, }, + { "mm_err", VC_MMERR, }, + { "reset", VC_CORERESET, }, + }; + + retval = cortex_m_verify_pointer(CMD, cortex_m); + if (retval != ERROR_OK) + return retval; + + if (!target_was_examined(target)) { + LOG_TARGET_ERROR(target, "Target not examined yet"); + return ERROR_FAIL; + } + + retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr); + if (retval != ERROR_OK) + return retval; + + if (CMD_ARGC > 0) { + unsigned int catch = 0; + + if (CMD_ARGC == 1) { + if (strcmp(CMD_ARGV[0], "all") == 0) { + catch = VC_HARDERR | VC_INTERR | VC_BUSERR + | VC_STATERR | VC_CHKERR | VC_NOCPERR + | VC_MMERR | VC_CORERESET; + goto write; + } else if (strcmp(CMD_ARGV[0], "none") == 0) { + goto write; + } + } + while (CMD_ARGC-- > 0) { + unsigned int i; + for (i = 0; i < ARRAY_SIZE(vec_ids); i++) { + if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0) + continue; + catch |= vec_ids[i].mask; + break; + } + if (i == ARRAY_SIZE(vec_ids)) { + LOG_TARGET_ERROR(target, "No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]); + return ERROR_COMMAND_SYNTAX_ERROR; + } + } +write: + /* For now, armv7m->demcr only stores vector catch flags. */ + armv7m->demcr = catch; + + demcr &= ~0xffff; + demcr |= catch; + + /* write, but don't assume it stuck (why not??) */ + retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr); + if (retval != ERROR_OK) + return retval; + retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr); + if (retval != ERROR_OK) + return retval; + + /* FIXME be sure to clear DEMCR on clean server shutdown. + * Otherwise the vector catch hardware could fire when there's + * no debugger hooked up, causing much confusion... + */ + } + + for (unsigned int i = 0; i < ARRAY_SIZE(vec_ids); i++) { + command_print(CMD, "%9s: %s", vec_ids[i].name, + (demcr & vec_ids[i].mask) ? 
"catch" : "ignore"); + } + + return ERROR_OK; +} + +COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command) +{ + struct target *target = get_current_target(CMD_CTX); + struct cortex_m_common *cortex_m = target_to_cm(target); + int retval; + + static const struct jim_nvp nvp_maskisr_modes[] = { + { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO }, + { .name = "off", .value = CORTEX_M_ISRMASK_OFF }, + { .name = "on", .value = CORTEX_M_ISRMASK_ON }, + { .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY }, + { .name = NULL, .value = -1 }, + }; + const struct jim_nvp *n; + + + retval = cortex_m_verify_pointer(CMD, cortex_m); + if (retval != ERROR_OK) + return retval; + + if (target->state != TARGET_HALTED) { + command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME); + return ERROR_OK; + } + + if (CMD_ARGC > 0) { + n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]); + if (!n->name) + return ERROR_COMMAND_SYNTAX_ERROR; + cortex_m->isrmasking_mode = n->value; + cortex_m_set_maskints_for_halt(target); + } + + n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode); + command_print(CMD, "cortex_m interrupt mask %s", n->name); + + return ERROR_OK; +} + +COMMAND_HANDLER(handle_cortex_m_reset_config_command) +{ + struct target *target = get_current_target(CMD_CTX); + struct cortex_m_common *cortex_m = target_to_cm(target); + int retval; + char *reset_config; + + retval = cortex_m_verify_pointer(CMD, cortex_m); + if (retval != ERROR_OK) + return retval; + + if (CMD_ARGC > 0) { + if (strcmp(*CMD_ARGV, "sysresetreq") == 0) { + cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ; + } else if (strcmp(*CMD_ARGV, "vectreset") == 0) { + if (target_was_examined(target) + && !cortex_m->vectreset_supported) + LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!"); + else + cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET; + + } else { + return ERROR_COMMAND_SYNTAX_ERROR; + } + } + + switch (cortex_m->soft_reset_config) { + case CORTEX_M_RESET_SYSRESETREQ: + reset_config = "sysresetreq"; + break; + + case CORTEX_M_RESET_VECTRESET: + reset_config = "vectreset"; + break; + + default: + reset_config = "unknown"; + break; + } + + command_print(CMD, "cortex_m reset_config %s", reset_config); + + return ERROR_OK; +} + +COMMAND_HANDLER(km1m7xx_handle_keycode_file_command) +{ + FILE *fp_keyfile; + char key_str[16]; + int key_count; + + if (CMD_ARGC != 1) + return ERROR_COMMAND_SYNTAX_ERROR; + + fp_keyfile = fopen(CMD_ARGV[0], "r"); + if (!fp_keyfile) + return ERROR_FAIL; + + key_count = 0; + while (fgets(key_str, 15, fp_keyfile)) + km1m7xx_key_data[key_count++] = strtoul(key_str, NULL, 16); + + fclose(fp_keyfile); + + km1m7xx_key_set = 1; + + return ERROR_OK; +} + +COMMAND_HANDLER(km1m7xx_handle_keycode_data_command) +{ + char key_str[16]; + int key_count; + + if (CMD_ARGC != 4) + return ERROR_COMMAND_SYNTAX_ERROR; + + for (key_count = 0; key_count < 4; key_count++) { + if (strncmp(CMD_ARGV[key_count], "0x", 2) != 0) + strcpy(key_str, "0x"); + else + key_str[0] = '\0'; + + strcat(key_str, CMD_ARGV[key_count]); + COMMAND_PARSE_NUMBER(u32, key_str, km1m7xx_key_data[key_count]); + } + + km1m7xx_key_set = 1; + return ERROR_OK; +} + +static const struct command_registration km1m7xx_subcommand_handlers[] = { + { + .name = "maskisr", + .handler = handle_cortex_m_mask_interrupts_command, + .mode = COMMAND_EXEC, + .help = "mask cortex_m interrupts", + .usage = "['auto'|'on'|'off'|'steponly']", + }, + { + .name = "vector_catch", + .handler = 
handle_cortex_m_vector_catch_command, + .mode = COMMAND_EXEC, + .help = "configure hardware vectors to trigger debug entry", + .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]", + }, + { + .name = "reset_config", + .handler = handle_cortex_m_reset_config_command, + .mode = COMMAND_ANY, + .help = "configure software reset handling", + .usage = "['sysresetreq'|'vectreset']", + }, + { + .chain = smp_command_handlers, + }, + { + .name = "keycode_file", + .handler = km1m7xx_handle_keycode_file_command, + .mode = COMMAND_CONFIG, + .usage = "filename", + .help = "Set keycode file for authentication", + }, + { + .name = "keycode_data", + .handler = km1m7xx_handle_keycode_data_command, + .mode = COMMAND_CONFIG, + .usage = "keycode0 keycode1 keycode2 keycode3", + .help = "Set 4 keycode data for authentication", + }, + COMMAND_REGISTRATION_DONE +}; + +static const struct command_registration km1m7xx_command_handlers[] = { + { + .chain = armv7m_command_handlers, + }, + { + .chain = armv7m_trace_command_handlers, + }, + { + .name = "km1m7xx", + .mode = COMMAND_ANY, + .help = "km1m7xx command group", + .usage = "", + .chain = km1m7xx_subcommand_handlers, + }, + { + .chain = rtt_target_command_handlers, + }, + COMMAND_REGISTRATION_DONE +}; + +struct target_type km1m7xx_target = { + .name = "km1m7xx", + + .poll = cortex_m_poll, + .arch_state = armv7m_arch_state, + + .target_request_data = cortex_m_target_request_data, + + .halt = cortex_m_halt, + .resume = cortex_m_resume, + .step = cortex_m_step, + + .assert_reset = km1m7xx_m_assert_reset, + .deassert_reset = cortex_m_deassert_reset, + .soft_reset_halt = cortex_m_soft_reset_halt, + + .get_gdb_arch = arm_get_gdb_arch, + .get_gdb_reg_list = armv7m_get_gdb_reg_list, + + .read_memory = cortex_m_read_memory, + .write_memory = cortex_m_write_memory, + .checksum_memory = armv7m_checksum_memory, + .blank_check_memory = armv7m_blank_check_memory, + + .run_algorithm = armv7m_run_algorithm, + .start_algorithm = armv7m_start_algorithm, + .wait_algorithm = armv7m_wait_algorithm, + + .add_breakpoint = cortex_m_add_breakpoint, + .remove_breakpoint = cortex_m_remove_breakpoint, + .add_watchpoint = cortex_m_add_watchpoint, + .remove_watchpoint = cortex_m_remove_watchpoint, + .hit_watchpoint = cortex_m_hit_watchpoint, + + .commands = km1m7xx_command_handlers, + .target_create = cortex_m_target_create, + .target_jim_configure = adiv5_jim_configure, + .init_target = cortex_m_init_target, + .examine = cortex_m_examine, + .deinit_target = cortex_m_deinit_target, + + .profiling = cortex_m_profiling, +}; diff --git a/src/target/target.c b/src/target/target.c index c55b67cc95..95767ea8b6 100644 --- a/src/target/target.c +++ b/src/target/target.c @@ -4,7 +4,7 @@ * Copyright (C) 2005 by Dominic Rath * * dominic.r...@gmx.de * * * - * Copyright (C) 2007-2010 Øyvind Harboe * + * Copyright (C) 2007-2010 Øyvind Harboe * * oyvind.har...@zylin.com * * * * Copyright (C) 2008, Duane Ellis * @@ -104,6 +104,7 @@ extern struct target_type riscv_target; extern struct target_type mem_ap_target; extern struct target_type esirisc_target; extern struct target_type arcv2_target; +extern struct target_type km1m7xx_target; static struct target_type *target_types[] = { &arm7tdmi_target, @@ -144,6 +145,7 @@ static struct target_type *target_types[] = { &aarch64_target, &armv8r_target, &mips_mips64_target, + &km1m7xx_target, NULL, }; diff --git a/tcl/target/numicroKM1M7AF.cfg b/tcl/target/numicroKM1M7AF.cfg new file mode 100644 index 0000000000..a5827022e5 --- /dev/null +++ 
b/tcl/target/numicroKM1M7AF.cfg @@ -0,0 +1,71 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +# Adapt based on what transport is active. +if [catch {transport select}] { + echo "Error: unable to select a session transport. Can't continue." + shutdown +} + +proc swj_newdap {chip tag args} { + if [using_hla] { + eval hla newtap $chip $tag $args + } elseif [using_jtag] { + eval jtag newtap $chip $tag $args + } elseif [using_swd] { + eval swd newdap $chip $tag $args + } +} + +# Set Chipname +if { [info exists CHIPNAME] } { + set _CHIPNAME $CHIPNAME +} else { + set _CHIPNAME KM1M7AF00N +} + +# SWD DP IDCODE: the Nuvoton KM1M7xx (Cortex-M7) devices support the SWD transport only. +if { [info exists CPUDAPID] } { + set _CPUDAPID $CPUDAPID +} else { +# set _CPUDAPID 0x411fc271 + set _CPUDAPID 0x0bd11477 +} + +# Work-area is a space in RAM used for flash programming +# By default use 8kB +if { [info exists WORKAREASIZE] } { + set _WORKAREASIZE $WORKAREASIZE +} else { + set _WORKAREASIZE 0x2000 +} + + +# Debug Adapter Target Settings +swj_newdap $_CHIPNAME cpu -irlen 4 -expected-id $_CPUDAPID +dap create $_CHIPNAME.dap -chain-position $_CHIPNAME.cpu +set _TARGETNAME $_CHIPNAME.cpu +target create $_TARGETNAME km1m7xx -dap $_CHIPNAME.dap + + +$_TARGETNAME configure -work-area-phys 0x00000000 -work-area-size $_WORKAREASIZE -work-area-backup 0 + +# flash bank <name> km1m7xx <base> <size(autodetect,set to 0)> 0 0 <target#> +#set _FLASHNAME $_CHIPNAME.flash +#flash bank $_FLASHNAME numicro 0 $_FLASHSIZE 0 0 $_TARGETNAME +# flash size will be probed + + +set _FLASHNAME $_CHIPNAME.flash_i +flash bank $_FLASHNAME km1m7xx 0x00800000 0 0 0 $_TARGETNAME 0 + +set _FLASHNAME $_CHIPNAME.flash_d +flash bank $_FLASHNAME km1m7xx 0x10800000 0 0 0 $_TARGETNAME 0 + +# set default SWCLK frequency +adapter speed 1000 + +# set default srst setting +reset_config srst_only srst_nogate srst_push_pull connect_assert_srst +gdb_breakpoint_override hard + +$_TARGETNAME configure -event gdb-detach { shutdown } diff --git a/tcl/target/numicroKM1M7CF.cfg b/tcl/target/numicroKM1M7CF.cfg new file mode 100644 index 0000000000..297f5f1b13 --- /dev/null +++ b/tcl/target/numicroKM1M7CF.cfg @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +# Adapt based on what transport is active. +if [catch {transport select}] { + echo "Error: unable to select a session transport. Can't continue." + shutdown +} + +proc swj_newdap {chip tag args} { + if [using_hla] { + eval hla newtap $chip $tag $args + } elseif [using_jtag] { + eval jtag newtap $chip $tag $args + } elseif [using_swd] { + eval swd newdap $chip $tag $args + } +} + +# Set Chipname +if { [info exists CHIPNAME] } { + set _CHIPNAME $CHIPNAME +} else { + set _CHIPNAME KM1M7CFxx +} + +# SWD DP IDCODE: the Nuvoton KM1M7xx (Cortex-M7) devices support the SWD transport only. 
+if { [info exists CPUDAPID] } { + set _CPUDAPID $CPUDAPID +} else { +# set _CPUDAPID 0x411fc271 + set _CPUDAPID 0x0bd11477 +} + +# Work-area is a space in RAM used for flash programming +# By default use 8kB +if { [info exists WORKAREASIZE] } { + set _WORKAREASIZE $WORKAREASIZE +} else { + set _WORKAREASIZE 0x2000 +} + + +# Debug Adapter Target Settings +swj_newdap $_CHIPNAME cpu -irlen 4 -expected-id $_CPUDAPID +dap create $_CHIPNAME.dap -chain-position $_CHIPNAME.cpu +set _TARGETNAME $_CHIPNAME.cpu +target create $_TARGETNAME km1m7xx -dap $_CHIPNAME.dap + + +$_TARGETNAME configure -work-area-phys 0x00000000 -work-area-size $_WORKAREASIZE -work-area-backup 0 + +# flash bank <name> km1m7xx <base> <size(autodetect,set to 0)> 0 0 <target#> +#set _FLASHNAME $_CHIPNAME.flash +#flash bank $_FLASHNAME numicro 0 $_FLASHSIZE 0 0 $_TARGETNAME +# flash size will be probed + + +set _FLASHNAME $_CHIPNAME.flash_i +flash bank $_FLASHNAME km1m7xx 0x00800000 0 0 0 $_TARGETNAME 0 + +set _FLASHNAME $_CHIPNAME.flash_d0 +flash bank $_FLASHNAME km1m7xx 0x00C04000 0 0 0 $_TARGETNAME 0 + +set _FLASHNAME $_CHIPNAME.flash_d1 +flash bank $_FLASHNAME km1m7xx 0x00E04000 0 0 0 $_TARGETNAME 0 + +# set default SWCLK frequency +adapter speed 1000 + +# set default srst setting +reset_config srst_only srst_nogate srst_push_pull connect_assert_srst +gdb_breakpoint_override hard + +$_TARGETNAME configure -event gdb-detach { shutdown } --
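
For reference, a minimal, hypothetical board configuration sketch showing how the keycode commands added by this patch might be combined with the bundled target script. It is not part of the change itself: the adapter script, the board file name, the keycode file path and the four key words are placeholders, and the unlock flow assumed here is the one implemented in km1m7xx_m_assert_reset() above (the keys are written to 0xf0102000..0xf010200c when CPUID reads back as 0x00000000).

# board/my_km1m7af_board.cfg (hypothetical example)
source [find interface/cmsis-dap.cfg]
transport select swd
source [find target/numicroKM1M7AF.cfg]
# keycode_file/keycode_data are registered as COMMAND_CONFIG, so they must be
# issued after 'target create' (done in the sourced target script) and before
# 'init'. Replace the placeholder words with the keycodes programmed into the device.
km1m7xx keycode_data 0x01234567 0x89abcdef 0x01234567 0x89abcdef
# alternatively, load the four words (one hex value per line) from a file:
# km1m7xx keycode_file /path/to/keycodes.txt

With either command in place, a security-locked part whose CPUID initially reads as zero should be unlocked during 'reset halt', after which the km1m7xx flash banks declared in the target script can be probed and programmed.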