This is an automated email from Gerrit.

"Daniel Goehring <dgoeh...@os.amperecomputing.com>" just uploaded a new patch 
set to Gerrit, which you can find at https://review.openocd.org/c/openocd/+/7193

-- gerrit

commit 3b80e3f5b082e09c515d137c04a707a0b4fa99df
Author: Steve Clevenger <sccleven...@os.amperecomputing.com>
Date:   Mon Nov 29 21:53:56 2021 +0000

    OpenOCD CTI rework for tighter coupling of run/halt events.
    
    Change-Id: I440feeecefc59b653c453729c62689c28324ecc3
    Signed-off-by: Steve Clevenger <sccleven...@os.amperecomputing.com>

diff --git a/src/target/aarch64.c b/src/target/aarch64.c
index 8b0cf3b631..fce8c97e30 100644
--- a/src/target/aarch64.c
+++ b/src/target/aarch64.c
@@ -2,6 +2,7 @@
 
 /***************************************************************************
  *   Copyright (C) 2015 by David Ung                                       *
+ *   Copyright (C) 2019-2021, Ampere Computing LLC                         *
  *                                                                         *
  ***************************************************************************/
 
@@ -196,6 +197,7 @@ static int aarch64_mmu_modify(struct target *target, int enable)
 static int aarch64_init_debug_access(struct target *target)
 {
        struct armv8_common *armv8 = target_to_armv8(target);
+       struct aarch64_common *aarch64 = target_to_aarch64(target);
        int retval;
        uint32_t dummy;
 
@@ -217,7 +219,7 @@ static int aarch64_init_debug_access(struct target *target)
 
        /*
         * Static CTI configuration:
-        * Channel 0 -> trigger outputs HALT request to PE
+        * Channel 0 -> trigger outputs HALT request to PE (or all PEs)
         * Channel 1 -> trigger outputs Resume request to PE
         * Gate all channel trigger events from entering the CTM
         */
@@ -227,18 +229,27 @@ static int aarch64_init_debug_access(struct target *target)
        /* By default, gate all channel events to and from the CTM */
        if (retval == ERROR_OK)
                retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
+       /* For CTI extended mode, set up synchronous halt using channel 0. Each
+        * CTI propagates a channel 0 cross-halt trigger event in extended mode.
+        */
+       if (aarch64->cti_mode == AARCH64_CTIMODE_EXTEND) {
+               /* each CTI generates a debug request trigger in response to a channel event */
+               if (retval == ERROR_OK)
+                       retval = arm_cti_write_reg(armv8->cti, CTI_INEN0, CTI_CHNL(0));
+       } else {
+               if (retval == ERROR_OK)
+                       retval = arm_cti_write_reg(armv8->cti, CTI_INEN0, 0);
+       }
        /* output halt requests to PE on channel 0 event */
        if (retval == ERROR_OK)
                retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
        /* output restart requests to PE on channel 1 event */
        if (retval == ERROR_OK)
                retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
-       if (retval != ERROR_OK)
-               return retval;
 
        /* Resync breakpoint registers */
 
-       return ERROR_OK;
+       return retval;
 }
 
 /* Write to memory mapped registers directly with no cache or mmu handling */
@@ -375,7 +386,7 @@ static int aarch64_halt_one(struct target *target, enum halt_mode mode)
        if (retval != ERROR_OK)
                return retval;
 
-       /* trigger an event on channel 0, this outputs a halt request to the PE */
+       /* trigger an event on channel 0, this outputs a halt request */
        retval = arm_cti_pulse_channel(armv8->cti, 0);
        if (retval != ERROR_OK)
                return retval;
@@ -457,13 +468,16 @@ static int aarch64_halt_smp(struct target *target, bool exc_target)
 
 static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
 {
+       struct aarch64_common *aarch64 = target_to_aarch64(target);
        struct target *gdb_target = NULL;
        struct target_list *head;
        struct target *curr;
 
-       if (debug_reason == DBG_REASON_NOTHALTED) {
-               LOG_DEBUG("Halting remaining targets in SMP group");
-               aarch64_halt_smp(target, true);
+       if (aarch64->cti_mode == AARCH64_CTIMODE_LEGACY) {
+               if (debug_reason == DBG_REASON_NOTHALTED) {
+                       LOG_DEBUG("Halting remaining targets in SMP group");
+                       aarch64_halt_smp(target, true);
+               }
        }
 
        /* poll all targets in the group, but skip the target that serves GDB */
@@ -515,6 +529,7 @@ static int aarch64_poll(struct target *target)
        if (halted) {
                prev_target_state = target->state;
                if (prev_target_state != TARGET_HALTED) {
+                       struct aarch64_common *aarch64 = target_to_aarch64(target);
                        enum target_debug_reason debug_reason = target->debug_reason;
 
                        /* We have a halting debug event */
@@ -530,6 +545,17 @@ static int aarch64_poll(struct target *target)
                        if (arm_semihosting(target, &retval) != 0)
                                return retval;
 
+                       /* The verbose_halt_msg setting is tied to LOG_LVL here. OpenOCD
+                        * attempts to suppress (what it considers) redundant messages
+                        * related to GDB run/halt events. The unfortunate end result is
+                        * misleading log messages.
+                        */
+                       bool verbose_halt_msg = target->verbose_halt_msg;
+
+                       if ((aarch64->cti_mode == AARCH64_CTIMODE_EXTEND) &&
+                               (debug_level >= LOG_LVL_INFO))
+                               target->verbose_halt_msg = true;
+
                        switch (prev_target_state) {
                        case TARGET_RUNNING:
                        case TARGET_UNKNOWN:
@@ -542,6 +568,9 @@ static int aarch64_poll(struct target *target)
                        default:
                                break;
                        }
+                       if ((aarch64->cti_mode == AARCH64_CTIMODE_EXTEND) &&
+                               (debug_level >= LOG_LVL_INFO))
+                               target->verbose_halt_msg = verbose_halt_msg;
                }
        } else
                target->state = TARGET_RUNNING;
@@ -622,6 +651,7 @@ static int aarch64_restore_one(struct target *target, int current,
  */
 static int aarch64_prepare_restart_one(struct target *target)
 {
+       struct aarch64_common *aarch64 = target_to_aarch64(target);
        struct armv8_common *armv8 = target_to_armv8(target);
        int retval;
        uint32_t dscr;
@@ -648,8 +678,14 @@ static int aarch64_prepare_restart_one(struct target *target)
         */
        if (retval == ERROR_OK)
                retval = arm_cti_ungate_channel(armv8->cti, 1);
-       if (retval == ERROR_OK)
-               retval = arm_cti_gate_channel(armv8->cti, 0);
+       if (aarch64->cti_mode == AARCH64_CTIMODE_EXTEND) {
+               /* Open the CTI gate for channel 0 to propagate halt events to all PEs. */
+               if (retval == ERROR_OK)
+                       retval = arm_cti_ungate_channel(armv8->cti, 0);
+       } else {
+               if (retval == ERROR_OK)
+                       retval = arm_cti_gate_channel(armv8->cti, 0);
+       }
 
        /* make sure that DSCR.HDE is set */
        if (retval == ERROR_OK) {
@@ -766,6 +802,7 @@ static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoint
 static int aarch64_step_restart_smp(struct target *target)
 {
        int retval = ERROR_OK;
+       struct aarch64_common *aarch64 = target_to_aarch64(target);
        struct target_list *head;
        struct target *first = NULL;
 
@@ -774,12 +811,17 @@ static int aarch64_step_restart_smp(struct target *target)
        retval = aarch64_prep_restart_smp(target, 0, &first);
        if (retval != ERROR_OK)
                return retval;
-
-       if (first)
-               retval = aarch64_do_restart_one(first, RESTART_LAZY);
-       if (retval != ERROR_OK) {
-               LOG_DEBUG("error restarting target %s", target_name(first));
-               return retval;
+       if (aarch64->cti_mode == AARCH64_CTIMODE_LEGACY) {
+               /*
+                * CTI legacy mode restarts SMP cores here, but it's faked
+                * in extended mode.
+                */
+               if (first)
+                       retval = aarch64_do_restart_one(first, RESTART_LAZY);
+               if (retval != ERROR_OK) {
+                       LOG_DEBUG("error restarting target %s", target_name(first));
+                       return retval;
+               }
        }
 
        int64_t then = timeval_ms();
@@ -799,13 +841,16 @@ static int aarch64_step_restart_smp(struct target *target)
                        if (!target_was_examined(curr))
                                continue;
 
-                       retval = aarch64_check_state_one(curr,
-                                       PRSR_SDR, PRSR_SDR, &resumed, &prsr);
-                       if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
-                               all_resumed = false;
-                               break;
+                       if (aarch64->cti_mode == AARCH64_CTIMODE_LEGACY) {
+                               retval = aarch64_check_state_one(curr,
+                                               PRSR_SDR, PRSR_SDR, &resumed, &prsr);
+                               if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
+                                       all_resumed = false;
+                                       break;
+                               }
                        }
 
+                       /* Only fake a restart for CTI extended mode */
                        if (curr->state != TARGET_RUNNING) {
                                curr->state = TARGET_RUNNING;
                                curr->debug_reason = DBG_REASON_NOTHALTED;
@@ -828,10 +873,11 @@ static int aarch64_step_restart_smp(struct target *target)
                 * cluster explicitly. So if we find that a core has not halted
                 * yet, we trigger an explicit resume for the second cluster.
                 */
-               retval = aarch64_do_restart_one(curr, RESTART_LAZY);
+               if (aarch64->cti_mode == AARCH64_CTIMODE_LEGACY)
+                       retval = aarch64_do_restart_one(curr, RESTART_LAZY);
                if (retval != ERROR_OK)
                        break;
-}
+       }
 
        return retval;
 }
@@ -842,6 +888,8 @@ static int aarch64_resume(struct target *target, int current,
        int retval = 0;
        uint64_t addr = address;
 
+       LOG_DEBUG("%s", target_name(target));
+
        struct armv8_common *armv8 = target_to_armv8(target);
        armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;
 
@@ -953,6 +1001,11 @@ static int aarch64_debug_entry(struct target *target)
        if (retval == ERROR_OK)
                retval = mem_ap_read_atomic_u32(armv8->debug_ap,
                                armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
+
+       /* close the CTI gate for all events */
+       if (retval == ERROR_OK)
+               retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
+
        if (retval == ERROR_OK)
                retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
 
@@ -966,9 +1019,6 @@ static int aarch64_debug_entry(struct target *target)
        armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
        armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
 
-       /* close the CTI gate for all events */
-       if (retval == ERROR_OK)
-               retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
        /* discard async exceptions */
        if (retval == ERROR_OK)
                retval = dpm->instr_cpsr_sync(dpm);
@@ -1088,6 +1138,8 @@ static int aarch64_step(struct target *target, int current, target_addr_t addres
        int retval;
        uint32_t edecr;
 
+       LOG_DEBUG("%s", target_name(target));
+
        armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
 
        if (target->state != TARGET_HALTED) {
@@ -1114,11 +1166,26 @@ static int aarch64_step(struct target *target, int current, target_addr_t addres
 
        if (target->smp && (current == 1)) {
                /*
-                * isolate current target so that it doesn't get resumed
-                * together with the others
+                * Isolate current target so that it doesn't get resumed
+                * with the other SMP cores.
                 */
                retval = arm_cti_gate_channel(armv8->cti, 1);
-               /* resume all other targets in the group */
+               if (aarch64->cti_mode == AARCH64_CTIMODE_EXTEND) {
+                       if (retval == ERROR_OK)
+                               retval = arm_cti_gate_channel(armv8->cti, 0);
+                       if (retval == ERROR_OK)
+                               retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
+                       if (retval != ERROR_OK) {
+                               LOG_ERROR("Failed to gate stepping target CTI");
+                               return retval;
+                       }
+               }
+               /* For CTI legacy mode, aarch64_step_restart_smp restarts
+                * the other SMP cores prior to stepping the current core.
+                * CTI extended mode only fakes like this happened. To
+                * ensure tighter SMP coupling, the non-stepped SMP cores
+                * are CTI signaled to restart when the current core steps.
+                */
                if (retval == ERROR_OK)
                        retval = aarch64_step_restart_smp(target);
                if (retval != ERROR_OK) {
@@ -1224,6 +1291,8 @@ static int aarch64_set_breakpoint(struct target *target,
        struct armv8_common *armv8 = &aarch64->armv8_common;
        struct aarch64_brp *brp_list = aarch64->brp_list;
 
+       LOG_DEBUG("%s", target_name(target));
+
        if (breakpoint->is_set) {
                LOG_WARNING("breakpoint already set");
                return ERROR_OK;
@@ -1478,6 +1547,8 @@ static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *br
        struct armv8_common *armv8 = &aarch64->armv8_common;
        struct aarch64_brp *brp_list = aarch64->brp_list;
 
+       LOG_DEBUG("%s", target_name(target));
+
        if (!breakpoint->is_set) {
                LOG_WARNING("breakpoint not set");
                return ERROR_OK;
@@ -2714,6 +2785,8 @@ static int aarch64_init_arch_info(struct target *target,
 
        /* Setup struct aarch64_common */
        aarch64->common_magic = AARCH64_COMMON_MAGIC;
+       aarch64->cti_mode = AARCH64_CTIMODE_LEGACY;     /* default to legacy */
+       aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
        armv8->arm.dap = dap;
 
        /* register arch-specific functions */
@@ -2955,6 +3028,50 @@ COMMAND_HANDLER(aarch64_mask_interrupts_command)
        return ERROR_OK;
 }
 
+COMMAND_HANDLER(aarch64_cti_mode)
+{
+       struct target *target = get_current_target(CMD_CTX);
+       struct target_list *head;
+       struct aarch64_common *aarch64 = target_to_aarch64(target);
+       struct armv8_common *armv8 = NULL;
+
+       static const struct jim_nvp nvp_cti_mode[] = {
+               { .name = "legacy", .value = AARCH64_CTIMODE_LEGACY },
+               { .name = "extend", .value = AARCH64_CTIMODE_EXTEND },
+               { .name = NULL, .value = -1 },
+       };
+       const struct jim_nvp *n;
+
+       if (CMD_ARGC > 0) {
+               int retval = ERROR_OK;
+               n = jim_nvp_name2value_simple(nvp_cti_mode, CMD_ARGV[0]);
+               if (n->name == NULL) {
+                       LOG_ERROR("Unknown parameter: %s - should be legacy or extend", CMD_ARGV[0]);
+                       return ERROR_COMMAND_SYNTAX_ERROR;
+               }
+               foreach_smp_target(head, target->smp_targets) {
+                       aarch64 = target_to_aarch64(head->target);
+                       armv8 = target_to_armv8(head->target);
+
+                       aarch64->cti_mode = n->value;
+                       retval = arm_cti_enable(armv8->cti, false);
+
+                       if (retval == ERROR_OK)
+                               retval = aarch64_init_debug_access(head->target);
+
+                       if (retval != ERROR_OK) {
+                               LOG_ERROR("%s: debug access failure", __func__);
+                               return retval;
+                       }
+               }
+       }
+
+       n = jim_nvp_value2name_simple(nvp_cti_mode, aarch64->cti_mode);
+       command_print(CMD, "aarch64 cti mode %s", n->name);
+
+       return ERROR_OK;
+}
+
 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
 {
        struct command *c = jim_to_command(interp);
@@ -3101,6 +3218,13 @@ static const struct command_registration aarch64_exec_command_handlers[] = {
                .help = "display information about target caches",
                .usage = "",
        },
+       {
+               .name = "cti",
+               .handler = aarch64_cti_mode,
+               .mode = COMMAND_ANY,
+               .help = "tighter coupling of smp core run/halt events",
+               .usage = "['legacy'|'extend']",
+       },
        {
                .name = "dbginit",
                .handler = aarch64_handle_dbginit_command,
diff --git a/src/target/aarch64.h b/src/target/aarch64.h
index 2721fe747d..61c32d852e 100644
--- a/src/target/aarch64.h
+++ b/src/target/aarch64.h
@@ -29,6 +29,11 @@ enum aarch64_isrmasking_mode {
        AARCH64_ISRMASK_ON,
 };
 
+enum aarch64_cti_mode {
+       AARCH64_CTIMODE_LEGACY,
+       AARCH64_CTIMODE_EXTEND
+};
+
 struct aarch64_brp {
        int used;
        int type;
@@ -58,6 +63,8 @@ struct aarch64_common {
        struct aarch64_brp *wp_list;
 
        enum aarch64_isrmasking_mode isrmasking_mode;
+
+       enum aarch64_cti_mode cti_mode;
 };
 
 static inline struct aarch64_common *

-- 

Reply via email to