Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

target/riscv: read abstract args using batch #1033

Merged
merged 1 commit into from
May 28, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
85 changes: 62 additions & 23 deletions src/target/riscv/batch.c
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
/* Reserve extra room in the batch (needed for the last NOP operation) */
#define BATCH_RESERVED_SCANS 1

struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_t idle)
struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans)
{
scans += BATCH_RESERVED_SCANS;
struct riscv_batch *out = calloc(1, sizeof(*out));
Expand All @@ -27,8 +27,9 @@ struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_

out->target = target;
out->allocated_scans = scans;
out->idle_count = idle;
out->last_scan = RISCV_SCAN_TYPE_INVALID;
out->was_run = false;
out->used_idle_count = 0;

out->data_out = NULL;
out->data_in = NULL;
Expand Down Expand Up @@ -89,26 +90,53 @@ bool riscv_batch_full(struct riscv_batch *batch)
return riscv_batch_available_scans(batch) == 0;
}

int riscv_batch_run(struct riscv_batch *batch, bool resets_delays,
size_t reset_delays_after)
/* Return true iff the DMI scan at "scan_idx" of a previously executed batch
 * came back with the DTM_DMI_OP_BUSY status (the debug module had not yet
 * completed the preceding DMI operation).
 * Only valid after the batch was run ("was_run" set by riscv_batch_run_from). */
static bool riscv_batch_was_scan_busy(const struct riscv_batch *batch,
		size_t scan_idx)
{
	assert(batch->was_run);
	assert(scan_idx < batch->used_scans);
	const struct scan_field *field = batch->fields + scan_idx;
	/* The in_value buffer must have been captured for this scan. */
	assert(field->in_value);
	const uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
	return get_field(in, DTM_DMI_OP) == DTM_DMI_OP_BUSY;
}

/* If the batch was already run once and the caller now requests more idle
 * cycles than the previous run used, insert the difference as extra JTAG
 * Run-Test/Idle cycles before the batch is (re-)run. No-op on a first run. */
static void add_idle_if_increased(struct riscv_batch *batch, size_t new_idle_count)
{
	if (!batch->was_run)
		return;
	/* Only act when the idle count actually increased. The guard must be
	 * this way around: otherwise the unsigned subtraction below would
	 * wrap around when new_idle_count < used_idle_count. */
	if (new_idle_count <= batch->used_idle_count)
		return;
	const size_t idle_change = new_idle_count - batch->used_idle_count;
	LOG_TARGET_DEBUG(batch->target,
			"Idle count increased. Adding %zu idle cycles before the batch.",
			idle_change);
	jtag_add_runtest(idle_change, TAP_IDLE);
}

int riscv_batch_run_from(struct riscv_batch *batch, size_t start_idx,
size_t idle_count, bool resets_delays, size_t reset_delays_after)
{
assert(batch->used_scans);
assert(batch->last_scan == RISCV_SCAN_TYPE_NOP);
assert(!batch->was_run || riscv_batch_was_scan_busy(batch, start_idx));
assert(start_idx == 0 || !riscv_batch_was_scan_busy(batch, start_idx - 1));

add_idle_if_increased(batch, idle_count);

riscv_batch_add_nop(batch);
LOG_TARGET_DEBUG(batch->target, "Running batch of scans [%zu, %zu)",
start_idx, batch->used_scans);

for (size_t i = 0; i < batch->used_scans; ++i) {
for (size_t i = start_idx; i < batch->used_scans; ++i) {
if (bscan_tunnel_ir_width != 0)
riscv_add_bscan_tunneled_scan(batch->target, batch->fields + i, batch->bscan_ctxt + i);
else
jtag_add_dr_scan(batch->target->tap, 1, batch->fields + i, TAP_IDLE);

const bool delays_were_reset = resets_delays
&& (i >= reset_delays_after);
if (batch->idle_count > 0 && !delays_were_reset)
jtag_add_runtest(batch->idle_count, TAP_IDLE);
if (idle_count > 0 && !delays_were_reset)
jtag_add_runtest(idle_count, TAP_IDLE);
}

keep_alive();
Expand All @@ -122,16 +150,18 @@ int riscv_batch_run(struct riscv_batch *batch, bool resets_delays,

if (bscan_tunnel_ir_width != 0) {
/* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
for (size_t i = 0; i < batch->used_scans; ++i) {
for (size_t i = start_idx; i < batch->used_scans; ++i) {
if ((batch->fields + i)->in_value)
buffer_shr((batch->fields + i)->in_value, DMI_SCAN_BUF_SIZE, 1);
}
}

for (size_t i = 0; i < batch->used_scans; ++i)
riscv_decode_dmi_scan(batch->target, batch->idle_count, batch->fields + i,
for (size_t i = start_idx; i < batch->used_scans; ++i)
riscv_log_dmi_scan(batch->target, idle_count, batch->fields + i,
/*discard_in*/ false);

batch->was_run = true;
batch->used_idle_count = idle_count;
return ERROR_OK;
}

Expand Down Expand Up @@ -208,14 +238,23 @@ size_t riscv_batch_available_scans(struct riscv_batch *batch)
return batch->allocated_scans - batch->used_scans - BATCH_RESERVED_SCANS;
}

bool riscv_batch_dmi_busy_encountered(const struct riscv_batch *batch)
bool riscv_batch_was_batch_busy(const struct riscv_batch *batch)
{
if (batch->used_scans == 0)
/* Empty batch */
return false;

assert(batch->was_run);
assert(batch->used_scans);
assert(batch->last_scan == RISCV_SCAN_TYPE_NOP);
const struct scan_field *field = batch->fields + batch->used_scans - 1;
const uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
return get_field(in, DTM_DMI_OP) == DTM_DMI_OP_BUSY;
return riscv_batch_was_scan_busy(batch, batch->used_scans - 1);
}

/* Return how many scans at the start of the batch completed without a
 * DTM_DMI_OP_BUSY result on the last run of the batch. */
size_t riscv_batch_finished_scans(const struct riscv_batch *batch)
{
	if (!riscv_batch_was_batch_busy(batch))
		/* Whole batch succeeded. */
		return batch->used_scans;

	/* The final scan was busy, so a first busy scan must exist. */
	assert(batch->used_scans);
	size_t done = 0;
	while (!riscv_batch_was_scan_busy(batch, done))
		++done;
	return done;
}
38 changes: 26 additions & 12 deletions src/target/riscv/batch.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,6 @@ struct riscv_batch {
size_t allocated_scans;
size_t used_scans;

size_t idle_count;

uint8_t *data_out;
uint8_t *data_in;
struct scan_field *fields;
Expand All @@ -44,26 +42,42 @@ struct riscv_batch {
/* The read keys. */
size_t *read_keys;
size_t read_keys_used;

/* Flag indicating that the last run of the batch finished without an error
* from the underlying JTAG layer of OpenOCD - all scans were performed.
* However, RISC-V DMI "busy" condition could still have occurred.
*/
bool was_run;
/* Idle count used on the last run. Only valid after `was_run` is set. */
size_t used_idle_count;
};

/* Allocates (or frees) a new scan set. "scans" is the maximum number of JTAG
* scans that can be issued to this object, and idle is the number of JTAG idle
* cycles between every real scan. */
struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_t idle);
* scans that can be issued to this object. */
struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans);
void riscv_batch_free(struct riscv_batch *batch);

/* Checks to see if this batch is full. */
bool riscv_batch_full(struct riscv_batch *batch);

/* Executes this batch of JTAG DTM DMI scans.
 * Executes this batch of JTAG DTM DMI scans, starting from the "start_idx" scan.
*
* If batch is run for the first time, it is expected that "start" is zero.
* It is expected that the batch ends with a DMI NOP operation.
*
* If resets_delays is true, the algorithm will stop inserting idle cycles
* (JTAG Run-Test Idle) after "reset_delays_after" number of scans is
* "idle_count" is the number of JTAG Run-Test-Idle cycles to add in-between
* the scans.
*
* If "resets_delays" is true, the algorithm will stop inserting idle cycles
* (JTAG Run-Test-Idle) after "reset_delays_after" number of scans is
* performed. This is useful for stress-testing of RISC-V algorithms in
* OpenOCD that are based on batches.
*/
int riscv_batch_run(struct riscv_batch *batch, bool resets_delays,
size_t reset_delays_after);
int riscv_batch_run_from(struct riscv_batch *batch, size_t start_idx,
size_t idle_count, bool resets_delays, size_t reset_delays_after);

/* Get the number of scans successfully executed from this batch. */
size_t riscv_batch_finished_scans(const struct riscv_batch *batch);

/* Adds a DM register write to this batch. */
void riscv_batch_add_dm_write(struct riscv_batch *batch, uint64_t address, uint32_t data,
Expand All @@ -83,12 +97,12 @@ void riscv_batch_add_nop(struct riscv_batch *batch);
size_t riscv_batch_available_scans(struct riscv_batch *batch);

/* Return true iff the last scan in the batch returned DMI_OP_BUSY. */
bool riscv_batch_dmi_busy_encountered(const struct riscv_batch *batch);
bool riscv_batch_was_batch_busy(const struct riscv_batch *batch);

/* TODO: The function is defined in `riscv-013.c`. This is done to reduce the
* diff of the commit. The intention is to move the function definition to
* a separate module (e.g. `riscv013-jtag-dtm.c/h`) in another commit. */
void riscv_decode_dmi_scan(const struct target *target, int idle, const struct scan_field *field,
void riscv_log_dmi_scan(const struct target *target, int idle, const struct scan_field *field,
bool discard_in);

#endif
Loading