update kpu driver

fix/double
jiangxiangbing 2019-03-08 17:07:22 +08:00
parent 2dd43d46e5
commit 79da3b13fe
5 changed files with 688 additions and 262 deletions

View File

@@ -21,9 +21,20 @@
#include <kpu.h>
#include <sysctl.h>
#include <math.h>
#include <float.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
using namespace sys;
#define KPU_DEBUG 1
#define NNCASE_DEBUG 0
#define USE_CACHED_AI_RAM 0
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
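/* Classic function-like min/max macros: both evaluate their arguments twice,
   so they must not be used with side-effecting expressions. */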
#define COMMON_ENTRY \
semaphore_lock locker(free_mutex_);
@@ -33,21 +44,22 @@ public:
k_model_context(uint8_t *buffer)
{
uintptr_t base_addr = (uintptr_t)buffer;
kpu_model_header_t *header = (kpu_model_header_t *)buffer;
model_buffer_ = buffer;
layer_headers_ = (kpu_model_layer_header_t *)(base_addr + sizeof(kpu_model_header_t));
layers_length_ = header->layers_length;
body_start_ = (uint8_t *)(base_addr + sizeof(kpu_model_header_t) + 8 * header->layers_length);
storage_ = std::make_unique<uint8_t[]>(header->main_mem_usage);
main_buffer_ = { storage_.get(), ptrdiff_t(header->main_mem_usage) };
}
virtual void on_first_open() override
{
}
virtual void on_last_close() override
{
const kpu_model_header_t *header = (const kpu_model_header_t *)buffer;
if (header->version == 3 && header->arch == 0)
{
model_buffer_ = buffer;
output_count_ = header->output_count;
outputs_ = (const kpu_model_output_t *)(base_addr + sizeof(kpu_model_header_t));
layer_headers_ = (const kpu_model_layer_header_t *)((uintptr_t)outputs_ + sizeof(kpu_model_output_t) * output_count_);
layers_length_ = header->layers_length;
body_start_ = (const uint8_t *)((uintptr_t)layer_headers_ + sizeof(kpu_model_layer_header_t) * header->layers_length);
storage_ = std::make_unique<uint8_t[]>(header->main_mem_usage);
main_buffer_ = { storage_.get(), ptrdiff_t(header->main_mem_usage) };
}
else
{
throw std::runtime_error("Cannot load kmodel.");
}
}
void get(kpu_model_context_t *ctx)
@@ -57,12 +69,16 @@ public:
ctx->main_buffer = main_buffer_.data();
ctx->layer_headers = layer_headers_;
ctx->layers_length = layers_length_;
ctx->output_count = output_count_;
ctx->outputs = outputs_;
}
private:
uint8_t *model_buffer_;
kpu_model_layer_header_t *layer_headers_;
uint8_t *body_start_;
const uint8_t *model_buffer_;
const kpu_model_layer_header_t *layer_headers_;
const uint8_t *body_start_;
uint32_t layers_length_;
uint32_t output_count_;
const kpu_model_output_t *outputs_;
gsl::span<uint8_t> main_buffer_;
std::unique_ptr<uint8_t[]> storage_;
};
@@ -97,7 +113,7 @@ public:
return system_alloc_handle(make_accessor(make_object<k_model_context>(buffer)));
}
virtual int run(handle_t context, const uint8_t *src, uint8_t **output, size_t *output_size) override
virtual int run(handle_t context, const uint8_t *src) override
{
COMMON_ENTRY;
@@ -126,20 +142,23 @@ public:
pic_set_irq_handler(IRQN_AI_INTERRUPT, kpu_isr_handle, this);
pic_set_irq_enable(IRQN_AI_INTERRUPT, 1);
kpu_model_layer_header_t *first_layer_header = ctx_.layer_headers;
const kpu_model_layer_header_t *first_layer_header = ctx_.layer_headers;
if (first_layer_header->type != KL_K210_CONV)
return -1;
kpu_model_conv_layer_argument_t *first_layer = (kpu_model_conv_layer_argument_t *)ctx_.body_start;
kpu_layer_argument_t *layer_arg = (kpu_layer_argument_t *)(ctx_.model_buffer + first_layer->layer_offset);
const kpu_model_conv_layer_argument_t *first_layer = (const kpu_model_conv_layer_argument_t *)ctx_.body_start;
kpu_layer_argument_t layer_arg = *(kpu_layer_argument_t *)(ctx_.model_buffer + first_layer->layer_offset);
if ((layer_arg->image_size.data.i_row_wid + 1) % 64 != 0)
#if KPU_DEBUG
gettimeofday(&last_time_, NULL);
#endif
if ((layer_arg.image_size.data.i_row_wid + 1) % 64 != 0)
{
kpu_input_with_padding(layer_arg, src);
kpu_input_with_padding(&layer_arg, src);
ai_step_not_isr();
}
else
{
kpu_input_dma(layer_arg, src);
kpu_input_dma(&layer_arg, src);
}
while (!done_flag_)
{
@@ -156,8 +175,20 @@ public:
}
}
}
*output = output_address_;
*output_size = output_size_;
done_flag_ = 0;
return 0;
}
virtual int get_output(handle_t context, uint32_t index, uint8_t **data, size_t *size) override
{
auto model_context = system_handle_to_object(context).as<k_model_context>();
model_context->get(&ctx_);
if (index >= ctx_.output_count)
return -1;
const kpu_model_output_t *output = ctx_.outputs + index;
*data = ctx_.main_buffer + output->address;
*size = output->size;
return 0;
}
@@ -184,153 +215,16 @@ private:
}
}
int ai_step()
void kpu_flush_cache(uint32_t addr, size_t lines)
{
uint32_t cnt_layer_id = ctx_.current_layer++;
uint8_t *layer_body = ctx_.current_body;
kpu_model_layer_header_t *cnt_layer_header = ctx_.layer_headers + cnt_layer_id;
ctx_.current_body += cnt_layer_header->body_size;
switch (cnt_layer_header->type)
size_t line;
for (line = 0; line < lines; line++)
{
case KL_GLOBAL_AVERAGE_POOL2D:
kpu_global_average_pool2d((kpu_model_gap2d_layer_argument_t *)layer_body);
break;
case KL_QUANTIZE:
kpu_quantize((kpu_model_quantize_layer_argument_t *)layer_body);
break;
case KL_DEQUANTIZE:
kpu_dequantize((kpu_model_dequantize_layer_argument_t *)layer_body);
break;
case KL_L2_NORMALIZATION:
kpu_l2_normalization((kpu_model_l2_norm_layer_argument_t *)layer_body);
break;
case KL_K210_CONV:
kpu_conv((kpu_model_conv_layer_argument_t *)layer_body);
return 0;
case KL_K210_ADD_PADDING:
kpu_add_padding((kpu_model_add_padding_layer_argument_t *)layer_body);
break;
case KL_K210_REMOVE_PADDING:
kpu_remove_padding((kpu_model_remove_padding_layer_argument_t *)layer_body);
break;
default:
configASSERT("Layer is not supported.");
}
if (cnt_layer_id != (ctx_.layers_length - 1))
{
return 1;
}
else
{
kpu_done();
return 0;
}
}
void kpu_input_with_padding(kpu_layer_argument_t *layer, const uint8_t *src)
{
size_t width = layer->image_size.data.i_row_wid + 1;
size_t height = layer->image_size.data.i_col_high + 1;
size_t channels = layer->image_channel_num.data.i_ch_num + 1;
uint8_t *dest = (uint8_t *)(uintptr_t)(AI_IO_BASE_ADDR + layer->image_addr.data.image_src_addr * 64);
size_t oc, y, x;
uint32_t row_padding;
uint32_t row_group;
uint32_t row_length;
if (width <= 16)
{
row_padding = 16;
row_group = 4;
row_length = 1;
}
else if (width <= 32)
{
row_padding = 32;
row_group = 2;
row_length = 1;
}
else
{
row_padding = 64;
row_group = 1;
row_length = (width + 63) / 64;
}
for (oc = 0; oc < channels; oc++)
{
uint8_t *channel_origin = dest + oc / row_group * row_length * height * 64 + oc % row_group * row_padding;
for (y = 0; y < height; y++)
{
uint8_t *y_origin = channel_origin + y * row_length * 64;
for (x = 0; x < width; x++)
y_origin[x] = *src++;
}
}
}
void ai_step_not_isr()
{
portENTER_CRITICAL();
ai_step();
vPortExitCritical();
}
void kpu_input_dma(kpu_layer_argument_t *layer, const uint8_t *src)
{
uint64_t input_size = layer->kernel_calc_type_cfg.data.channel_switch_addr * 64 * (layer->image_channel_num.data.i_ch_num + 1);
dma_set_request_source(dma_ch_, dma_req_);
dma_transmit_async(dma_ch_, src, (void *)(uintptr_t)((uint8_t *)AI_IO_BASE_ADDR + layer->image_addr.data.image_src_addr * 64), 1, 1, sizeof(uint64_t), input_size / 8, 16, completion_event_);
}
int kpu_done()
{
kpu_.interrupt_clear.data.calc_done_int = 1;
kpu_.interrupt_clear.data.layer_cfg_almost_empty_int = 1;
kpu_.interrupt_clear.data.layer_cfg_almost_full_int = 1;
kpu_.interrupt_clear.data.reserved = 0;
kpu_.interrupt_mask.data.calc_done_int = 1;
kpu_.interrupt_mask.data.layer_cfg_almost_empty_int = 1;
kpu_.interrupt_mask.data.layer_cfg_almost_full_int = 1;
kpu_.interrupt_mask.data.reserved = 0;
kpu_model_header_t *header = (kpu_model_header_t *)ctx_.model_buffer;
output_address_ = ctx_.main_buffer + header->output_address;
output_size_ = header->output_size;
done_flag_ = 1;
return 0;
}
void kpu_conv(kpu_model_conv_layer_argument_t *arg)
{
kpu_layer_argument_t *layer = (kpu_layer_argument_t *)(ctx_.model_buffer + arg->layer_offset);
layer->kernel_load_cfg.data.para_start_addr = (uintptr_t)(ctx_.model_buffer + arg->weights_offset);
layer->kernel_pool_type_cfg.data.bwsx_base_addr = (uintptr_t)(ctx_.model_buffer + arg->bn_offset);
layer->kernel_calc_type_cfg.data.active_addr = (uintptr_t)(ctx_.model_buffer + arg->act_offset);
if (arg->flags & KLF_MAIN_MEM_OUT)
{
uint8_t *dest = ctx_.main_buffer + arg->main_mem_out_address;
layer->dma_parameter.data.send_data_out = 1;
dma_set_request_source(dma_ch_, dma_req_);
dma_transmit_async(dma_ch_, (void *)(&kpu_.fifo_data_out), dest, 0, 1, sizeof(uint64_t), (layer->dma_parameter.data.dma_total_byte + 8) / 8, 8, completion_event_);
kpu_send_layer(layer);
}
else
{
kpu_send_layer(layer);
kpu_.interrupt_mask.data.calc_done_int = 1;
kpu_.interrupt_mask.data.layer_cfg_almost_empty_int = 0;
kpu_.interrupt_mask.data.layer_cfg_almost_full_int = 1;
kpu_.interrupt_mask.data.reserved = 0;
const uint64_t *src = (const uint64_t *)(AI_RAM_BASE_ADDR + (addr + line) * 64);
uint64_t *dest = (uint64_t *)(AI_IO_BASE_ADDR + (addr + line) * 64);
size_t i;
for (i = 0; i < 8; i++)
dest[i] = src[i];
}
}
@@ -350,7 +244,147 @@ private:
kpu_.layer_argument_fifo = layer->dma_parameter.reg;
}
void kpu_global_average_pool2d(kpu_model_gap2d_layer_argument_t *arg)
void kpu_upload_core(size_t width, size_t height, size_t channels, const uint8_t *src, uint32_t kpu_addr)
{
uint8_t *dest = (uint8_t *)AI_IO_BASE_ADDR + kpu_addr * 64;
size_t oc, y, x;
uint32_t row_padding;
uint32_t row_group;
uint32_t row_length;
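/* KPU RAM is addressed in 64-byte rows. Narrow images pack several channels
   side by side within one row: 4 channels at a 16-byte stride when width <= 16,
   2 channels at a 32-byte stride when width <= 32. Wider images take one or
   more whole 64-byte units per row (row_length), one channel per row group. */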
if (width <= 16)
{
row_padding = 16;
row_group = 4;
row_length = 1;
}
else if (width <= 32)
{
row_padding = 32;
row_group = 2;
row_length = 1;
}
else
{
row_padding = 64;
row_group = 1;
row_length = (width + 63) / 64;
}
if ((uintptr_t)src % 8 == 0 && width % 8 == 0)
{
width /= 8;
const uint64_t *u64_src = (const uint64_t *)src;
for (oc = 0; oc < channels; oc++)
{
uint8_t *channel_origin = dest + oc / row_group * row_length * height * 64 + oc % row_group * row_padding;
for (y = 0; y < height; y++)
{
uint64_t *y_origin = (uint64_t *)(channel_origin + y * row_length * 64);
for (x = 0; x < width; x++)
{
y_origin[x] = *u64_src++;
#if NNCASE_DEBUG
assert((uintptr_t)y_origin >= AI_IO_BASE_ADDR && (uintptr_t)y_origin < AI_IO_BASE_ADDR + 2 * 1024 * 1024);
#endif
}
}
}
}
else
{
for (oc = 0; oc < channels; oc++)
{
uint8_t *channel_origin = dest + oc / row_group * row_length * height * 64 + oc % row_group * row_padding;
for (y = 0; y < height; y++)
{
uint8_t *y_origin = channel_origin + y * row_length * 64;
for (x = 0; x < width; x++)
y_origin[x] = *src++;
}
}
}
}
void kpu_input_dma(const kpu_layer_argument_t *layer, const uint8_t *src)
{
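/* Stream the packed input image into KPU RAM in 64-bit DMA beats. input_size
   is channel_switch_addr * 64 * channels, always a multiple of 64 bytes, so
   the division by 8 below is exact. */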
uint64_t input_size = layer->kernel_calc_type_cfg.data.channel_switch_addr * 64 * (layer->image_channel_num.data.i_ch_num + 1);
dma_set_request_source(dma_ch_, dma_req_);
dma_transmit_async(dma_ch_, src, (void *)(uintptr_t)((uint8_t *)AI_IO_BASE_ADDR + layer->image_addr.data.image_src_addr * 64), 1, 1, sizeof(uint64_t), input_size / 8, 16, completion_event_);
}
void kpu_input_with_padding(const kpu_layer_argument_t *layer, const uint8_t *src)
{
size_t width = layer->image_size.data.i_row_wid + 1;
size_t height = layer->image_size.data.i_col_high + 1;
size_t channels = layer->image_channel_num.data.i_ch_num + 1;
kpu_upload_core(width, height, channels, src, layer->image_addr.data.image_src_addr);
}
void kpu_fully_connected(const float *src, const float *weights, const float *biases, float *dest, int input_channels, int output_channels)
{
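/* Plain dense layer on the CPU: dest[oc] = dot(src, weights row oc) + biases[oc]. */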
int ic, oc;
for (oc = 0; oc < output_channels; oc++)
{
const float *c_weights = weights + oc * input_channels;
float sum = 0.0f;
for (ic = 0; ic < input_channels; ic++)
sum += src[ic] * c_weights[ic];
dest[oc] = sum + biases[oc];
}
}
void kpu_add(const kpu_model_add_layer_argument_t *arg)
{
const float *src_a = (const float *)(ctx_.main_buffer + arg->main_mem_in_a_address);
const float *src_b = (const float *)(ctx_.main_buffer + arg->main_mem_in_b_address);
float *dest = (float *)(ctx_.main_buffer + arg->main_mem_out_address);
size_t i, count = arg->count;
for (i = 0; i < count; i++)
dest[i] = src_a[i] + src_b[i];
}
void kpu_quantized_add(const kpu_model_quant_add_layer_argument_t *arg)
{
const uint8_t *src_a = (const uint8_t *)(ctx_.main_buffer + arg->main_mem_in_a_address);
const uint8_t *src_b = (const uint8_t *)(ctx_.main_buffer + arg->main_mem_in_b_address);
size_t count = arg->count;
int64_t off_a = arg->in_a_offset, mul_a = arg->in_a_mul, sh_a = arg->in_a_shift;
int64_t off_b = arg->in_b_offset, mul_b = arg->in_b_mul, sh_b = arg->in_b_shift;
int64_t off_o = arg->out_offset, mul_o = arg->out_mul, sh_o = arg->out_shift;
uint8_t *dest = (uint8_t *)(ctx_.main_buffer + arg->main_mem_out_address);
size_t i;
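/* Fixed-point add: each input is mapped to a common scale via
   (x + offset) * mul >> shift, the two are summed, then the result is
   requantized with the output parameters and clamped to [0, 255]. When the
   input shifts are equal, the shared shift is applied once after the sum to
   preserve precision. Illustrative numbers (not from any real model): with
   off_a = -128, mul_a = 3, sh_a = 1, a raw byte of 130 contributes
   (130 - 128) * 3 >> 1 = 3. */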
if (sh_a == sh_b)
{
for (i = 0; i < count; i++)
{
int64_t a = (*src_a++ + off_a) * mul_a;
int64_t b = (*src_b++ + off_b) * mul_b;
int64_t value = (((a + b) >> sh_a) * mul_o >> sh_o) + off_o;
if (value < 0) value = 0;
if (value > 0xFF) value = 0xFF;
*dest++ = (uint8_t)value;
}
}
else
{
for (i = 0; i < count; i++)
{
int64_t a = (*src_a++ + off_a) * mul_a >> sh_a;
int64_t b = (*src_b++ + off_b) * mul_b >> sh_b;
int64_t value = ((a + b) * mul_o >> sh_o) + off_o;
if (value < 0) value = 0;
if (value > 0xFF) value = 0xFF;
*dest++ = (uint8_t)value;
}
}
}
void kpu_global_average_pool2d(const kpu_model_gap2d_layer_argument_t *arg)
{
const float *src = (const float *)(ctx_.main_buffer + arg->main_mem_in_address);
float *dest = (float *)(ctx_.main_buffer + arg->main_mem_out_address);
@@ -367,73 +401,68 @@ private:
}
}
void kpu_quantize(kpu_model_quantize_layer_argument_t *arg)
void kpu_quantized_max_pool2d(const kpu_model_quant_max_pool2d_layer_argument_t *arg)
{
size_t width = arg->width;
size_t height = arg->height;
size_t channels = arg->channels;
const float *src = (const float *)(ctx_.main_buffer + arg->main_mem_in_address);
const kpu_model_quant_param_t q = arg->quant_param;
if (arg->flags & KLF_MAIN_MEM_OUT)
const uint8_t *src = (const uint8_t *)(ctx_.main_buffer + arg->main_mem_in_address);
uint8_t *dest = (uint8_t *)(ctx_.main_buffer + arg->main_mem_out_address);
kpu_model_shape_t in_shape = arg->in_shape, out_shape = arg->out_shape;
uint32_t kernel_width = arg->kernel_width, kernel_height = arg->kernel_height;
uint32_t stride_width = arg->stride_width, stride_height = arg->stride_height;
uint32_t padding_width = arg->padding_width, padding_height = arg->padding_height;
uint32_t out_y, out_x, oc;
for (oc = 0; oc < out_shape.channels; oc++)
{
uint8_t *dest = (uint8_t *)(ctx_.main_buffer + arg->mem_out_address);
size_t i, count = width * height * channels;
for (i = 0; i < count; i++)
const uint8_t *channel_src = src + in_shape.width * in_shape.height * oc;
for (out_y = 0; out_y < out_shape.height; out_y++)
{
int value = (*src++ - q.bias) / q.scale;
if (value < 0) value = 0;
if (value > 0xFF) value = 0xFF;
*dest++ = (uint8_t)value;
}
}
else
{
uint8_t *dest = (uint8_t *)AI_IO_BASE_ADDR + arg->mem_out_address * 64;
size_t oc, y, x;
uint32_t row_padding;
uint32_t row_group;
uint32_t row_length;
if (width <= 16)
{
row_padding = 16;
row_group = 4;
row_length = 1;
}
else if (width <= 32)
{
row_padding = 32;
row_group = 2;
row_length = 1;
}
else
{
row_padding = 64;
row_group = 1;
row_length = (width + 63) / 64;
}
for (oc = 0; oc < channels; oc++)
{
uint8_t *channel_origin = dest + oc / row_group * row_length * height * 64 + oc % row_group * row_padding;
for (y = 0; y < height; y++)
for (out_x = 0; out_x < out_shape.width; out_x++)
{
uint8_t *y_origin = channel_origin + y * row_length * 64;
for (x = 0; x < width; x++)
int32_t in_x_origin = (int32_t)(out_x * stride_width) - padding_width;
int32_t in_y_origin = (int32_t)(out_y * stride_height) - padding_height;
int32_t kernel_x_start = max(0, -in_x_origin);
int32_t kernel_x_end = min(kernel_width, in_shape.width - in_x_origin);
int32_t kernel_y_start = max(0, -in_y_origin);
int32_t kernel_y_end = min(kernel_height, in_shape.height - in_y_origin);
uint8_t value = 0;
int32_t kernel_y, kernel_x;
for (kernel_y = kernel_y_start; kernel_y < kernel_y_end; kernel_y++)
{
int value = (*src++ - q.bias) / q.scale;
if (value < 0) value = 0;
if (value > 0xFF) value = 0xFF;
y_origin[x] = (uint8_t)value;
for (kernel_x = kernel_x_start; kernel_x < kernel_x_end; kernel_x++)
{
int32_t in_x = in_x_origin + kernel_x;
int32_t in_y = in_y_origin + kernel_y;
value = max(value, channel_src[in_y * in_shape.width + in_x]);
}
}
*dest++ = value;
}
}
}
}
void kpu_dequantize(kpu_model_dequantize_layer_argument_t *arg)
void kpu_quantize(const kpu_model_quantize_layer_argument_t *arg)
{
size_t count = arg->count;
const float *src = (const float *)(ctx_.main_buffer + arg->main_mem_in_address);
const kpu_model_quant_param_t q = arg->quant_param;
float scale = 1.f / q.scale;
uint8_t *dest = (uint8_t *)(ctx_.main_buffer + arg->mem_out_address);
size_t i;
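/* Affine quantization: value = (x - bias) / scale, truncated and clamped
   to [0, 255]. */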
for (i = 0; i < count; i++)
{
int value = (*src++ - q.bias) * scale;
if (value < 0) value = 0;
if (value > 0xFF) value = 0xFF;
*dest++ = (uint8_t)value;
}
}
void kpu_dequantize(const kpu_model_dequantize_layer_argument_t *arg)
{
const uint8_t *src = (const uint8_t *)(ctx_.main_buffer + arg->main_mem_in_address);
float *dest = (float *)(ctx_.main_buffer + arg->main_mem_out_address);
@@ -444,7 +473,18 @@ private:
dest[oc] = *src++ * q.scale + q.bias;
}
void kpu_l2_normalization(kpu_model_l2_norm_layer_argument_t *arg)
void kpu_requantize(const kpu_model_requantize_layer_argument_t *arg)
{
const uint8_t *src = (const uint8_t *)(ctx_.main_buffer + arg->main_mem_in_address);
uint8_t *dest = (uint8_t *)(ctx_.main_buffer + arg->main_mem_out_address);
size_t oc, count = arg->count;
const uint8_t *table = arg->table;
for (oc = 0; oc < count; oc++)
dest[oc] = table[*src++];
}
void kpu_l2_normalization(const kpu_model_l2_norm_layer_argument_t *arg)
{
const float *src = (const float *)(ctx_.main_buffer + arg->main_mem_in_address);
float *dest = (float *)(ctx_.main_buffer + arg->main_mem_out_address);
@@ -461,10 +501,85 @@ private:
dest[oc] = src[oc] * sum;
}
void kpu_add_padding(kpu_model_add_padding_layer_argument_t *arg)
void kpu_softmax(const kpu_model_softmax_layer_argument_t *arg)
{
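/* Numerically stable softmax: subtract the running max before expf() so
   large logits cannot overflow. */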
const float *src = (const float *)(ctx_.main_buffer + arg->main_mem_in_address);
float *dest = (float *)(ctx_.main_buffer + arg->main_mem_out_address);
size_t oc, channels = arg->channels;
float max = -FLT_MAX;
for (oc = 0; oc < channels; oc++)
max = fmaxf(max, src[oc]);
float sum = 0.f;
for (oc = 0; oc < channels; oc++)
{
float value = expf(src[oc] - max);
sum += value;
dest[oc] = value;
}
for (oc = 0; oc < channels; oc++)
dest[oc] /= sum;
}
void kpu_concat(const kpu_model_concat_layer_argument_t *arg)
{
uint8_t *dest = (uint8_t *)(ctx_.main_buffer + arg->main_mem_out_address);
uint32_t count = arg->input_count, i;
for (i = 0; i < count; i++)
{
kpu_model_memory_range_t input = arg->inputs_mem[i];
const uint8_t *src = (const uint8_t *)(ctx_.main_buffer + input.start);
memcpy(dest, src, input.size);
dest += input.size;
}
}
void kpu_conv(const kpu_model_conv_layer_argument_t *arg)
{
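/* Copy the layer argument out of the model buffer before patching in the
   runtime weight/BN/activation addresses, so the (possibly read-only) model
   image itself is never modified. */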
volatile kpu_layer_argument_t layer = *(kpu_layer_argument_t *)(ctx_.model_buffer + arg->layer_offset);
layer.kernel_load_cfg.data.para_start_addr = (uintptr_t)(ctx_.model_buffer + arg->weights_offset);
layer.kernel_pool_type_cfg.data.bwsx_base_addr = (uintptr_t)(ctx_.model_buffer + arg->bn_offset);
layer.kernel_calc_type_cfg.data.active_addr = (uintptr_t)(ctx_.model_buffer + arg->act_offset);
if (arg->flags & KLF_MAIN_MEM_OUT)
{
uint8_t *dest = ctx_.main_buffer + arg->main_mem_out_address;
layer.dma_parameter.data.send_data_out = 1;
dma_set_request_source(dma_ch_, dma_req_);
dma_transmit_async(dma_ch_, (void *)(&kpu_.fifo_data_out), dest, 0, 1, sizeof(uint64_t), (layer.dma_parameter.data.dma_total_byte + 8) / 8, 8, completion_event_);
}
else
{
#if KPU_DEBUG
kpu_.interrupt_mask.data.calc_done_int = 0;
kpu_.interrupt_mask.data.layer_cfg_almost_empty_int = 1;
kpu_.interrupt_mask.data.layer_cfg_almost_full_int = 1;
kpu_.interrupt_mask.data.reserved = 0;
layer.interrupt_enabe.data.int_en = 1;
#else
kpu_.interrupt_mask.data.calc_done_int = 1;
kpu_.interrupt_mask.data.layer_cfg_almost_empty_int = 0;
kpu_.interrupt_mask.data.layer_cfg_almost_full_int = 1;
kpu_.interrupt_mask.data.reserved = 0;
#endif
}
kpu_send_layer((const kpu_layer_argument_t *)&layer);
}
void kpu_add_padding(const kpu_model_add_padding_layer_argument_t *arg)
{
const uint8_t *src = (const uint8_t *)(ctx_.main_buffer + arg->main_mem_in_address);
uint8_t *dest = (uint8_t *)AI_IO_BASE_ADDR + arg->kpu_mem_out_address * 64L;
#if USE_CACHED_AI_RAM
uint8_t *dest = (uint8_t *)AI_RAM_BASE_ADDR + arg->kpu_mem_out_address * 64;
#else
uint8_t *dest = (uint8_t *)AI_IO_BASE_ADDR + arg->kpu_mem_out_address * 64;
#endif
uint32_t row_padding = 16;
uint32_t row_group = 4;
@@ -482,9 +597,13 @@ private:
y_origin[x] = *src++;
}
}
#if USE_CACHED_AI_RAM
uint32_t lines = row_length * height * channels / row_group;
kpu_flush_cache(arg->kpu_mem_out_address, lines);
#endif
}
void kpu_remove_padding(kpu_model_remove_padding_layer_argument_t *arg)
void kpu_remove_padding(const kpu_model_remove_padding_layer_argument_t *arg)
{
const uint8_t *src = (const uint8_t *)(ctx_.main_buffer + arg->main_mem_in_address);
uint8_t *dest = (uint8_t *)(ctx_.main_buffer + arg->main_mem_out_address);
@@ -494,6 +613,171 @@ private:
*dest++ = src[oc * 16];
}
void kpu_upload(const kpu_model_upload_layer_argument_t *arg)
{
size_t width = arg->width;
size_t height = arg->height;
size_t channels = arg->channels;
kpu_upload_core(width, height, channels, ctx_.main_buffer + arg->main_mem_in_address, arg->kpu_mem_out_address);
}
#if KPU_DEBUG
const char *str_layer_type(uint32_t type)
{
switch (type)
{
case KL_ADD:
return "Add";
case KL_QUANTIZED_ADD:
return "QuantAdd";
case KL_GLOBAL_AVERAGE_POOL2D:
return "GAP";
case KL_QUANTIZED_MAX_POOL2D:
return "QuantMaxPool2d";
case KL_QUANTIZE:
return "Quantize";
case KL_DEQUANTIZE:
return "Dequantize";
case KL_REQUANTIZE:
return "Requantize";
case KL_L2_NORMALIZATION:
return "L2Norm";
case KL_SOFTMAX:
return "Softmax";
case KL_CONCAT:
return "Concat";
case KL_QUANTIZED_CONCAT:
return "QuantConcat";
case KL_K210_CONV:
return "K210Conv";
case KL_K210_ADD_PADDING:
return "K210AddPad";
case KL_K210_REMOVE_PADDING:
return "K210RemovePad";
case KL_K210_UPLOAD:
return "K210Upload";
default:
return "Unknown";
}
}
#endif
int kpu_done()
{
kpu_.interrupt_clear.data.calc_done_int = 1;
kpu_.interrupt_clear.data.layer_cfg_almost_empty_int = 1;
kpu_.interrupt_clear.data.layer_cfg_almost_full_int = 1;
kpu_.interrupt_clear.data.reserved = 0;
kpu_.interrupt_mask.data.calc_done_int = 1;
kpu_.interrupt_mask.data.layer_cfg_almost_empty_int = 1;
kpu_.interrupt_mask.data.layer_cfg_almost_full_int = 1;
kpu_.interrupt_mask.data.reserved = 0;
#if KPU_DEBUG
uint32_t cnt_layer_id = ctx_.current_layer - 1;
gettimeofday(&time_, NULL);
if (total_time_ != 0)
{
uint64_t layer_time = (time_.tv_sec - last_time_.tv_sec) * 1000 * 1000 + (time_.tv_usec - last_time_.tv_usec);
printf("layer %d [%s]: %f ms\n", cnt_layer_id, str_layer_type(last_layer_type_), layer_time / 1000.0);
total_time_ += layer_time;
}
printf("Model: %f ms\n", total_time_ / 1000.0);
#endif
done_flag_ = 1;
return 0;
}
int ai_step()
{
uint32_t cnt_layer_id = ctx_.current_layer++;
const uint8_t *layer_body = ctx_.current_body;
const kpu_model_layer_header_t *cnt_layer_header = ctx_.layer_headers + cnt_layer_id;
ctx_.current_body += cnt_layer_header->body_size;
#if KPU_DEBUG
uint64_t layer_time;
gettimeofday(&time_, NULL);
layer_time = (time_.tv_sec - last_time_.tv_sec) * 1000 * 1000 + (time_.tv_usec - last_time_.tv_usec);
if (total_time_ == 0)
printf("DMA INPUT: %f ms\n", layer_time / 1000.0);
else
printf("layer %d [%s]: %f ms\n", cnt_layer_id - 1, str_layer_type(last_layer_type_), layer_time / 1000.0);
total_time_ += layer_time;
last_layer_type_ = cnt_layer_header->type;
gettimeofday(&last_time_, NULL);
#endif
switch (cnt_layer_header->type)
{
case KL_ADD:
kpu_add((const kpu_model_add_layer_argument_t *)layer_body);
break;
case KL_QUANTIZED_ADD:
kpu_quantized_add((const kpu_model_quant_add_layer_argument_t *)layer_body);
break;
case KL_GLOBAL_AVERAGE_POOL2D:
kpu_global_average_pool2d((const kpu_model_gap2d_layer_argument_t *)layer_body);
break;
case KL_QUANTIZED_MAX_POOL2D:
kpu_quantized_max_pool2d((const kpu_model_quant_max_pool2d_layer_argument_t *)layer_body);
break;
case KL_QUANTIZE:
kpu_quantize((const kpu_model_quantize_layer_argument_t *)layer_body);
break;
case KL_DEQUANTIZE:
kpu_dequantize((const kpu_model_dequantize_layer_argument_t *)layer_body);
break;
case KL_REQUANTIZE:
kpu_requantize((const kpu_model_requantize_layer_argument_t *)layer_body);
break;
case KL_L2_NORMALIZATION:
kpu_l2_normalization((const kpu_model_l2_norm_layer_argument_t *)layer_body);
break;
case KL_SOFTMAX:
kpu_softmax((const kpu_model_softmax_layer_argument_t *)layer_body);
break;
case KL_CONCAT:
case KL_QUANTIZED_CONCAT:
kpu_concat((const kpu_model_concat_layer_argument_t *)layer_body);
break;
case KL_K210_CONV:
kpu_conv((const kpu_model_conv_layer_argument_t *)layer_body);
return 0;
case KL_K210_ADD_PADDING:
kpu_add_padding((const kpu_model_add_padding_layer_argument_t *)layer_body);
break;
case KL_K210_REMOVE_PADDING:
kpu_remove_padding((const kpu_model_remove_padding_layer_argument_t *)layer_body);
break;
case KL_K210_UPLOAD:
kpu_upload((const kpu_model_upload_layer_argument_t *)layer_body);
break;
default:
assert(!"Layer is not supported.");
}
if (cnt_layer_id != (ctx_.layers_length - 1))
{
return 1;
}
else
{
kpu_done();
return 0;
}
}
void ai_step_not_isr()
{
portENTER_CRITICAL();
ai_step();
vPortExitCritical();
}
private:
volatile kpu_config_t &kpu_;
sysctl_clock_t clock_;
@@ -502,9 +786,13 @@ private:
uintptr_t dma_ch_;
SemaphoreHandle_t completion_event_;
uint8_t done_flag_ = 0;
uint8_t *output_address_;
size_t output_size_;
kpu_model_context_t ctx_;
#if KPU_DEBUG
struct timeval time_;
struct timeval last_time_;
uint64_t total_time_ = 0;
uint32_t last_layer_type_;
#endif
};
static k_kpu_driver dev0_driver(AI_BASE_ADDR, SYSCTL_CLOCK_AI, SYSCTL_DMA_SELECT_AI_RX_REQ);

View File

@@ -853,18 +853,30 @@ void rtc_set_datetime(handle_t file, const struct tm *datetime);
handle_t kpu_model_load_from_buffer(uint8_t *buffer);
/**
* @brief KPU run and get output data.
* @brief KPU run.
*
* @param[in] context The kpu context handle
* @param[in] src The src data
* @param[in] output The address of the kpu output data address.
* @param[in] output_size The address of output data size
*
* @return result
* - 0 Success
* - other Fail
*/
int kpu_run(handle_t context, const uint8_t *src, uint8_t **output, size_t *output_size);
int kpu_run(handle_t context, const uint8_t *src);
/**
* @brief Get output data.
*
* @param[in] context The kpu context handle
* @param[in] index The output index.
* @param[out] data Receives a pointer to the output data.
* @param[out] size Receives the output data size in bytes.
*
* @return result
* - 0 Success
* - other Fail
*/
int kpu_get_output(handle_t context, uint32_t index, uint8_t **data, size_t *size);
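/*
 * Minimal usage sketch of the split run/get-output flow (illustrative only:
 * error handling is elided, and `model_data` / `input` are buffers assumed
 * to be supplied by the application; `data` points into the context's main
 * buffer rather than a copy):
 *
 *   handle_t ctx = kpu_model_load_from_buffer(model_data);
 *   if (kpu_run(ctx, input) == 0)
 *   {
 *       uint8_t *data;
 *       size_t size;
 *       kpu_get_output(ctx, 0, &data, &size);
 *   }
 */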
#ifdef __cplusplus
}

View File

@@ -357,7 +357,8 @@ class kpu_driver : public driver
{
public:
virtual handle_t model_load_from_buffer(uint8_t *buffer) = 0;
virtual int run(handle_t context, const uint8_t *src, uint8_t **output, size_t *output_size) = 0;
virtual int run(handle_t context, const uint8_t *src) = 0;
virtual int get_output(handle_t context, uint32_t index, uint8_t **data, size_t *size) = 0;
};
class custom_driver : public driver

View File

@@ -805,10 +805,16 @@ handle_t kpu_model_load_from_buffer(uint8_t *buffer)
return kpu->model_load_from_buffer(buffer);
}
int kpu_run(handle_t context, const uint8_t *src, uint8_t **output, size_t *output_size)
int kpu_run(handle_t context, const uint8_t *src)
{
COMMON_ENTRY_FILE(kpu_file_, kpu);
return kpu->run(context, src, output, output_size);
return kpu->run(context, src);
}
int kpu_get_output(handle_t context, uint32_t index, uint8_t **data, size_t *size)
{
COMMON_ENTRY_FILE(kpu_file_, kpu);
return kpu->get_output(context, index, data, size);
}
/* HAL */

View File

@@ -22,9 +22,6 @@
extern "C" {
#endif
#define ALIGN_UP(x, align) ((x + (align - 1)) & (~(align - 1)))
#define KMODEL_HEADER_SIZE_V2(layers) (ALIGN_UP(sizeof(kpu_model_header_t) + sizeof(kpu_model_layer_metadata_v2_t) * layers, 8) + sizeof(kpu_layer_argument_t) * layers)
typedef struct
{
union
@@ -329,23 +326,43 @@ typedef struct
{
uint32_t version;
uint32_t flags;
uint32_t arch;
uint32_t layers_length;
uint32_t max_start_address;
uint32_t main_mem_usage;
uint32_t output_address;
uint32_t output_size;
uint32_t output_count;
} kpu_model_header_t;
typedef struct
{
uint32_t address;
uint32_t size;
} kpu_model_output_t;
typedef enum
{
KL_GLOBAL_AVERAGE_POOL2D = 0,
KL_QUANTIZE = 1,
KL_DEQUANTIZE = 2,
KL_L2_NORMALIZATION = 3,
KL_K210_CONV = 4,
KL_K210_ADD_PADDING = 5,
KL_K210_REMOVE_PADDING = 6,
_KL_MAX_COUNT
KL_INVALID = 0,
KL_ADD,
KL_QUANTIZED_ADD,
KL_GLOBAL_MAX_POOL2D,
KL_QUANTIZED_GLOBAL_MAX_POOL2D,
KL_GLOBAL_AVERAGE_POOL2D,
KL_QUANTIZED_GLOBAL_AVERAGE_POOL2D,
KL_MAX_POOL2D,
KL_QUANTIZED_MAX_POOL2D,
KL_AVERAGE_POOL2D,
KL_QUANTIZED_AVERAGE_POOL2D,
KL_QUANTIZE,
KL_DEQUANTIZE,
KL_REQUANTIZE,
KL_L2_NORMALIZATION,
KL_SOFTMAX,
KL_CONCAT,
KL_QUANTIZED_CONCAT,
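/* Target-specific K210 hardware layers use their own ID range starting at
   10240, leaving the lower values for the generic CPU-executed layer types
   above. */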
KL_K210_CONV = 10240,
KL_K210_ADD_PADDING,
KL_K210_REMOVE_PADDING,
KL_K210_UPLOAD
} kpu_model_layer_type_t;
typedef struct
@@ -360,12 +377,38 @@ typedef enum
KLF_MAIN_MEM_OUT = 1
} kpu_model_layer_flags_t;
typedef enum
{
KLP_SAME = 0,
KLP_VALID = 1
} kpu_model_padding_t;
typedef enum
{
KLA_LINEAR = 0,
KLA_RELU = 1,
KLA_RELU6 = 2
} kpu_model_activation_t;
typedef struct
{
float scale;
float bias;
} kpu_model_quant_param_t;
typedef struct
{
uint32_t width;
uint32_t height;
uint32_t channels;
} kpu_model_shape_t;
typedef struct
{
uint32_t start;
uint32_t size;
} kpu_model_memory_range_t;
typedef struct
{
uint32_t flags;
@@ -376,6 +419,33 @@ typedef struct
uint32_t act_offset;
} kpu_model_conv_layer_argument_t;
typedef struct
{
uint32_t flags;
uint32_t main_mem_in_a_address;
uint32_t main_mem_in_b_address;
uint32_t main_mem_out_address;
uint32_t count;
} kpu_model_add_layer_argument_t;
typedef struct
{
uint32_t flags;
uint32_t main_mem_in_a_address;
uint32_t main_mem_in_b_address;
uint32_t main_mem_out_address;
uint32_t count;
int32_t in_a_offset;
int32_t in_a_mul;
int32_t in_a_shift;
int32_t in_b_offset;
int32_t in_b_mul;
int32_t in_b_shift;
int32_t out_offset;
int32_t out_mul;
int32_t out_shift;
} kpu_model_quant_add_layer_argument_t;
typedef struct
{
uint32_t flags;
@@ -385,14 +455,27 @@ typedef struct
uint32_t channels;
} kpu_model_gap2d_layer_argument_t;
typedef struct
{
uint32_t flags;
uint32_t main_mem_in_address;
uint32_t main_mem_out_address;
kpu_model_shape_t in_shape;
kpu_model_shape_t out_shape;
uint32_t kernel_width;
uint32_t kernel_height;
uint32_t stride_width;
uint32_t stride_height;
uint32_t padding_width;
uint32_t padding_height;
} kpu_model_quant_max_pool2d_layer_argument_t;
typedef struct
{
uint32_t flags;
uint32_t main_mem_in_address;
uint32_t mem_out_address;
uint32_t width;
uint32_t height;
uint32_t channels;
uint32_t count;
kpu_model_quant_param_t quant_param;
} kpu_model_quantize_layer_argument_t;
@@ -405,6 +488,15 @@ typedef struct
kpu_model_quant_param_t quant_param;
} kpu_model_dequantize_layer_argument_t;
typedef struct
{
uint32_t flags;
uint32_t main_mem_in_address;
uint32_t main_mem_out_address;
uint32_t count;
uint8_t table[256];
} kpu_model_requantize_layer_argument_t;
typedef struct
{
uint32_t flags;
@@ -421,6 +513,16 @@ typedef struct
uint32_t channels;
} kpu_model_remove_padding_layer_argument_t;
typedef struct
{
uint32_t flags;
uint32_t main_mem_in_address;
uint32_t kpu_mem_out_address;
uint32_t width;
uint32_t height;
uint32_t channels;
} kpu_model_upload_layer_argument_t;
typedef struct
{
uint32_t flags;
@@ -429,16 +531,33 @@ typedef struct
uint32_t channels;
} kpu_model_l2_norm_layer_argument_t;
typedef struct
{
uint32_t flags;
uint32_t main_mem_in_address;
uint32_t main_mem_out_address;
uint32_t channels;
} kpu_model_softmax_layer_argument_t;
typedef struct
{
uint8_t *model_buffer;
uint32_t flags;
uint32_t main_mem_out_address;
uint32_t input_count;
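/* Trailing variable-length list (zero-length-array idiom); the element
   count is input_count. */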
kpu_model_memory_range_t inputs_mem[0];
} kpu_model_concat_layer_argument_t;
typedef struct
{
const uint8_t *model_buffer;
uint8_t *main_buffer;
kpu_model_layer_header_t *layer_headers;
uint8_t *body_start;
uint32_t output_count;
const kpu_model_output_t *outputs;
const kpu_model_layer_header_t *layer_headers;
const uint8_t *body_start;
uint32_t layers_length;
volatile uint32_t current_layer;
uint8_t * volatile current_body;
const uint8_t * volatile current_body;
} kpu_model_context_t;
#ifdef __cplusplus