Browse Source

update knowing framework and application from Tian_Chunyu

it is perfect
pull/3/head
xuedongliang 1 year ago
parent
commit
c4daa1dc44
  1. 11
      APP_Framework/Applications/connection_app/Kconfig
  2. 7
      APP_Framework/Applications/knowing_app/Kconfig
  3. 14
      APP_Framework/Applications/knowing_app/SConscript
  4. 7
      APP_Framework/Applications/knowing_app/face_detect/Kconfig
  5. 9
      APP_Framework/Applications/knowing_app/face_detect/SConscript
  6. 253
      APP_Framework/Applications/knowing_app/face_detect/face_detect.c
  7. 0
      APP_Framework/Applications/knowing_app/mnist/.gitignore
  8. BIN
      APP_Framework/Applications/knowing_app/mnist/K210 mnist .png
  9. 4
      APP_Framework/Applications/knowing_app/mnist/Kconfig
  10. 4
      APP_Framework/Applications/knowing_app/mnist/README.md
  11. 9
      APP_Framework/Applications/knowing_app/mnist/SConscript
  12. 20
      APP_Framework/Applications/knowing_app/mnist/digit.h
  13. 50
      APP_Framework/Applications/knowing_app/mnist/main.cpp
  14. 20
      APP_Framework/Applications/knowing_app/mnist/model.h
  15. 18
      APP_Framework/Applications/knowing_app/mnist/tools/mnist-c-digit.py
  16. 23
      APP_Framework/Applications/knowing_app/mnist/tools/mnist-c-model.py
  17. 18
      APP_Framework/Applications/knowing_app/mnist/tools/mnist-inference.py
  18. 18
      APP_Framework/Applications/knowing_app/mnist/tools/mnist-train.py
  19. 2
      APP_Framework/Framework/Kconfig
  20. 8
      APP_Framework/Framework/know/Kconfig
  21. 4
      APP_Framework/Framework/know/tflite_mnist/Kconfig
  22. 8
      APP_Framework/Framework/know/tflite_mnist/Makefile
  23. 30
      APP_Framework/Framework/know/tflite_mnist/mnistmain.c
  24. 41
      APP_Framework/Framework/know/tflite_mnist/tools/mnist-c-model.py
  25. 4
      APP_Framework/Framework/know/tflite_sin/Kconfig
  26. 11
      APP_Framework/Framework/know/tflite_sin/Makefile
  27. 8
      APP_Framework/Framework/knowing/Kconfig
  28. 0
      APP_Framework/Framework/knowing/Makefile
  29. 14
      APP_Framework/Framework/knowing/SConscript
  30. 5
      APP_Framework/Framework/knowing/kpu-postprocessing/Kconfig
  31. 14
      APP_Framework/Framework/knowing/kpu-postprocessing/SConscript
  32. 7
      APP_Framework/Framework/knowing/kpu-postprocessing/yolov2/Kconfig
  33. 10
      APP_Framework/Framework/knowing/kpu-postprocessing/yolov2/SConscript
  34. 437
      APP_Framework/Framework/knowing/kpu-postprocessing/yolov2/region_layer.c
  35. 49
      APP_Framework/Framework/knowing/kpu-postprocessing/yolov2/region_layer.h
  36. 24
      APP_Framework/Framework/knowing/tensorflow-lite/Kconfig
  37. 207
      APP_Framework/Framework/knowing/tensorflow-lite/SConscript
  38. 1
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/.gitignore
  39. 203
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/README.md
  40. 2171
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/patch/cmsis_gcc.h
  41. 203
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/LICENSE
  42. 57
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/Makefile
  43. 29
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/README_MAKE.md
  44. 139
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/core/public/version.h
  45. 484
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/c/builtin_op_data.h
  46. 92
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/c/c_api_types.h
  47. 236
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/c/common.c
  48. 913
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/c/common.h
  49. 38
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/core/api/error_reporter.cc
  50. 59
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/core/api/error_reporter.h
  51. 1945
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/core/api/flatbuffer_conversions.cc
  52. 301
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/core/api/flatbuffer_conversions.h
  53. 67
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/core/api/op_resolver.cc
  54. 60
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/core/api/op_resolver.h
  55. 194
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/core/api/profiler.h
  56. 50
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/core/api/tensor_utils.cc
  57. 28
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/core/api/tensor_utils.h
  58. 1037
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/common.h
  59. 112
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/compatibility.h
  60. 40
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/cppmath.h
  61. 35
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/max.h
  62. 35
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/min.h
  63. 40
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/optimized/neon_check.h
  64. 122
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/portable_tensor.h
  65. 395
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/quantization_util.cc
  66. 292
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/quantization_util.h
  67. 446
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/add.h
  68. 68
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/arg_min_max.h
  69. 80
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/binary_function.h
  70. 37
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/ceil.h
  71. 280
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/comparisons.h
  72. 139
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/concatenation.h
  73. 264
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/conv.h
  74. 100
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h
  75. 297
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h
  76. 78
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/dequantize.h
  77. 39
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/floor.h
  78. 320
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/fully_connected.h
  79. 166
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/hard_swish.h
  80. 144
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/add.h
  81. 221
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h
  82. 289
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h
  83. 108
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h
  84. 65
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h
  85. 106
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h
  86. 77
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h
  87. 131
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h
  88. 258
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h
  89. 110
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h
  90. 221
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h
  91. 90
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/l2normalization.h
  92. 132
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/logistic.h
  93. 64
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/maximum_minimum.h
  94. 166
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/mul.h
  95. 37
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/neg.h
  96. 162
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/pad.h
  97. 297
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/pooling.h
  98. 109
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/prelu.h
  99. 138
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h
  100. 55
      APP_Framework/Framework/knowing/tensorflow-lite/tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/reference/quantize.h

11
APP_Framework/Applications/connection_app/Kconfig

@ -1,14 +1,3 @@
menu "connection app"
menuconfig APPLICATION_CONNECTION
bool "Using connection apps"
default n
menuconfig CONNECTION_COMMUNICATION_ZIGBEE
bool "enable zigbee demo"
default n
select CONFIG_CONNECTION_COMMUNICATION_ZIGBEE
if CONNECTION_COMMUNICATION_ZIGBEE
source "$KERNEL_DIR/framework/connection/Adapter/zigbee/Kconfig"
endif
endmenu

7
APP_Framework/Applications/knowing_app/Kconfig

@ -1,7 +1,4 @@
menu "knowing app"
menuconfig APPLICATION_KNOWING
bool "Using knowing apps"
default n
source "$APP_DIR/Applications/knowing_app/mnist/Kconfig"
source "$APP_DIR/Applications/knowing_app/face_detect/Kconfig"
endmenu

14
APP_Framework/Applications/knowing_app/SConscript

@ -0,0 +1,14 @@
# Aggregator SConscript: descend into each immediate sub-directory that
# provides its own SConscript and collect the build objects it returns.
import os
Import('RTT_ROOT')
from building import *

cwd = GetCurrentDir()
objs = []
# Avoid shadowing the built-in `list`; iterate directory entries directly.
for entry in os.listdir(cwd):
    path = os.path.join(cwd, entry)
    script = os.path.join(path, 'SConscript')
    # Only recurse into real sub-projects (directories owning a SConscript).
    if os.path.isfile(script):
        objs = objs + SConscript(script)
Return('objs')

7
APP_Framework/Applications/knowing_app/face_detect/Kconfig

@ -0,0 +1,7 @@
config FACE_DETECT
bool "enable apps/face detect"
depends on BOARD_K210_EVB
depends on DRV_USING_OV2640
depends on USING_KPU_POSTPROCESSING
depends on USING_YOLOV2
default n

9
APP_Framework/Applications/knowing_app/face_detect/SConscript

@ -0,0 +1,9 @@
from building import *

# Register every C/C++ source in this directory with the 'Applications'
# group; built only when FACE_DETECT is selected in Kconfig.
cwd = GetCurrentDir()
sources = Glob('*.c') + Glob('*.cpp')
local_includes = [cwd]
group = DefineGroup('Applications', sources, depend = ['FACE_DETECT'], LOCAL_CPPPATH = local_includes)
Return('group')

253
APP_Framework/Applications/knowing_app/face_detect/face_detect.c

@ -0,0 +1,253 @@
#include <stdint.h>

#include <transform.h>

#include "region_layer.h"
#define SHOW_RGB_BUF_SIZE (320*240*2)
#define AI_KPU_RGB_BUF_SIZE (320*240*3)
#define KMODEL_SIZE (388776) //face model size
#define ANCHOR_NUM 5
#define KPUIMAGEWIDTH (320)
#define KPUIMAGEHEIGHT (240)
static float anchor[ANCHOR_NUM * 2] = {1.889,2.5245, 2.9465,3.94056, 3.99987,5.3658, 5.155437,6.92275, 6.718375,9.01025};
#define THREAD_PRIORITY_FACE_D (11)
static pthread_t facetid = 0;
static void* thread_face_detcet_entry(void *parameter);
static int g_fd = 0;
static int kmodel_fd = 0;
static int if_exit = 0;
static unsigned char * showbuffer = NULL ;
static unsigned char * kpurgbbuffer = NULL ;
static _ioctl_shoot_para shoot_para_t = {0};
unsigned char * model_data = NULL; //kpu data load memory
unsigned char *model_data_align = NULL;
kpu_model_context_t face_detect_task;
static region_layer_t face_detect_rl;
static obj_info_t face_detect_info;
/* Set by the KPU completion callback below; polled by the detect thread. */
volatile uint32_t g_ai_done_flag;

/* KPU inference-complete callback: signal the polling loop. */
static void ai_done(void *userdata)
{
    g_ai_done_flag = 1;
}
void face_detect()
{
int ret = 0;
int result = 0;
int size = 0;
g_fd = open("/dev/ov2640",O_RDONLY);
if(g_fd < 0)
{
printf("open ov2640 fail !!");
return;
}
showbuffer = (unsigned char*)malloc(SHOW_RGB_BUF_SIZE);
if(NULL ==showbuffer)
{
close(g_fd);
printf("showbuffer apply memory fail !!");
return ;
}
kpurgbbuffer = (unsigned char*)malloc(AI_KPU_RGB_BUF_SIZE);
if(NULL ==kpurgbbuffer)
{
close(g_fd);
free(showbuffer);
printf("kpurgbbuffer apply memory fail !!");
return ;
}
model_data = (unsigned char *)malloc(KMODEL_SIZE + 255);
if(NULL ==model_data)
{
free(showbuffer);
free(kpurgbbuffer);
close(g_fd);
printf("model_data apply memory fail !!");
return ;
}
memset(model_data,0,KMODEL_SIZE + 255);
memset(showbuffer,0,SHOW_RGB_BUF_SIZE);
memset(kpurgbbuffer,0,AI_KPU_RGB_BUF_SIZE);
shoot_para_t.pdata = (unsigned int *)(showbuffer);
shoot_para_t.length = SHOW_RGB_BUF_SIZE;
/*
load memory
*/
kmodel_fd = open("/kmodel/detect.kmodel",O_RDONLY);
if(kmodel_fd <0)
{
printf("open kmodel fail");
close(g_fd);
free(showbuffer);
free(kpurgbbuffer);
free(model_data);
return;
}
else
{
size = read(kmodel_fd, model_data, KMODEL_SIZE);
if(size != KMODEL_SIZE)
{
printf("read kmodel error size %d\n",size);
close(g_fd);
close(kmodel_fd);
free(showbuffer);
free(kpurgbbuffer);
free(model_data);
return;
}
else
{
printf("read kmodel success \n");
}
}
unsigned char *model_data_align = (unsigned char *)(((unsigned int)model_data+255)&(~255));
dvp_set_ai_addr((uint32_t)kpurgbbuffer, (uint32_t)(kpurgbbuffer + 320 * 240), (uint32_t)(kpurgbbuffer + 320 * 240 * 2));
if (kpu_load_kmodel(&face_detect_task, model_data_align) != 0)
{
printf("\nmodel init error\n");
close(g_fd);
close(kmodel_fd);
free(showbuffer);
free(kpurgbbuffer);
free(model_data);
return;
}
face_detect_rl.anchor_number = ANCHOR_NUM;
face_detect_rl.anchor = anchor;
face_detect_rl.threshold = 0.7;
face_detect_rl.nms_value = 0.3;
result = region_layer_init(&face_detect_rl, 20, 15, 30, KPUIMAGEWIDTH, KPUIMAGEHEIGHT);
printf("region_layer_init result %d \n\r",result);
size_t stack_size = 32*1024;
pthread_attr_t attr; /* 线程属性 */
struct sched_param prio; /* 线程优先级 */
prio.sched_priority = 8; /* 优先级设置为 8 */
pthread_attr_init(&attr); /* 先使用默认值初始化属性 */
pthread_attr_setschedparam(&attr,&prio); /* 修改属性对应的优先级 */
pthread_attr_setstacksize(&attr, stack_size);
/* 创建线程 1, 属性为 attr,入口函数是 thread_entry,入口函数参数是 1 */
result = pthread_create(&facetid,&attr,thread_face_detcet_entry,NULL);
if (0 == result)
{
printf("thread_face_detcet_entry successfully!\n");
}
else
{
printf("thread_face_detcet_entry failed! error code is %d\n",result);
close(g_fd);
}
}
#ifdef __RT_THREAD_H__
MSH_CMD_EXPORT(face_detect,face detect task);
#endif
/*
 * thread_face_detcet_entry: worker loop of the face-detection demo.
 * Each iteration: trigger one camera shot, run the kmodel on the KPU,
 * post-process the output with the YOLOv2 region layer, then (when an
 * LCD is configured) draw detection boxes over the frame and display it.
 * Exits when face_detect_delete() sets if_exit.
 */
static void* thread_face_detcet_entry(void *parameter)
{
extern void lcd_draw_picture(uint16_t x1, uint16_t y1, uint16_t width, uint16_t height, uint32_t *ptr);
printf("thread_face_detcet_entry start!\n");
int ret = 0;
//sysctl_enable_irq();
while(1)
{
//memset(showbuffer,0,320*240*2);
/* Arm the completion flag, then request one frame.  The driver fills
 * showbuffer (shoot_para_t.pdata); the KPU input buffer was registered
 * with dvp_set_ai_addr() in face_detect(). */
g_ai_done_flag = 0;
ret = ioctl(g_fd,IOCTRL_CAMERA_START_SHOT,&shoot_para_t);
if(RT_ERROR == ret)
{
printf("ov2640 can't wait event flag");
/* NOTE(review): this path frees only showbuffer (and with rt_free,
 * though it was allocated via malloc) and leaks kpurgbbuffer and
 * model_data -- confirm intended cleanup. */
rt_free(showbuffer);
close(g_fd);
pthread_exit(NULL);
return NULL;
}
/* Start inference; ai_done() sets g_ai_done_flag from the KPU callback. */
kpu_run_kmodel(&face_detect_task, kpurgbbuffer, DMAC_CHANNEL5, ai_done, NULL);
while(!g_ai_done_flag);  /* busy-wait until the callback fires */
float *output;
size_t output_size;
kpu_get_output(&face_detect_task, 0, (uint8_t **)&output, &output_size);
face_detect_rl.input = output;
region_layer_run(&face_detect_rl, &face_detect_info);
/* display result */
#ifdef BSP_USING_LCD
for (int face_cnt = 0; face_cnt < face_detect_info.obj_number; face_cnt++)
{
/* 0xF800 is pure red in RGB565. */
draw_edge((uint32_t *)showbuffer, &face_detect_info, face_cnt, 0xF800);
}
lcd_draw_picture(0, 0, 320, 240, (unsigned int*)showbuffer);
#endif
usleep(1);
/* face_detect_delete() sets if_exit to request shutdown. */
if(1 == if_exit)
{
if_exit = 0;
printf("thread_face_detcet_entry exit");
pthread_exit(NULL);
}
}
}
/*
 * face_detect_delete: MSH command that releases the camera/model
 * resources and asks the worker thread to exit via if_exit.
 *
 * Pointers are reset to NULL after freeing so a second invocation is a
 * no-op instead of a double-free (the original left them dangling).
 *
 * NOTE(review): the worker thread may still be using these buffers when
 * they are freed here -- it only checks if_exit at the end of its loop.
 * Confirm the shutdown ordering on real hardware.
 */
void face_detect_delete()
{
    if (showbuffer != NULL)
    {
        int ret = 0;
        close(g_fd);
        close(kmodel_fd);
        free(showbuffer);
        free(kpurgbbuffer);
        free(model_data);
        showbuffer = NULL;   /* guard against double-free on repeat calls */
        kpurgbbuffer = NULL;
        model_data = NULL;
        printf("face detect task cancel!!! ret %d ", ret);
        if_exit = 1;
    }
}
#ifdef __RT_THREAD_H__
MSH_CMD_EXPORT(face_detect_delete,face detect task delete);
#endif
/*
 * kmodel_load: debug helper (MSH command) that reads the face kmodel
 * from /kmodel/detect.kmodel into a freshly allocated buffer.
 *
 * NOTE(review): the parameter is passed by value and shadows the
 * file-scope model_data, so the allocated buffer never reaches any
 * caller and is effectively leaked on success -- confirm whether this
 * should assign the global instead.  TODO verify intent.
 */
void kmodel_load(unsigned char * model_data)
{
    int kmodel_fd = 0;
    int size = 0;

    kmodel_fd = open("/kmodel/detect.kmodel", O_RDONLY);
    model_data = (unsigned char *)malloc(KMODEL_SIZE + 255);
    if (NULL == model_data)
    {
        /* Fix: the fd was leaked on this path. */
        if (kmodel_fd >= 0)
            close(kmodel_fd);
        printf("model_data apply memory fail !!");
        return;
    }
    memset(model_data, 0, KMODEL_SIZE + 255);
    if (kmodel_fd >= 0)
    {
        size = read(kmodel_fd, model_data, KMODEL_SIZE);
        if (size != KMODEL_SIZE)
        {
            printf("read kmodel error size %d\n", size);
        }
        else
        {
            printf("read kmodel success");
        }
        close(kmodel_fd);  /* fix: fd was never closed after reading */
    }
    else
    {
        free(model_data);
        printf("open kmodel fail");
    }
}
#ifdef __RT_THREAD_H__
MSH_CMD_EXPORT(kmodel_load,kmodel load memory);
#endif

0
APP_Framework/Framework/know/tflite_mnist/.gitignore → APP_Framework/Applications/knowing_app/mnist/.gitignore

BIN
APP_Framework/Applications/knowing_app/mnist/K210 mnist .png

Binary file not shown.

After

Width:  |  Height:  |  Size: 32 KiB

4
APP_Framework/Applications/knowing_app/mnist/Kconfig

@ -0,0 +1,4 @@
config APP_MNIST
bool "enable apps/mnist"
depends on USING_TENSORFLOWLITEMICRO
default n

4
APP_Framework/Framework/know/tflite_mnist/README.md → APP_Framework/Applications/knowing_app/mnist/README.md

@ -1,5 +1,9 @@
# MNIST 说明
要使用本例程,MCU RAM必须至少500K左右,所以本例程目前在K210上面验证过,stm32f407 目前在rtt上原则上只能采取dlmodule加载的方式。
![K210 mnist](<./K210 mnist .png>)
## 使用
tools/mnist-train.py 训练生成 mnist 模型。

9
APP_Framework/Applications/knowing_app/mnist/SConscript

@ -0,0 +1,9 @@
from building import *

# Register every C/C++ source in this directory with the 'Applications'
# group; built only when APP_MNIST is selected in Kconfig.
cwd = GetCurrentDir()
sources = Glob('*.c') + Glob('*.cpp')
local_includes = [cwd]
group = DefineGroup('Applications', sources, depend = ['APP_MNIST'], LOCAL_CPPPATH = local_includes)
Return('group')

20
APP_Framework/Framework/know/tflite_mnist/digit.h → APP_Framework/Applications/knowing_app/mnist/digit.h

@ -1,23 +1,3 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file: digit.h
* @brief: store digits in this file
* @version: 1.0
* @author: AIIT XUOS Lab
* @date: 2021/4/30
*
*/
const float mnist_digit[] = {
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,

50
APP_Framework/Framework/know/tflite_mnist/mnistapp.cpp → APP_Framework/Applications/knowing_app/mnist/main.cpp

@ -1,24 +1,5 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file: mnistapp.cpp
* @brief: mnist function
* @version: 1.0
* @author: AIIT XUOS Lab
* @date: 2021/4/30
*
*/
#include <xiuos.h>
#include <transform.h>
#include <stdio.h>
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
@ -36,8 +17,8 @@ tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* input = nullptr;
TfLiteTensor* output = nullptr;
constexpr int kTensorArenaSize = 110 * 1024;
//uint8_t *tensor_arena = nullptr;
uint8_t tensor_arena[kTensorArenaSize];
uint8_t *tensor_arena = nullptr;
//uint8_t tensor_arena[kTensorArenaSize];
}
extern "C" void mnist_app() {
@ -52,13 +33,12 @@ extern "C" void mnist_app() {
model->version(), TFLITE_SCHEMA_VERSION);
return;
}
/*
tensor_arena = (uint8_t *)rt_malloc(kTensorArenaSize);
tensor_arena = (uint8_t *)malloc(kTensorArenaSize);
if (tensor_arena == nullptr) {
TF_LITE_REPORT_ERROR(error_reporter, "malloc for tensor_arena failed");
return;
}
*/
tflite::AllOpsResolver resolver;
tflite::MicroInterpreter static_interpreter(
@ -75,15 +55,15 @@ extern "C" void mnist_app() {
input = interpreter->input(0);
output = interpreter->output(0);
KPrintf("\n------- Input Digit -------\n");
printf("------- Input Digit -------\n");
for (int i = 0; i < 28; i++) {
for (int j = 0; j < 28; j++) {
if (mnist_digit[i*28+j] > 0.3)
KPrintf("#");
printf("#");
else
KPrintf(".");
printf(".");
}
KPrintf("\n");
printf("\n");
}
for (int i = 0; i < 28*28; i++) {
@ -105,8 +85,12 @@ extern "C" void mnist_app() {
index = i;
}
}
printf("------- Output Result -------\n");
printf("result is %d\n", index);
}
KPrintf("\n------- Output Result -------\n");
KPrintf("result is %d\n\n", index);
extern "C" {
#ifdef __RT_THREAD_H__
MSH_CMD_EXPORT(mnist_app, run mnist app);
#endif
}

20
APP_Framework/Framework/know/tflite_mnist/model.h → APP_Framework/Applications/knowing_app/mnist/model.h

@ -1,23 +1,3 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file: model.h
* @brief: store model weights in this file
* @version: 1.0
* @author: AIIT XUOS Lab
* @date: 2021/4/30
*
*/
unsigned char mnist_model[] = {
0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, 0x14, 0x00, 0x20, 0x00,
0x04, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x14, 0x00, 0x00, 0x00,

18
APP_Framework/Framework/know/tflite_mnist/tools/mnist-c-digit.py → APP_Framework/Applications/knowing_app/mnist/tools/mnist-c-digit.py

@ -1,22 +1,4 @@
#!/usr/bin/env python3
# ==========================================================================================
# Copyright (c) 2020 AIIT XUOS Lab
# XiOS is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# @file: mnist-c-digit.py
# @brief: print image digit at command line
# @version: 1.0
# @author: AIIT XUOS Lab
# @date: 2021/4/30
# ==========================================================================================
import tensorflow as tf

23
APP_Framework/Applications/knowing_app/mnist/tools/mnist-c-model.py

@ -0,0 +1,23 @@
#!/usr/bin/env python3
"""Embed a TFLite flatbuffer into a C header as an unsigned-char array.

Reads ``mnist.tflite`` and writes ``model.h``, formatting 12 bytes per
line -- the same layout as ``xxd -i``.
"""

#tflite_file_path = 'mnist-default-quan.tflite'
tflite_file_path = 'mnist.tflite'
model_file_path = 'model.h'

# Context manager guarantees the handle is closed even if reading raises.
with open(tflite_file_path, 'rb') as tflite_file:
    tflite_data = tflite_file.read()

tflite_array = [ '0x%02x' % byte for byte in tflite_data ]

model_content = '''unsigned char mnist_model[] = {
%s
};
unsigned int mnist_model_len = %d;
'''

# 12 bytes in a line, the same with xxd
bytes_of_line = 12
model_data = (',\n ').join([ (', ').join(tflite_array[i:i+bytes_of_line]) for i in range(0, len(tflite_array), bytes_of_line) ])

with open(model_file_path, 'w') as model_file:
    model_file.write(model_content % (model_data, len(tflite_array)))

18
APP_Framework/Framework/know/tflite_mnist/tools/mnist-inference.py → APP_Framework/Applications/knowing_app/mnist/tools/mnist-inference.py

@ -1,22 +1,4 @@
#!/usr/bin/env python3
# ==========================================================================================
# Copyright (c) 2020 AIIT XUOS Lab
# XiOS is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# @file: mnist-inference.py
# @brief: load data and start model inference
# @version: 1.0
# @author: AIIT XUOS Lab
# @date: 2021/4/30
# ==========================================================================================
import tensorflow as tf

18
APP_Framework/Framework/know/tflite_mnist/tools/mnist-train.py → APP_Framework/Applications/knowing_app/mnist/tools/mnist-train.py

@ -1,22 +1,4 @@
#!/usr/bin/env python3
# ==========================================================================================
# Copyright (c) 2020 AIIT XUOS Lab
# XiOS is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# @file: mnist-train.py
# @brief: model training
# @version: 1.0
# @author: AIIT XUOS Lab
# @date: 2021/4/30
# ==========================================================================================
import os
import tensorflow as tf

2
APP_Framework/Framework/Kconfig

@ -20,7 +20,7 @@ menu "Framework"
source "$APP_DIR/Framework/sensor/Kconfig"
source "$APP_DIR/Framework/connection/Kconfig"
source "$APP_DIR/Framework/know/Kconfig"
source "$APP_DIR/Framework/knowing/Kconfig"
source "$APP_DIR/Framework/control/Kconfig"

8
APP_Framework/Framework/know/Kconfig

@ -1,8 +0,0 @@
menuconfig SUPPORT_KNOWING_FRAMEWORK
bool "support knowing framework"
default y
if SUPPORT_KNOWING_FRAMEWORK
source "$APP_DIR/Framework/know/tflite_sin/Kconfig"
source "$APP_DIR/Framework/know/tflite_mnist/Kconfig"
endif

4
APP_Framework/Framework/know/tflite_mnist/Kconfig

@ -1,4 +0,0 @@
menuconfig USING_TFLITE_MNIST
bool "mnist demo app for tflite micro"
depends on INTELLIGENT_TFLITE
default n

8
APP_Framework/Framework/know/tflite_mnist/Makefile

@ -1,8 +0,0 @@
ifeq ($(CONFIG_USING_TFLITE_MNIST),y)
SRC_FILES := \
mnistapp.cpp \
mnistmain.c
CPPPATHS += -I.
endif
include $(KERNEL_ROOT)/compiler.mk

30
APP_Framework/Framework/know/tflite_mnist/mnistmain.c

@ -1,30 +0,0 @@
/*
* Copyright (c) 2020 AIIT XUOS Lab
* XiOS is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/**
* @file: mnistmain.c
* @brief: start mnist function
* @version: 1.0
* @author: AIIT XUOS Lab
* @date: 2021/4/30
*
*/
#include <xiuos.h>
void mnist_app(void);
int tfmnist(void) {
mnist_app();
}
SHELL_EXPORT_CMD(SHELL_CMD_PERMISSION(0)|SHELL_CMD_TYPE(SHELL_TYPE_CMD_FUNC)|SHELL_CMD_PARAM_NUM(0), tfmnist, tfmnist, run mnist demo of tflite);

41
APP_Framework/Framework/know/tflite_mnist/tools/mnist-c-model.py

@ -1,41 +0,0 @@
#!/usr/bin/env python3
# ==========================================================================================
# Copyright (c) 2020 AIIT XUOS Lab
# XiOS is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# @file: mnist-c-model.py
# @brief: open file path and load model
# @version: 1.0
# @author: AIIT XUOS Lab
# @date: 2021/4/30
# ==========================================================================================
#tflite_file_path = 'mnist-default-quan.tflite'
tflite_file_path = 'mnist.tflite'
model_file_path = 'model.h'
tflite_file = open(tflite_file_path, 'rb')
tflite_data = tflite_file.read()
tflite_file.close()
tflite_array = [ '0x%02x' % byte for byte in tflite_data ]
model_content = '''unsigned char mnist_model[] = {
%s
};
unsigned int mnist_model_len = %d;
'''
# 12 bytes in a line, the same with xxd
bytes_of_line = 12
model_data = (',\n ').join([ (', ').join(tflite_array[i:i+bytes_of_line]) for i in range(0, len(tflite_array), bytes_of_line) ])
model_file = open(model_file_path, 'w')
model_file.write(model_content % (model_data, len(tflite_array)))
model_file.close()

4
APP_Framework/Framework/know/tflite_sin/Kconfig

@ -1,4 +0,0 @@
menuconfig USING_TFLITE_SIN
bool "sin(x) demo app for tflite micro"
depends on INTELLIGENT_TFLITE
default n

11
APP_Framework/Framework/know/tflite_sin/Makefile

@ -1,11 +0,0 @@
# Source list for the sin(x) TFLite-Micro demo; only compiled when
# CONFIG_USING_TFLITE_SIN is enabled via Kconfig.
ifeq ($(CONFIG_USING_TFLITE_SIN),y)
SRC_FILES := \
	sinmain.c \
	main_functions.cc \
	model.cc \
	output_handler.cc \
	constants.cc
# Demo headers live alongside the sources.
CPPPATHS += -I.
endif

include $(KERNEL_ROOT)/compiler.mk

8
APP_Framework/Framework/knowing/Kconfig

@ -0,0 +1,8 @@
menuconfig SUPPORT_KNOWING_FRAMEWORK
bool "support knowing framework"
default y
if SUPPORT_KNOWING_FRAMEWORK
source "$APP_DIR/Framework/knowing/tensorflow-lite/Kconfig"
source "$APP_DIR/Framework/knowing/kpu-postprocessing/Kconfig"
endif

0
APP_Framework/Framework/know/Makefile → APP_Framework/Framework/knowing/Makefile

14
APP_Framework/Framework/knowing/SConscript

@ -0,0 +1,14 @@
import os
Import('RTT_ROOT')
from building import *

# Aggregate the build objects of every immediate subdirectory that provides
# its own SConscript. Iterating in sorted order makes the object list
# deterministic across filesystems, and 'entry' avoids shadowing the
# builtin 'list' (the original did both).
cwd = GetCurrentDir()
objs = []
for entry in sorted(os.listdir(cwd)):
    path = os.path.join(cwd, entry)
    if os.path.isfile(os.path.join(path, 'SConscript')):
        objs += SConscript(os.path.join(path, 'SConscript'))

Return('objs')

5
APP_Framework/Framework/knowing/kpu-postprocessing/Kconfig

@ -0,0 +1,5 @@
menuconfig USING_KPU_POSTPROCESSING
bool "kpu model postprocessing"
default y
source "$APP_DIR/Framework/knowing/kpu-postprocessing/yolov2/Kconfig"

14
APP_Framework/Framework/knowing/kpu-postprocessing/SConscript

@ -0,0 +1,14 @@
import os
Import('RTT_ROOT')
from building import *

# Collect build objects from each child directory carrying a SConscript.
# sorted() gives a stable, filesystem-independent traversal order, and the
# loop variable no longer shadows the builtin 'list' as the original did.
cwd = GetCurrentDir()
objs = []
for subdir in sorted(os.listdir(cwd)):
    child = os.path.join(cwd, subdir)
    if os.path.isfile(os.path.join(child, 'SConscript')):
        objs += SConscript(os.path.join(child, 'SConscript'))

Return('objs')

7
APP_Framework/Framework/knowing/kpu-postprocessing/yolov2/Kconfig

@ -0,0 +1,7 @@
menuconfig USING_YOLOV2
bool "yolov2 region layer"
depends on USING_KPU_POSTPROCESSING
default n

10
APP_Framework/Framework/knowing/kpu-postprocessing/yolov2/SConscript

@ -0,0 +1,10 @@
from building import *
import os

# Register every C source of this directory as the 'yolov2' build group;
# it is compiled only when USING_YOLOV2 is enabled in Kconfig, and this
# directory is added to the include path for dependents.
cwd = GetCurrentDir()
group = DefineGroup('yolov2', Glob('*.c'), depend=['USING_YOLOV2'], CPPPATH=[cwd])
Return('group')

437
APP_Framework/Framework/knowing/kpu-postprocessing/yolov2/region_layer.c

@ -0,0 +1,437 @@
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#include "region_layer.h"
/* Axis-aligned box in normalized coordinates: (x, y) is the center,
 * (w, h) the full width and height. */
typedef struct
{
    float x;
    float y;
    float w;
    float h;
} box_t;

/* Record used while sorting boxes for NMS: 'probs' points at the shared
 * per-box probability table, 'index' selects the row (box) and 'class'
 * the column being compared in the current pass. */
typedef struct
{
    int index;
    int class;
    float **probs;
} sortable_box_t;
/**
 * Allocate and size the work buffers of a YOLOv2 region layer.
 *
 * The caller must have set rl->anchor_number before calling (it is read
 * when computing boxes_number below).
 *
 * @param rl             layer to initialize
 * @param width, height  feature-map size of the network output layer
 * @param channels       output channel count
 * @param origin_width, origin_height  network input resolution
 * @return 0 on success; -1..-4 identifying which allocation failed.
 */
int region_layer_init(region_layer_t *rl, int width, int height, int channels, int origin_width, int origin_height)
{
    int flag = 0;

    rl->coords = 4;
    /* NOTE(review): source image size is hard-coded to 320x240 — confirm
     * against the camera/LCD configuration. */
    rl->image_width = 320;
    rl->image_height = 240;
    /* NOTE(review): this assumes channels == 5 * (5 + classes), i.e. five
     * anchors; it does not use rl->anchor_number — verify for your model. */
    rl->classes = channels / 5 - 5;
    rl->net_width = origin_width;
    rl->net_height = origin_height;
    rl->layer_width = width;
    rl->layer_height = height;
    rl->boxes_number = (rl->layer_width * rl->layer_height * rl->anchor_number);
    rl->output_number = (rl->boxes_number * (rl->classes + rl->coords + 1));

    /* Pre-clear all buffer pointers so the cleanup path below only ever
     * frees NULL or a live allocation. The original jumped to malloc_error
     * and freed uninitialized members, which is undefined behavior. */
    rl->output = NULL;
    rl->boxes = NULL;
    rl->probs_buf = NULL;
    rl->probs = NULL;

    rl->output = malloc(rl->output_number * sizeof(float));
    if (rl->output == NULL)
    {
        flag = -1;
        goto malloc_error;
    }
    rl->boxes = malloc(rl->boxes_number * sizeof(box_t));
    if (rl->boxes == NULL)
    {
        flag = -2;
        goto malloc_error;
    }
    rl->probs_buf = malloc(rl->boxes_number * (rl->classes + 1) * sizeof(float));
    if (rl->probs_buf == NULL)
    {
        flag = -3;
        goto malloc_error;
    }
    rl->probs = malloc(rl->boxes_number * sizeof(float *));
    if (rl->probs == NULL)
    {
        flag = -4;
        goto malloc_error;
    }
    /* Slice probs_buf into one row of (classes + 1) floats per box; the
     * extra slot holds the per-box maximum class probability. */
    for (uint32_t i = 0; i < rl->boxes_number; i++)
        rl->probs[i] = &(rl->probs_buf[i * (rl->classes + 1)]);
    return 0;

malloc_error:
    free(rl->output);
    free(rl->boxes);
    free(rl->probs_buf);
    free(rl->probs);
    return flag;
}
/* Release the buffers allocated by region_layer_init(), in reverse order
 * of allocation. */
void region_layer_deinit(region_layer_t *rl)
{
    free(rl->probs);
    free(rl->probs_buf);
    free(rl->boxes);
    free(rl->output);
}
static inline float sigmoid(float x)
{
return 1.f / (1.f + expf(-x));
}
/* Apply the logistic function to n consecutive values, reading rl->input
 * and writing rl->output at the same offset. */
static void activate_array(region_layer_t *rl, int index, int n)
{
    float *dst = &rl->output[index];
    float *src = &rl->input[index];
    int i;

    for (i = 0; i < n; ++i)
        dst[i] = sigmoid(src[i]);
}
/* Flatten (box location, channel entry) into an offset in the layer tensor.
 * The tensor is anchor-major: each anchor owns (coords + classes + 1)
 * planes of layer_width * layer_height values. */
static int entry_index(region_layer_t *rl, int location, int entry)
{
    int plane = rl->layer_width * rl->layer_height;
    int anchor = location / plane;
    int cell = location % plane;

    return anchor * plane * (rl->coords + rl->classes + 1) + entry * plane + cell;
}
/* Numerically-stable softmax over n strided elements: the maximum is
 * subtracted before exponentiating so expf() cannot overflow. The 'rl'
 * parameter is unused. */
static void softmax(region_layer_t *rl, float *input, int n, int stride, float *output)
{
    float max_val = input[0];
    float total = 0;
    int i;

    for (i = 0; i < n; ++i)
    {
        if (input[i * stride] > max_val)
            max_val = input[i * stride];
    }
    for (i = 0; i < n; ++i)
    {
        float e = expf(input[i * stride] - max_val);

        total += e;
        output[i * stride] = e;
    }
    for (i = 0; i < n; ++i)
        output[i * stride] /= total;
}
/* Run softmax independently over each (batch, group) slice of the tensor. */
static void softmax_cpu(region_layer_t *rl, float *input, int n, int batch, int batch_offset, int groups, int stride, float *output)
{
    for (int b = 0; b < batch; ++b)
    {
        float *in = input + b * batch_offset;
        float *out = output + b * batch_offset;

        for (int g = 0; g < groups; ++g)
            softmax(rl, in + g, n, stride, out + g);
    }
}
/* Transform the raw network output in rl->input into rl->output: copy all
 * values through, apply the logistic activation to each anchor's x/y box
 * offsets and objectness score, then softmax the class scores. */
static void forward_region_layer(region_layer_t *rl)
{
    int index;

    /* Start from a verbatim copy; the activations below overwrite in place. */
    for (index = 0; index < rl->output_number; index++)
        rl->output[index] = rl->input[index];
    for (int n = 0; n < rl->anchor_number; ++n)
    {
        /* Entries 0..1 are the x/y offsets -> sigmoid two planes. */
        index = entry_index(rl, n * rl->layer_width * rl->layer_height, 0);
        activate_array(rl, index, 2 * rl->layer_width * rl->layer_height);
        /* Entry 4 is the objectness score -> sigmoid one plane. */
        index = entry_index(rl, n * rl->layer_width * rl->layer_height, 4);
        activate_array(rl, index, rl->layer_width * rl->layer_height);
    }
    /* Softmax the class probabilities (entries coords+1 onward) for all anchors. */
    index = entry_index(rl, 0, rl->coords + 1);
    softmax_cpu(rl, rl->input + index, rl->classes, rl->anchor_number,
                rl->output_number / rl->anchor_number, rl->layer_width * rl->layer_height,
                rl->layer_width * rl->layer_height, rl->output + index);
}
/* Map box coordinates from the (letterboxed) network input frame back to
 * the source image frame. The image occupies a centered new_w x new_h
 * region of the net input that preserves aspect ratio; this removes that
 * offset and rescales to image-relative coordinates. */
static void correct_region_boxes(region_layer_t *rl, box_t *boxes)
{
    uint32_t net_width = rl->net_width;
    uint32_t net_height = rl->net_height;
    uint32_t image_width = rl->image_width;
    uint32_t image_height = rl->image_height;
    uint32_t boxes_number = rl->boxes_number;
    int new_w = 0;
    int new_h = 0;

    /* Choose the scale on which the image fits the net frame exactly. */
    if (((float)net_width / image_width) <
        ((float)net_height / image_height)) {
        new_w = net_width;
        new_h = (image_height * net_width) / image_width;
    } else {
        new_h = net_height;
        new_w = (image_width * net_height) / image_height;
    }
    for (int i = 0; i < boxes_number; ++i) {
        box_t b = boxes[i];

        /* Remove the centering offset, then undo the per-axis scaling. */
        b.x = (b.x - (net_width - new_w) / 2. / net_width) /
              ((float)new_w / net_width);
        b.y = (b.y - (net_height - new_h) / 2. / net_height) /
              ((float)new_h / net_height);
        b.w *= (float)net_width / new_w;
        b.h *= (float)net_height / new_h;
        boxes[i] = b;
    }
}
/* Decode one raw prediction into a normalized box. (i, j) is the grid cell,
 * 'biases' holds anchor width/height pairs (entry n), and 'stride' steps
 * between the consecutive coordinate planes of the tensor.
 * NOTE(review): 'volatile' on a local return value looks like a compiler
 * workaround — confirm before removing. */
static box_t get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride)
{
    volatile box_t b;

    b.x = (i + x[index + 0 * stride]) / w;
    b.y = (j + x[index + 1 * stride]) / h;
    b.w = expf(x[index + 2 * stride]) * biases[2 * n] / w;
    b.h = expf(x[index + 3 * stride]) * biases[2 * n + 1] / h;
    return b;
}
/* Decode every anchor box of the layer: fill 'boxes' with image-relative
 * coordinates and 'probs' with per-class scores scaled by the objectness
 * score. Scores at or below rl->threshold are zeroed; probs[index][classes]
 * receives the box's maximum class probability. */
static void get_region_boxes(region_layer_t *rl, float *predictions, float **probs, box_t *boxes)
{
    uint32_t layer_width = rl->layer_width;
    uint32_t layer_height = rl->layer_height;
    uint32_t anchor_number = rl->anchor_number;
    uint32_t classes = rl->classes;
    uint32_t coords = rl->coords;
    float threshold = rl->threshold;

    for (int i = 0; i < layer_width * layer_height; ++i)
    {
        int row = i / layer_width;
        int col = i % layer_width;

        for (int n = 0; n < anchor_number; ++n)
        {
            int index = n * layer_width * layer_height + i;

            for (int j = 0; j < classes; ++j)
                probs[index][j] = 0;
            /* Entry 'coords' is the objectness score; entry 0 starts the box. */
            int obj_index = entry_index(rl, n * layer_width * layer_height + i, coords);
            int box_index = entry_index(rl, n * layer_width * layer_height + i, 0);
            float scale = predictions[obj_index];

            boxes[index] = get_region_box(predictions, rl->anchor, n, box_index, col, row,
                                          layer_width, layer_height, layer_width * layer_height);

            float max = 0;

            for (int j = 0; j < classes; ++j)
            {
                int class_index = entry_index(rl, n * layer_width * layer_height + i, coords + 1 + j);
                float prob = scale * predictions[class_index];

                /* Keep only confident class scores; the rest stay zero. */
                probs[index][j] = (prob > threshold) ? prob : 0;
                if (prob > max)
                    max = prob;
            }
            probs[index][classes] = max;
        }
    }
    correct_region_boxes(rl, boxes);
}
static int nms_comparator(void *pa, void *pb)
{
sortable_box_t a = *(sortable_box_t *)pa;
sortable_box_t b = *(sortable_box_t *)pb;
float diff = a.probs[a.index][b.class] - b.probs[b.index][b.class];
if (diff < 0)
return 1;
else if (diff > 0)
return -1;
return 0;
}
/* Length of the 1-D intersection of two centered segments: segment 1 is
 * centered at x1 with width w1, segment 2 at x2 with width w2. A negative
 * result means the segments are disjoint. */
static float overlap(float x1, float w1, float x2, float w2)
{
    float lo1 = x1 - w1 / 2;
    float lo2 = x2 - w2 / 2;
    float hi1 = x1 + w1 / 2;
    float hi2 = x2 + w2 / 2;
    float lo = (lo1 > lo2) ? lo1 : lo2;
    float hi = (hi1 < hi2) ? hi1 : hi2;

    return hi - lo;
}
/* Area of the intersection of two boxes; 0 when they are disjoint. */
static float box_intersection(box_t a, box_t b)
{
    float w = overlap(a.x, a.w, b.x, b.w);
    float h = overlap(a.y, a.h, b.y, b.h);

    return (w < 0 || h < 0) ? 0 : w * h;
}
/* Area of the union of two boxes, by inclusion-exclusion. */
static float box_union(box_t a, box_t b)
{
    return a.w * a.h + b.w * b.h - box_intersection(a, b);
}
/* Intersection-over-union similarity of two boxes. */
static float box_iou(box_t a, box_t b)
{
    float inter = box_intersection(a, b);

    return inter / box_union(a, b);
}
/* Greedy per-class non-maximum suppression: for each class, sort boxes by
 * descending probability, then zero the class score of any lower-ranked
 * box overlapping a kept one by more than rl->nms_value IOU.
 * Fix: the scratch array is now heap-allocated — the original used a VLA
 * of boxes_number elements (grid cells * anchors), which risks overflowing
 * the small task stacks typical of an RTOS. On allocation failure the pass
 * is skipped (best effort) rather than crashing. */
static void do_nms_sort(region_layer_t *rl, box_t *boxes, float **probs)
{
    uint32_t boxes_number = rl->boxes_number;
    uint32_t classes = rl->classes;
    float nms_value = rl->nms_value;
    uint32_t i, j, k;
    sortable_box_t *s = malloc(boxes_number * sizeof(sortable_box_t));

    if (s == NULL)
        return;

    for (i = 0; i < boxes_number; ++i)
    {
        s[i].index = i;
        s[i].class = 0;
        s[i].probs = probs;
    }
    for (k = 0; k < classes; ++k)
    {
        /* Select class k for this pass, then sort by its probability. */
        for (i = 0; i < boxes_number; ++i)
            s[i].class = k;
        qsort(s, boxes_number, sizeof(sortable_box_t), nms_comparator);
        for (i = 0; i < boxes_number; ++i)
        {
            if (probs[s[i].index][k] == 0)
                continue;
            box_t a = boxes[s[i].index];

            /* Suppress every lower-scoring box that overlaps 'a' too much. */
            for (j = i + 1; j < boxes_number; ++j)
            {
                box_t b = boxes[s[j].index];

                if (box_iou(a, b) > nms_value)
                    probs[s[j].index][k] = 0;
            }
        }
    }
    free(s);
}
/* Index of the largest element of a[0..n-1]; ties keep the earliest. */
static int max_index(float *a, int n)
{
    int best = 0;

    for (int i = 1; i < n; ++i)
    {
        if (a[i] > a[best])
            best = i;
    }
    return best;
}
/* Convert surviving boxes into pixel-space detections in obj_info. A box is
 * emitted when its best class probability exceeds rl->threshold.
 * Fix: output is now capped at the fixed capacity of obj_info->obj — the
 * original kept writing past the array (a buffer overflow) whenever more
 * boxes than it can hold passed the threshold. */
static void region_layer_output(region_layer_t *rl, obj_info_t *obj_info)
{
    uint32_t obj_number = 0;
    uint32_t image_width = rl->image_width;
    uint32_t image_height = rl->image_height;
    uint32_t max_objs = sizeof(obj_info->obj) / sizeof(obj_info->obj[0]);
    float threshold = rl->threshold;
    box_t *boxes = (box_t *)rl->boxes;

    for (uint32_t i = 0; i < rl->boxes_number && obj_number < max_objs; ++i)
    {
        int class = max_index(rl->probs[i], rl->classes);
        float prob = rl->probs[i][class];

        if (prob > threshold)
        {
            box_t *b = boxes + i;

            /* Center/size -> pixel corner coordinates. */
            obj_info->obj[obj_number].x1 = b->x * image_width - (b->w * image_width / 2);
            obj_info->obj[obj_number].y1 = b->y * image_height - (b->h * image_height / 2);
            obj_info->obj[obj_number].x2 = b->x * image_width + (b->w * image_width / 2);
            obj_info->obj[obj_number].y2 = b->y * image_height + (b->h * image_height / 2);
            obj_info->obj[obj_number].class_id = class;
            obj_info->obj[obj_number].prob = prob;
            obj_number++;
        }
    }
    obj_info->obj_number = obj_number;
}
/* Full post-processing pipeline for one inference: activate the raw
 * network output, decode boxes and class probabilities, suppress
 * overlapping detections, then fill obj_info with the survivors.
 * The steps are order-dependent and must run exactly in this sequence. */
void region_layer_run(region_layer_t *rl, obj_info_t *obj_info)
{
    forward_region_layer(rl);
    get_region_boxes(rl, rl->output, rl->probs, rl->boxes);
    do_nms_sort(rl, rl->boxes, rl->probs);
    region_layer_output(rl, obj_info);
}
/* Draw the four corner marks of detection 'index' into a 320x240 frame
 * buffer of 16-bit pixels accessed two-at-a-time through 32-bit words
 * ('data' packs the color into both halves).
 * NOTE(review): assumes a 16bpp pixel format and a fixed 320-pixel stride
 * (hence the /2 word addressing and the +160 row step) — confirm against
 * the LCD driver. */
void draw_edge(uint32_t *gram, obj_info_t *obj_info, uint32_t index, uint16_t color)
{
    uint32_t data = ((uint32_t)color << 16) | (uint32_t)color;
    uint32_t *addr1, *addr2, *addr3, *addr4, x1, y1, x2, y2;

    x1 = obj_info->obj[index].x1;
    y1 = obj_info->obj[index].y1;
    x2 = obj_info->obj[index].x2;
    y2 = obj_info->obj[index].y2;

    /* Clamp the rectangle one pixel inside the 320x240 screen. */
    if (x1 <= 0)
        x1 = 1;
    if (x2 >= 319)
        x2 = 318;
    if (y1 <= 0)
        y1 = 1;
    if (y2 >= 239)
        y2 = 238;

    /* Horizontal corner strokes: 8 pixels (4 words) per corner, two rows
     * thick — the '+ 160' write hits the next scanline. */
    addr1 = gram + (320 * y1 + x1) / 2;
    addr2 = gram + (320 * y1 + x2 - 8) / 2;
    addr3 = gram + (320 * (y2 - 1) + x1) / 2;
    addr4 = gram + (320 * (y2 - 1) + x2 - 8) / 2;
    for (uint32_t i = 0; i < 4; i++)
    {
        *addr1 = data;
        *(addr1 + 160) = data;
        *addr2 = data;
        *(addr2 + 160) = data;
        *addr3 = data;
        *(addr3 + 160) = data;
        *addr4 = data;
        *(addr4 + 160) = data;
        addr1++;
        addr2++;
        addr3++;
        addr4++;
    }
    /* Vertical corner strokes: 8 rows, one word (two pixels) wide. */
    addr1 = gram + (320 * y1 + x1) / 2;
    addr2 = gram + (320 * y1 + x2 - 2) / 2;
    addr3 = gram + (320 * (y2 - 8) + x1) / 2;
    addr4 = gram + (320 * (y2 - 8) + x2 - 2) / 2;
    for (uint32_t i = 0; i < 8; i++)
    {
        *addr1 = data;
        *addr2 = data;
        *addr3 = data;
        *addr4 = data;
        addr1 += 160;
        addr2 += 160;
        addr3 += 160;
        addr4 += 160;
    }
}

49
APP_Framework/Framework/knowing/kpu-postprocessing/yolov2/region_layer.h

@ -0,0 +1,49 @@
#ifndef _REGION_LAYER
#define _REGION_LAYER

#include <stdint.h>
#include "kpu.h"

/* Result of one detection pass: up to 10 objects in pixel coordinates. */
typedef struct
{
    uint32_t obj_number;    /* number of valid entries in obj[] */
    struct
    {
        uint32_t x1;        /* top-left corner, pixels */
        uint32_t y1;
        uint32_t x2;        /* bottom-right corner, pixels */
        uint32_t y2;
        uint32_t class_id;  /* index of the winning class */
        float prob;         /* probability of that class */
    } obj[10];
} obj_info_t;

/* State of a YOLOv2 region (detection) layer.
 * NOTE(review): threshold, nms_value, anchor_number and anchor appear to be
 * caller-supplied (region_layer_init() reads anchor_number but never writes
 * these four); the remaining fields are derived/allocated by init. */
typedef struct
{
    float threshold;        /* minimum class probability to keep a box */
    float nms_value;        /* IOU above which overlapping boxes are suppressed */
    uint32_t coords;        /* box coordinate count per prediction (set to 4) */
    uint32_t anchor_number; /* anchors per grid cell */
    float *anchor;          /* anchor w/h pairs: 2 * anchor_number floats */
    uint32_t image_width;   /* source image size, pixels */
    uint32_t image_height;
    uint32_t classes;       /* number of object classes */
    uint32_t net_width;     /* network input resolution */
    uint32_t net_height;
    uint32_t layer_width;   /* output feature-map resolution */
    uint32_t layer_height;
    uint32_t boxes_number;  /* layer_width * layer_height * anchor_number */
    uint32_t output_number; /* boxes_number * (classes + coords + 1) */
    void *boxes;            /* decoded boxes (box_t[boxes_number]) */
    float *input;           /* raw network output to post-process */
    float *output;          /* activated copy of input */
    float *probs_buf;       /* backing storage for the probs rows */
    float **probs;          /* per-box rows of (classes + 1) scores */
} region_layer_t;

int region_layer_init(region_layer_t *rl, int width, int height, int channels, int origin_width, int origin_height);
void region_layer_deinit(region_layer_t *rl);
void region_layer_run(region_layer_t *rl, obj_info_t *obj_info);
void draw_edge(uint32_t *gram, obj_info_t *obj_info, uint32_t index, uint16_t color);

#endif // _REGION_LAYER

24
APP_Framework/Framework/knowing/tensorflow-lite/Kconfig

@ -0,0 +1,24 @@
# Tensorflow Lite for Microcontrollers inference engine.
# Requires the C++ runtime, hence the select below.
menuconfig USING_TENSORFLOWLITEMICRO
    bool "Tensorflow Lite for Micro"
    select RT_USING_CPLUSPLUS
    default n

if USING_TENSORFLOWLITEMICRO

    # Kernel implementation: portable reference ops, or ARM CMSIS-NN
    # optimized ops.
    choice
        prompt "Select Tensorflow Lite Operators Type"
        default USING_TENSORFLOWLITEMICRO_NORMAL

        config USING_TENSORFLOWLITEMICRO_NORMAL
            bool "Using Tensorflow Lite normal operations"

        config USING_TENSORFLOWLITEMICRO_CMSISNN
            bool "Using Tensorflow Lite CMSIS NN operations"
    endchoice

    # Optional hello_world demo application shipped with the engine sources.
    config USING_TENSORFLOWLITEMICRO_DEMOAPP
        bool "Using tensorflow lite for micro demo app"
        default n

endif

207
APP_Framework/Framework/knowing/tensorflow-lite/SConscript

@ -0,0 +1,207 @@
from building import *
import os
cwd = GetCurrentDir()
common = Split('''
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/all_ops_resolver.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/debug_log.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/memory_helpers.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/micro_allocator.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/micro_error_reporter.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/micro_interpreter.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/micro_profiler.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/micro_string.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/micro_time.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/micro_utils.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/recording_micro_allocator.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/recording_simple_memory_allocator.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/simple_memory_allocator.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/test_helpers.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/testing/test_conv_model.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/c/common.c
tensorflow-lite-for-mcu/source/tensorflow/lite/core/api/error_reporter.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/core/api/flatbuffer_conversions.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/core/api/op_resolver.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/core/api/tensor_utils.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/internal/quantization_util.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/kernels/kernel_util.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/schema/schema_utils.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/activations.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/arg_min_max.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/ceil.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/circular_buffer.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/comparisons.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/concatenation.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/conv_test_common.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/dequantize.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/detection_postprocess.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/elementwise.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/ethosu.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/flexbuffers_generated_data.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/floor.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/hard_swish.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/kernel_runner.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/kernel_util.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/l2norm.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/logical.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/logistic.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/maximum_minimum.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/neg.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/pack.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/pad.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/prelu.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/quantize.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/quantize_common.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/reduce.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/reshape.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/round.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/shape.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/split.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/split_v.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/strided_slice.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/sub.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/svdf_common.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/tanh.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/transpose_conv.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/unpack.cc
''')
app = Split('''
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/examples/hello_world/main.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/examples/hello_world/main_functions.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/examples/hello_world/model.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/examples/hello_world/output_handler.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/examples/hello_world/constants.cc
''')
normal_ops = Split('''
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/normal/add.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/normal/conv.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/normal/depthwise_conv.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/normal/fully_connected.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/normal/mul.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/normal/pooling.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/normal/softmax.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/normal/svdf.cc
''')
cmsis_ops = Split('''
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/cmsis-nn/add.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/cmsis-nn/conv.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/cmsis-nn/depthwise_conv.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/cmsis-nn/fully_connected.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/cmsis-nn/mul.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/cmsis-nn/pooling.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/cmsis-nn/softmax.cc
tensorflow-lite-for-mcu/source/tensorflow/lite/micro/kernels/cmsis-nn/svdf.cc
''')