GR-SAKURA
GR-KURUMI
GR-COTTON
GR-CITRUS
GR-PEACH
GR-KAEDE
GR-ADZUKI
GR-LYCHEE
GR-ROSE
GR-MANGO(*)
SNShield
Web Compiler
IDE for GR
TOPPERS関連
女子美コラボ
その他
※プロデューサミーティング中
作り方使い方資料
イベント関連
作品記事
体験記事
ライブラリ
ツール
その他・過去ファイル
がじぇるね岡宮です。
GR-LYCHEEでOpenCVを使った小ネタです。
2枚の画像の差分から、動体の中心位置を検出するサンプルです。サンプルは2種類で、およその処理の流れが分かるバージョンと、本気で検出だけしてシリアルにX,Y座標を表示するものです。
サンプル1:
LYCHEEのボタンUB0を押すことで、グレー→ノイズ除去→2値化+差分→差分の輪郭+動体の中心位置特定の流れでデモします。
#include <Arduino.h>
#include <Camera.h>
#include <LCD.h>
#include <opencv.hpp>
#include <DisplayApp.h>

using namespace cv;
using namespace std;

#define USE_DISPLAYAPP
#define IMAGE_HW 480
#define IMAGE_VW 272
#define LOOP_WAITTIME 16          // 16 ms -> ~60 fps, matching the camera spec.
#define BACK_UPDATE_MS 1000       // background model refresh interval
#define DIFF_THRESHOLD 25         // binarization threshold for the frame difference

Camera camera(IMAGE_HW, IMAGE_VW);
LCD lcd(IMAGE_HW, IMAGE_VW);
#ifdef USE_DISPLAYAPP
DisplayApp display_app;
#endif

// 1 MB of NC_BSS is not used by malloc, so the large frame buffers are
// placed there as static memory (32-byte aligned, uncached section).
uint8_t lcd_buf [2 * IMAGE_HW * IMAGE_VW] __attribute((section("NC_BSS"),aligned(32)));
uint8_t bgr_buf [3 * IMAGE_HW * IMAGE_VW] __attribute((section("NC_BSS"),aligned(32)));
uint8_t gray_buf[1 * IMAGE_HW * IMAGE_VW] __attribute((section("NC_BSS"),aligned(32)));
uint8_t diff_buf[1 * IMAGE_HW * IMAGE_VW] __attribute((section("NC_BSS"),aligned(32)));
uint8_t back_buf[1 * IMAGE_HW * IMAGE_VW] __attribute((section("NC_BSS"),aligned(32)));

#define NUM_DEMO 5
LineTypes linetypes = LINE_AA;
string string_text[NUM_DEMO] = {"Gadget","Gray","GaussianBlur","absdiff + threshold","findContours"};

// Shared between ISRs and loop() -> must be volatile so the main loop
// re-reads them each iteration (fix: originally non-volatile).
volatile uint8_t g_demo = 0;
volatile bool g_disp_back = false;

// UB0: advance to the next demo stage (wraps back to 0).
void ub0_interrupt() {
  g_demo++;
  if (g_demo >= NUM_DEMO) {
    g_demo = 0;
  }
}

// UB1: toggle displaying the captured background image.
void ub1_interrupt() {
  g_disp_back = !g_disp_back;
}

void setup() {
  camera.begin();
  lcd.begin(lcd_buf, IMAGE_HW, IMAGE_VW);
  pinMode(PIN_SW0, INPUT);
  pinMode(PIN_SW1, INPUT);
  pinMode(PIN_LED_RED, OUTPUT);
  attachInterrupt(4, ub0_interrupt, FALLING);
  attachInterrupt(3, ub1_interrupt, FALLING);
}

void loop() {
  static unsigned long last_time = millis();
  static unsigned long update_last_time = millis();
  static int x = 0, y = 0, ax = 10, ay = 10;

  // Pace the loop to the camera frame rate.
  while ((millis() - last_time) < LOOP_WAITTIME);
  unsigned long loop_time = millis() - last_time;
  last_time = millis();

  // Snapshot the ISR-controlled demo stage so one frame uses one value.
  uint8_t demo = g_demo;

  Scalar red(0, 0, 255), green(0, 255, 0), blue(255, 0, 0);
  Scalar yellow = red + green;
  Scalar sky = green + blue;
  Scalar white = Scalar::all(255);
  Scalar pink = Scalar(154, 51, 255);

  // Wrap the raw camera frame (YUYV) and the working buffers as cv::Mat
  // headers; no pixel data is copied here.
  Mat img_raw(IMAGE_VW, IMAGE_HW, CV_8UC2, camera.getImageAdr());
  Mat img_bgr(IMAGE_VW, IMAGE_HW, CV_8UC3, bgr_buf);
  Mat img_gray(IMAGE_VW, IMAGE_HW, CV_8UC1, gray_buf);
  Mat img_diff(IMAGE_VW, IMAGE_HW, CV_8UC1, diff_buf);
  Mat img_back(IMAGE_VW, IMAGE_HW, CV_8UC1, back_buf);

  cvtColor(img_raw, img_bgr, COLOR_YUV2BGR_YUYV); // convert YUV to BGR

  // Refresh the background model once per BACK_UPDATE_MS.
  // fix: update_last_time was never reset, so after the first second the
  // background was recaptured on every frame.
  if (millis() - update_last_time > BACK_UPDATE_MS) {
    update_last_time = millis();
    cvtColor(img_raw, img_back, COLOR_YUV2GRAY_YUY2); // capture background image
    GaussianBlur(img_back, img_back, Size(21, 21), 0);
  }

  if (demo >= 1) {
    cvtColor(img_raw, img_gray, COLOR_YUV2GRAY_YUY2); // convert YUV to GRAY
  }
  if (demo >= 2) {
    GaussianBlur(img_gray, img_gray, Size(21, 21), 0); // suppress pixel noise
  }
  if (demo >= 3) {
    absdiff(img_back, img_gray, img_diff);
    threshold(img_diff, img_diff, DIFF_THRESHOLD, 255, THRESH_BINARY);
  }
  if (demo >= 4) {
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(img_diff, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_NONE);
    if (contours.size() > 0) {
      // Moments and centroids of every contour.
      vector<Moments> mu(contours.size());
      vector<Point2f> mc(contours.size());
      for (size_t i = 0; i < contours.size(); i++) {
        mu[i] = moments(contours[i], false);
        if (mu[i].m00 != 0) {
          mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
        } else {
          // fix: degenerate contour (zero area) -> avoid division by zero,
          // fall back to the contour's first point.
          mc[i] = Point2f(contours[i][0]);
        }
      }
      // Pick the contour with the most points as the moving object.
      // fix: index initialized to 0 instead of (size_t)-1, which would
      // index out of bounds if ever used unset.
      size_t indexOfBiggestContour = 0;
      size_t sizeOfBiggestContour = 0;
      for (size_t i = 0; i < contours.size(); i++) {
        if (contours[i].size() > sizeOfBiggestContour) {
          sizeOfBiggestContour = contours[i].size();
          indexOfBiggestContour = i;
        }
      }
      drawContours(img_bgr, contours, indexOfBiggestContour, sky, 2, 8, vector<Vec4i>(), 0, Point());
      circle(img_bgr, mc[indexOfBiggestContour], 5, red, -1, 8, 0);
      stringstream ss;
      ss << (int)mc[indexOfBiggestContour].x << ", " << (int)mc[indexOfBiggestContour].y;
      putText(img_bgr, ss.str(), mc[indexOfBiggestContour], FONT_HERSHEY_PLAIN, 1, white, 1, linetypes);
    }
  }

  // Choose which intermediate image to show for the current demo stage.
  switch (demo) {
    case 0 : break;
    case 1 :
    case 2 : cvtColor(img_gray, img_bgr, COLOR_GRAY2BGR); break;
    case 3 : cvtColor(img_diff, img_bgr, COLOR_GRAY2BGR); break;
    case 4 : break;
    default : break;
  }
  if (g_disp_back) {
    cvtColor(img_back, img_bgr, COLOR_GRAY2BGR);
  }

  // Bouncing ball + overlay text to visualize frame pacing.
  x += ax;
  y += ay;
  if (x > (img_bgr.cols - 10) || x < 10) { ax *= -1; }
  if (y > (img_bgr.rows - 10) || y < 10) { ay *= -1; }
  line(img_bgr, Point(10, 10), Point(img_bgr.cols - 10, 10), blue, 3, linetypes);
  line(img_bgr, Point(10, img_bgr.rows - 10), Point(img_bgr.cols - 10, img_bgr.rows - 10), blue, 3, linetypes);
  rectangle(img_bgr, Point(10, 30), Point(img_bgr.cols - 10, 60), white, FILLED);
  putText(img_bgr, string_text[demo], Point(15, 55), FONT_HERSHEY_SIMPLEX, 1, pink, 2, linetypes);
  circle(img_bgr, Point(x, y), 10, yellow, FILLED);
  stringstream ss;
  ss << x << ", " << y << ", " << (int)(loop_time) << "ms loop";
  putText(img_bgr, ss.str(), Point(10, img_bgr.rows - 20), FONT_HERSHEY_SIMPLEX, 1, white, 1, linetypes);

  lcd.BGR2YUV(img_bgr.data, lcd_buf, IMAGE_HW, IMAGE_VW);
#ifdef USE_DISPLAYAPP
  size_t jpegSize = camera.createJpeg(IMAGE_HW, IMAGE_VW, img_bgr.data, Camera::FORMAT_RGB888);
  display_app.SendJpeg(camera.getJpegAdr(), jpegSize);
#endif
}
サンプル2:
シリアルで動体のX,Yを出力する。取得画像はQVGAにしてやや小さめとし、検出用のバッファはキャッシュエリアにしているため、サンプル1よりも5倍ぐらい高速に検出します。
#include <Arduino.h>
#include <Camera.h>
#include <opencv.hpp>

using namespace cv;
using namespace std;

#define IMAGE_HW 320
#define IMAGE_VW 240
#define LOOP_WAITTIME 16          // 16 ms -> ~60 fps, matching the camera spec.
#define BACK_UPDATE_MS 1000       // background model refresh interval
#define DIFF_THRESHOLD 25         // binarization threshold for the frame difference

Camera camera(IMAGE_HW, IMAGE_VW);

// Unlike sample 1, these buffers deliberately stay in the normal (cached)
// BSS for speed; only the camera's own frame lives in uncached memory.
uint8_t bgr_buf [3 * IMAGE_HW * IMAGE_VW];
uint8_t gray_buf[1 * IMAGE_HW * IMAGE_VW];
uint8_t diff_buf[1 * IMAGE_HW * IMAGE_VW];
uint8_t back_buf[1 * IMAGE_HW * IMAGE_VW];

void setup() {
  Serial.begin(9600);
  camera.begin();
}

void loop() {
  static unsigned long last_time = millis();
  static unsigned long update_last_time = millis();

  // Pace the loop to the camera frame rate.
  while ((millis() - last_time) < LOOP_WAITTIME);
  unsigned long loop_time = millis() - last_time;
  last_time = millis();

  // Mat headers over the raw YUYV frame and the working buffers (no copy).
  Mat img_raw(IMAGE_VW, IMAGE_HW, CV_8UC2, camera.getImageAdr());
  Mat img_bgr(IMAGE_VW, IMAGE_HW, CV_8UC3, bgr_buf);
  Mat img_gray(IMAGE_VW, IMAGE_HW, CV_8UC1, gray_buf);
  Mat img_diff(IMAGE_VW, IMAGE_HW, CV_8UC1, diff_buf);
  Mat img_back(IMAGE_VW, IMAGE_HW, CV_8UC1, back_buf);

//  cvtColor(img_raw, img_bgr, COLOR_YUV2BGR_YUYV); // convert YUV to BGR (unused here)

  // Refresh the background model once per BACK_UPDATE_MS.
  // fix: update_last_time was never reset, so after the first second the
  // background was recaptured on every frame.
  if (millis() - update_last_time > BACK_UPDATE_MS) {
    update_last_time = millis();
    cvtColor(img_raw, img_back, COLOR_YUV2GRAY_YUY2); // capture background image
    GaussianBlur(img_back, img_back, Size(21, 21), 0);
  }

  // Gray -> blur -> background difference -> binarize -> contours.
  cvtColor(img_raw, img_gray, COLOR_YUV2GRAY_YUY2);
  GaussianBlur(img_gray, img_gray, Size(21, 21), 0);
  absdiff(img_back, img_gray, img_diff);
  threshold(img_diff, img_diff, DIFF_THRESHOLD, 255, THRESH_BINARY);

  vector<vector<Point> > contours;
  vector<Vec4i> hierarchy;
  findContours(img_diff, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_NONE);

  Serial.println(loop_time);
  if (contours.size() > 0) {
    // Moments and centroids of every contour.
    vector<Moments> mu(contours.size());
    vector<Point2f> mc(contours.size());
    for (size_t i = 0; i < contours.size(); i++) {
      mu[i] = moments(contours[i], false);
      if (mu[i].m00 != 0) {
        mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
      } else {
        // fix: degenerate contour (zero area) -> avoid division by zero,
        // fall back to the contour's first point.
        mc[i] = Point2f(contours[i][0]);
      }
    }
    // Pick the contour with the most points as the moving object.
    // fix: index initialized to 0 instead of (size_t)-1, which would
    // index out of bounds if ever used unset.
    size_t indexOfBiggestContour = 0;
    size_t sizeOfBiggestContour = 0;
    for (size_t i = 0; i < contours.size(); i++) {
      if (contours[i].size() > sizeOfBiggestContour) {
        sizeOfBiggestContour = contours[i].size();
        indexOfBiggestContour = i;
      }
    }
    Serial.print("x:");
    Serial.print((int)mc[indexOfBiggestContour].x);
    Serial.print(",y:");
    Serial.println((int)mc[indexOfBiggestContour].y);
  } else {
    Serial.println("no moving");
  }
}