-
Notifications
You must be signed in to change notification settings - Fork 3
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[WIP] - Feature train orb #31
base: dev
Are you sure you want to change the base?
Changes from 9 commits
1ed5e9b
16ef354
7b4c753
4947a36
84f244e
4a98c9d
fa99cb0
6cb26db
c3cb621
63b379c
dc0f0bd
bc246fb
f7d062f
5d75388
8094ba6
220a528
6fe0b12
1b0ff8b
b5afa69
549e18a
a7f311f
4a0cf97
283c22e
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -7,6 +7,8 @@ | |
#include <jsfeat.h> | ||
#include <stdio.h> | ||
|
||
#include <cmath> | ||
#include <memory> | ||
#include <string> | ||
#include <vector> | ||
|
||
|
@@ -85,6 +87,139 @@ emscripten::val load_jpeg(const char* filename) { | |
emscripten::val load_jpeg_data(std::string filename) {
  // Thin convenience wrapper: forwards the std::string path to the
  // C-string based load_jpeg() and returns its result unchanged.
  return load_jpeg(filename.c_str());
};
|
||
void train_orb_pattern_internal(const char* filename) { | ||
char* ext; | ||
char buf1[512], buf2[512]; | ||
|
||
AR2JpegImageT* jpegImage; | ||
|
||
auto lev = 0, i = 0; | ||
auto sc = 1.0; | ||
auto max_pattern_size = 512; | ||
auto max_per_level = 300; | ||
auto sc_inc = std::sqrt(2.0); // magic number ;) | ||
auto new_width = 0, new_height = 0; | ||
// var lev_corners, lev_descr; | ||
auto corners_num = 0; | ||
|
||
// if (!filename) return emscripten::val::null(); | ||
ext = arUtilGetFileExtensionFromPath(filename, 1); | ||
if (!ext) { | ||
webarkitLOGe( | ||
"Error: unable to determine extension of file '%s'. Exiting.\n", | ||
filename); | ||
} | ||
if (strcmp(ext, "jpeg") == 0 || strcmp(ext, "jpg") == 0 || | ||
strcmp(ext, "jpe") == 0) { | ||
webarkitLOGi("Waiting for the jpeg..."); | ||
webarkitLOGi("Reading JPEG file..."); | ||
ar2UtilDivideExt(filename, buf1, buf2); | ||
jpegImage = ar2ReadJpegImage(buf1, buf2); | ||
if (jpegImage == NULL) { | ||
webarkitLOGe( | ||
"Error: unable to read JPEG image from file '%s'. Exiting.\n", | ||
filename); | ||
} | ||
webarkitLOGi(" Done."); | ||
|
||
if (jpegImage->nc != 1 && jpegImage->nc != 3) { | ||
ARLOGe( | ||
"Error: Input JPEG image is in neither RGB nor grayscale format. " | ||
"%d bytes/pixel %sformat is unsupported. Exiting.\n", | ||
jpegImage->nc, (jpegImage->nc == 4 ? "(possibly CMYK) " : "")); | ||
} | ||
webarkitLOGi("JPEG image number of channels: '%d'", jpegImage->nc); | ||
webarkitLOGi("JPEG image width is: '%d'", jpegImage->xsize); | ||
webarkitLOGi("JPEG image height is: '%d'", jpegImage->ysize); | ||
webarkitLOGi("JPEG image, dpi is: '%d'", jpegImage->dpi); | ||
|
||
if (jpegImage->dpi == 0.0f) { | ||
webarkitLOGw( | ||
"JPEG image '%s' does not contain embedded resolution data, and no " | ||
"resolution specified on command-line.", | ||
filename); | ||
} | ||
|
||
} else if (strcmp(ext, "png") == 0) { | ||
webarkitLOGe( | ||
"Error: file has extension '%s', which is not supported for " | ||
"reading. Exiting.\n", | ||
ext); | ||
free(ext); | ||
} | ||
webarkitLOGi("Image done!"); | ||
|
||
JSLOGi("Starting detection routine...\n"); | ||
|
||
Orb orb; | ||
Imgproc imgproc; | ||
detectors::Detectors detectors; | ||
std::unique_ptr<Matrix_t> lev0_img = std::make_unique<Matrix_t>(jpegImage->xsize, jpegImage->ysize, ComboTypes::U8C1_t); | ||
std::unique_ptr<Matrix_t> lev_img = std::make_unique<Matrix_t>(jpegImage->xsize, jpegImage->ysize, ComboTypes::U8C1_t); | ||
Array<std::unique_ptr<Matrix_t>> pattern_corners; | ||
std::cout << "after orb" << std::endl; | ||
|
||
auto sc0 = std::min(max_pattern_size / jpegImage->ysize, max_pattern_size / jpegImage->xsize); | ||
new_width = (jpegImage->ysize * sc0) | 0; | ||
new_height = (jpegImage->xsize * sc0) | 0; | ||
auto num_train_levels = 4; | ||
|
||
auto i_u8_size = jpegImage->xsize * jpegImage->ysize * jpegImage->nc; | ||
Array<u_char> i_u8(jpegImage->image, jpegImage->image + i_u8_size); | ||
std::unique_ptr<Matrix_t> img_u8 = std::make_unique<Matrix_t>(jpegImage->xsize, jpegImage->ysize, ComboTypes::U8C4_t, i_u8); | ||
|
||
JSLOGi("Resampling image..."); | ||
|
||
//imgproc.resample(img_u8.get(), lev0_img.get(), new_width, new_height); | ||
|
||
JSLOGi("Image resampled, starting pyramid now..."); | ||
// prepare preview | ||
std::unique_ptr<Matrix_t> pattern_preview = std::make_unique<Matrix_t>(jpegImage->xsize >> 1, jpegImage->ysize >> 1, ComboTypes::U8C1_t); | ||
imgproc.pyrdown_internal(lev0_img.get(), pattern_preview.get()); | ||
|
||
Array<KeyPoints*> lev_corners(4); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. i think we should provide a array of pointers and pre init them. |
||
Array<std::unique_ptr<Matrix_t>> pattern_descriptors; | ||
|
||
for (lev = 0; lev < num_train_levels; ++lev) { | ||
// what we should do with this code? | ||
// pattern_corners[lev] = []; | ||
// lev_corners = pattern_corners[lev]; | ||
|
||
// preallocate corners array | ||
i = (new_width * new_height) >> lev; | ||
while (--i >= 0) { | ||
lev_corners[lev]->set_size(i); | ||
lev_corners[lev]->allocate(); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. we need to set the size and allocate the entire set. |
||
} | ||
std::cout << "Num. of level: " << lev << std::endl; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. this print 4 levels in the console: |
||
pattern_descriptors.push_back(std::unique_ptr<Matrix_t>(new Matrix_t(32, max_per_level, ComboTypes::U8C1_t))); | ||
} | ||
|
||
//std::cout << "Size of first lev_corners: " << lev_corners[0]->kpoints.size() << std::endl; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This is not printed... |
||
|
||
imgproc.gaussian_blur_internal(lev0_img.get(), lev_img.get(), 5, 2); // this is more robust | ||
|
||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This is ok , it is printed... |
||
JSLOGi("After Gaussian blur"); | ||
|
||
corners_num = detectors.detect_keypoints(lev_img.get(), lev_corners[0], max_per_level); | ||
|
||
// orb.describe(lev_img.get(), lev_corners[0], corners_num, lev_descr.get()); | ||
// This probablly will work in a near future | ||
// orb.describe(lev_img.get(), lev_corners[0], corners_num, &pattern_descriptors[0]); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. orb.describe can not be yet used here because it accept in the first parameter a |
||
|
||
// console.log("train " + lev_img.cols + "x" + lev_img.rows + " points: " + corners_num); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. ...continuning from below, These two printings instead do nothing. I will open an issue as reminder. |
||
std::cout << "Corners num: " << (int)corners_num << std::endl; | ||
//JSLOGi("Corners num: %i", corners_num); | ||
//JSLOGi("train %i x %i points: %i\n", lev_img.get()->get_cols(), lev_img.get()->get_rows(), corners_num); | ||
//std::cout << "train " << lev_img.get()->get_cols() << " x " << lev_img.get()->get_rows() << " points: " << corners_num << std::endl; | ||
free(ext); | ||
free(jpegImage); | ||
}; | ||
|
||
void train_orb_pattern(std::string filename) { | ||
train_orb_pattern_internal(filename.c_str()); | ||
} | ||
} | ||
|
||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,17 @@ | ||
<html>

<body>

  <!-- Minimal harness: loads the compiled jsfeat module and trains an
       ORB pattern from pinball.jpg.  Success/error callbacks are no-ops. -->
  <script type="module">
    import jsfeatCpp from "./../build/jsfeatES6cpp_debug.js"
    import { trainOrbPattern } from "./js/loader.js"

    const jsfeat = await jsfeatCpp();

    trainOrbPattern("pinball.jpg", () => {}, () => {});

  </script>

</body>

</html>
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,7 @@ | ||
// before all: | ||
git submodule update --init | ||
// Assuming that you have the emscripten engine installed under Docker, you may run:
// for the first time | ||
docker exec emscripten ./build.sh emscripten-all | ||
// and then when WebarkitLib is compiled: | ||
docker exec emscripten ./build.sh emscripten |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,84 @@ | ||
#ifndef DETECTORS_H | ||
#define DETECTORS_H | ||
|
||
#include <keypoint_t/keypoint_t.h> | ||
#include <keypoints/keypoints.h> | ||
#include <keypoints_filter/keypoints_filter.h> | ||
#include <math/math.h> | ||
#include <matrix_t/matrix_t.h> | ||
#include <types/types.h> | ||
#include <yape06/yape06.h> | ||
|
||
namespace jsfeat { | ||
|
||
namespace detectors { | ||
|
||
class Detectors : public Yape06, public Math, public KeyPointsFilter { | ||
public: | ||
Detectors() {} | ||
~Detectors() {} | ||
|
||
int detect_keypoints(Matrix_t* img, KeyPoints* corners, int max_allowed) { | ||
// detect features | ||
auto kpc = detect_internal(img, corners, 17); | ||
auto count = kpc.count; | ||
std::cout << "here" << std::endl; | ||
//std::cout << count << std::endl; | ||
// sort by score and reduce the count if needed | ||
if (count > max_allowed) { | ||
// qsort_internal<KeyPoint_t, bool>(corners.kpoints, 0, count - 1, [](KeyPoint_t i, KeyPoint_t j){return (i.score < j.score);}); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'm not sure of this, maybe it's better to use another small different approach. I'm looking to the OpenCV code in the Orb implementation and there is another possibility. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. retainBest is taken from OpenCV, but i need to figure out if this is correct. |
||
retainBest(corners->kpoints, count); | ||
count = max_allowed; | ||
} | ||
|
||
// calculate dominant orientation for each keypoint | ||
for (auto i = 0; i < count; ++i) { | ||
corners->kpoints[i].angle = ic_angle(img, corners->kpoints[i].x, corners->kpoints[i].y); | ||
} | ||
|
||
//std::cout << count << std::endl; | ||
|
||
return count; | ||
} | ||
|
||
private: | ||
// function(a, b) { return (b.score < a.score); } | ||
// bool myfunction(KeyPoint_t i, KeyPoint_t j) { return (i.score < j.score); } | ||
// central difference using image moments to find dominant orientation | ||
// var u_max = new Int32Array([15, 15, 15, 15, 14, 14, 14, 13, 13, 12, 11, 10, 9, 8, 6, 3, 0]); | ||
float ic_angle(Matrix_t* img, int px, int py) { | ||
Array<u_int> u_max{15, 15, 15, 15, 14, 14, 14, 13, 13, 12, 11, 10, 9, 8, 6, 3, 0}; | ||
auto half_k = 15; // half patch size | ||
auto m_01 = 0, m_10 = 0; | ||
auto src = img->u8; | ||
auto step = img->get_cols(); | ||
auto u = 0, v = 0, center_off = (py * step + px) | 0; | ||
auto v_sum = 0, d = 0, val_plus = 0, val_minus = 0; | ||
|
||
// Treat the center line differently, v=0 | ||
for (u = -half_k; u <= half_k; ++u) | ||
m_10 += u * src[center_off + u]; | ||
|
||
// Go line by line in the circular patch | ||
for (v = 1; v <= half_k; ++v) { | ||
// Proceed over the two lines | ||
v_sum = 0; | ||
d = u_max[v]; | ||
for (u = -d; u <= d; ++u) { | ||
val_plus = src[center_off + u + v * step]; | ||
val_minus = src[center_off + u - v * step]; | ||
v_sum += (val_plus - val_minus); | ||
m_10 += u * (val_plus + val_minus); | ||
} | ||
m_01 += v * v_sum; | ||
} | ||
|
||
return std::atan2(m_01, m_10); | ||
} | ||
}; | ||
|
||
} // namespace detectors | ||
|
||
} // namespace jsfeat | ||
|
||
#endif |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -35,6 +35,12 @@ class KeyPoints { | |
this->size = kp.size; | ||
this->kpoints = kp.kpoints; | ||
} | ||
|
||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I added the allocate function because if you initialize a KeyPoints with the default constructor it will not init the
||
auto allocate() { | ||
KeyPoint_t kpt(0, 0, 0, 0, -1); | ||
kpoints.assign(this->size, kpt); | ||
} | ||
|
||
auto get_size() const {return size; }; | ||
|
||
auto set_size(int size) { this->size = size; }; | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Resampling is not needed in our case, because we provide our image with the right size. The code was taken from the jsfeat sample_orb example, and in that case we simply resampled the image taken from the canvas (webcam) to a smaller size. Anyway, the resample function has some issues; in fact, the console log "Image resampled, starting pyramid now..." cannot be printed with this function enabled. (Just comment it out and recompile to test.)