diff --git a/.gitignore b/.gitignore
index 85b7772..ce28416 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,6 +10,7 @@ app/bin/*
build/*
*.pbxuser
*.mode1v3
+*.perspectivev3
# old skool
.svn
diff --git a/README.md b/README.md
index d8b09ef..143fc0f 100644
--- a/README.md
+++ b/README.md
@@ -22,6 +22,35 @@ We want to recognize:
- Individual fingers
- Finger-based hand gestures (e.g. peace sign, etc)
+Getting Started
+---------------
+
+Right now, you'll want to be on Chrome, on OSX, then:
+
+- Plug in your Kinect to your computer via USB
+- Download this code from http://github.com/doug/depthjs
+- Install the chrome extension
+ - Bring up the extensions management page by clicking the wrench icon and choosing *Tools* > *Extensions*.
+ - If *Developer mode* has a + by it, click the + to add developer information to the page. The + changes to a -, and more buttons and information appear.
+ - Click the *Load unpacked extension* button. A file dialog appears.
+ - In the file dialog, navigate to your *depthjs/chrome-extension-mac* directory and click *OK*.
+- Open a new web page (it only affects new pages)
+- Have fun!
+
+What the camera sees...
+
+- When the extension starts up, it opens 3 windows: *blob*, *first*, and *second*
+- *first* & *second* show the limits of the depth of field the camera is paying attention to; try to position your hand so that it appears in both *first* & *second* and not much else does
+- blob shows you everything the camera can see as well as a blue circle around where it thinks your hand is.
+
+After opening a new page in Chrome,
+
+- Pull your hand back so that both first & second windows are blank and there is no circle in the blob window
+- Then bring your hand forward until it gets an outline in the blob window, and pause for a second until the blue circle appears
+- You should then see a blue circle on the web site that will track your movements
+- To click, just close your hand into a fist!
+
+
Components
----------
DepthJS is very modular. The Kinect driver and computer vision are written on top of Open Frameworks and OpenCV in C++. This component can output the raw RGB image, the raw depth map (filtered for the hand), as well as the high-level events that the computer vision recognizes. The three outputs are pumped out on three separate 0MQ TCP sockets. Next, a Torando web server (written in Python) takes the 0MQ data and wraps it into a WebSocket, which is what enables the web browser extension to receive the data. Finally a pure javascript-based extension connects to the WebSocket server to receive the events. Event handlers may be placed globally, in content scripts injected into each web page, or pushed via the content script to local DOM elements written by 3rd parties.
diff --git a/chrome-extension-mac/content_script/event_handlers.js b/chrome-extension-mac/content_script/event_handlers.js
index 1c4875d..b63133c 100644
--- a/chrome-extension-mac/content_script/event_handlers.js
+++ b/chrome-extension-mac/content_script/event_handlers.js
@@ -30,16 +30,27 @@ console.log("DepthJS: Loading event handlers");
DepthJS.state = null;
DepthJS.lastRegisterTime = null;
+DepthJS.trigger = function(element, name) {
+ var event = document.createEvent("Events")
+ event.initEvent(name, true, true); //true for can bubble, true for cancelable
+ element.dispatchEvent(event);
+}
+
+DepthJS.trigger(window, "depthjs-loading");
+
DepthJS.eventHandlers.onSwipeLeft = function() {
+ DepthJS.trigger(window, "swipeLeft");
// history.back();
};
DepthJS.eventHandlers.onSwipeRight = function() {
+ DepthJS.trigger(window, "swipeRight");
// We interpret as "forward".
// history.forward();
};
DepthJS.eventHandlers.onSwipeDown = function() {
+ DepthJS.trigger(window, "swipeDown");
// We interpret as "scroll down 75% of window".
// var scrollAmount = Math.floor($(window).height() * 0.75);
// $("html, body").animate({
@@ -48,6 +59,7 @@ DepthJS.eventHandlers.onSwipeDown = function() {
};
DepthJS.eventHandlers.onSwipeUp = function() {
+ DepthJS.trigger(window, "swipeUp");
// We interpret as "scroll up 75% of window".
// var scrollAmount = Math.floor($(window).height() * 0.75);
// $("html, body").animate({
@@ -56,12 +68,14 @@ DepthJS.eventHandlers.onSwipeUp = function() {
};
DepthJS.eventHandlers.onHandPointer = function(){
+ DepthJS.trigger(window, "handPointer");
if (DepthJS.verbose) console.log("DepthJS. Hand Pointer");
DepthJS.eventHandlers.onUnregister();
DepthJS.state = "selectorBox";
};
DepthJS.eventHandlers.onHandOpen = function(){
+ DepthJS.trigger(window, "handOpen");
if (DepthJS.verbose) console.log("DepthJS. Hand Open");
DepthJS.eventHandlers.onUnregister();
DepthJS.state = "panner";
@@ -83,11 +97,12 @@ DepthJS.eventHandlers.onSelectorBoxMode = function() {
// POINTER -----------------------------------------------------------------------------------------
DepthJS.eventHandlers.onRegister = function(data) {
if (DepthJS.verbose) console.log("DepthJS: User registered their hand");
- $(window).trigger("touchstart");
+ DepthJS.trigger(window, "touchStart");
if (data.mode == "twohands") {
console.log("Ignoring in two hands for now");
return;
}
+ data.mode = "theforce";
if (data.mode == "theforce") {
DepthJS.registerMode = "selectorBox";
} else if (data.mode == "twohands") {
@@ -109,6 +124,7 @@ DepthJS.eventHandlers.onUnregister = function() {
DepthJS.selectorBox.hide();
DepthJS.selectorBoxPopup.hide();
DepthJS.depthose.hide();
+ DepthJS.trigger(window, "touchStop");
};
DepthJS.eventHandlers.onHandClick = function() {
@@ -167,6 +183,8 @@ DepthJS.eventHandlers.onMove = function(data) {
} else if (DepthJS.state == "selectorBoxPopup") {
DepthJS.selectorBoxPopup.move(accumulatedX, accumulatedY, accumulatedZ);
} else {
+ //console.debug("setting the force")
+ DepthJS.eventHandlers.onRegister({mode:"theforce"});
if (DepthJS.verbose) console.log("Ignoring move in state " + DepthJS.state);
}
};
diff --git a/chrome-extension-mac/content_script/init.js b/chrome-extension-mac/content_script/init.js
index b7c3482..8772047 100644
--- a/chrome-extension-mac/content_script/init.js
+++ b/chrome-extension-mac/content_script/init.js
@@ -9,15 +9,15 @@ $(function() {
// Let us know its running
console.log("Finished initing, sticking in logo");
- $("
").css({
- position: "fixed",
- width: "32px",
- height: "32px",
- bottom: "20px",
- left: "20px"
- }).appendTo("body");
- console.log($("img"));
-
+ $("
").attr("src", chrome.extension.getURL("logo_128x128.png"))
+ .css({
+ position: "fixed",
+ width: "32px",
+ height: "32px",
+ bottom: "20px",
+ left: "20px"
+ })
+ .appendTo("body");
var lastTime = null;
function reloadChecker() {
diff --git a/chrome-extension-mac/content_script/root.js b/chrome-extension-mac/content_script/root.js
index c456577..e69512a 100644
--- a/chrome-extension-mac/content_script/root.js
+++ b/chrome-extension-mac/content_script/root.js
@@ -54,7 +54,7 @@ function print() {
});
var alphabeticalKeys = _.keys(counts).sort();
- console.log("------" + (new Date() + ""));
+ // console.log("------" + (new Date() + ""));
_.each(alphabeticalKeys, function(type) {
console.log([" " + counts[type] + " " + type + "; last = ", lastByType[type]]);
});
diff --git a/chrome-extension-mac/content_script/selector_box.js b/chrome-extension-mac/content_script/selector_box.js
index 9f999b2..7cd7e37 100644
--- a/chrome-extension-mac/content_script/selector_box.js
+++ b/chrome-extension-mac/content_script/selector_box.js
@@ -46,36 +46,48 @@ DepthJS.selectorBox.move = function(x, y) {
if (x != $box.css("left") || y != $box.css("top")) {
$box.css({left: x, top: y});
}
+ DepthJS.selectorBox.handleHover()
};
-DepthJS.selectorBox.activate = function() {
- if (DepthJS.verbose) console.log("DepthJS: Activating underneath selectorBox");
- // Lame code for now...
-
- var $intersectingLinks = $("a").filter(function() {
- var $a = $(this);
- var ax = $a.offset().left + $(window).scrollLeft();
- var aw = $a.width();
- var ay = $a.offset().top + $(window).scrollTop();
- var ah = $a.height();
-
- var $box = DepthJS.selectorBox.$box;
- var bx = $box.position().left;
- var by = $box.position().top;
- var bw = $box.width();
- var bh = $box.height();
-
- if (by > ay + ah || // box-top is lower than link-bottom
- by + bh < ay || // box-bottom is higher than link-top
- bx > ax + aw || // box-left is right of link right
- bx + bw < aw) { // box-right is left of link left
- return false;
+DepthJS.selectorBox.elementAtCursor = function() {
+ var $box = DepthJS.selectorBox.$box;
+ var x = $box.position().left + $box.width() / 2;
+ var y = $box.position().top + $box.height() / 2;
+
+ $box.hide();
+ var element = document.elementFromPoint(x, y);
+ $box.show();
+ return $(element).closestMatching('a,.hoverable');
+}
+
+$.fn.closestMatching = function(selector) {
+ if ($(this).is(selector)) return $(this)[0];
+
+ var parents = $(this).parents();
+ for (var i = 0; i < parents.length; i++) {
+ if ($(parents[i]).is(selector)) return $(parents[i])[0];
+ }
+ return undefined;
+}
+
+DepthJS.selectorBox.handleHover = function() {
+ var lastElement = $('.depthjs-hover')[0];
+ var element = DepthJS.selectorBox.elementAtCursor();
+
+ if (element == lastElement) { // same element
+ // do nothing
+ } else {
+ if (lastElement) {
+ DepthJS.trigger($(lastElement).removeClass('depthjs-hover')[0], 'hoverOut');
}
- return true;
- });
+ if (element) {
+ DepthJS.trigger($(element).addClass("depthjs-hover")[0], "hoverOver");
+ }
+ }
+};
- if (DepthJS.verbose) console.log("Got " + $intersectingLinks.length + " links");
- if (DepthJS.verbose) console.log($intersectingLinks);
+DepthJS.selectorBox.activate = function() {
+ var $intersectingLinks = DepthJS.selectorBox.elementAtCursor();
if ($intersectingLinks.length > 0) {
DepthJS.selectorBoxPopup.$links = $intersectingLinks;
DepthJS.selectorBoxPopup.activate();
diff --git a/new_cv/NewCV/FreenectDevice.h b/new_cv/NewCV/FreenectDevice.h
new file mode 100644
index 0000000..8c889f8
--- /dev/null
+++ b/new_cv/NewCV/FreenectDevice.h
@@ -0,0 +1,104 @@
+/*
+ * FreenectDevice.h
+ * NewCV
+ *
+ * Created by Roy Shilkrot on 3/3/11.
+ * Copyright 2011 MIT. All rights reserved.
+ *
+ */
+#include "libfreenect.hpp"
+#include
+#include
+#include
+#include
+
+using namespace cv;
+
+#include
+using namespace std;
+
+class Mutex {
+public:
+ Mutex() {
+ pthread_mutex_init( &m_mutex, NULL );
+ }
+ void lock() {
+ pthread_mutex_lock( &m_mutex );
+ }
+ void unlock() {
+ pthread_mutex_unlock( &m_mutex );
+ }
+private:
+ pthread_mutex_t m_mutex;
+};
+
+class MyFreenectDevice : public Freenect::FreenectDevice {
+public:
+ MyFreenectDevice(freenect_context *_ctx, int _index)
+ : Freenect::FreenectDevice(_ctx, _index), m_buffer_depth(FREENECT_DEPTH_11BIT_SIZE),m_buffer_rgb(FREENECT_VIDEO_RGB_SIZE), m_gamma(2048), m_new_rgb_frame(false), m_new_depth_frame(false),
+ depthMat(Size(640,480),CV_16UC1), rgbMat(Size(640,480),CV_8UC3,Scalar(0)), ownMat(Size(640,480),CV_8UC3,Scalar(0))
+ {
+ for( unsigned int i = 0 ; i < 2048 ; i++) {
+ float v = i/2048.0;
+ v = std::pow(v, 3)* 6;
+ m_gamma[i] = v*6*256;
+ }
+ }
+ // Do not call directly even in child
+ void VideoCallback(void* _rgb, uint32_t timestamp) {
+// std::cout << "RGB callback" << std::endl;
+ m_rgb_mutex.lock();
+ uint8_t* rgb = static_cast(_rgb);
+ rgbMat.data = rgb;
+ m_new_rgb_frame = true;
+ m_rgb_mutex.unlock();
+ };
+ // Do not call directly even in child
+ void DepthCallback(void* _depth, uint32_t timestamp) {
+// std::cout << "Depth callback" << std::endl;
+ m_depth_mutex.lock();
+ uint16_t* depth = static_cast(_depth);
+ depthMat.data = (uchar*) depth;
+ m_new_depth_frame = true;
+ m_depth_mutex.unlock();
+ }
+
+ bool getVideo(Mat& output) {
+ m_rgb_mutex.lock();
+ if(m_new_rgb_frame) {
+ cv::cvtColor(rgbMat, output, CV_RGB2BGR);
+ m_new_rgb_frame = false;
+ m_rgb_mutex.unlock();
+ return true;
+ } else {
+ m_rgb_mutex.unlock();
+ return false;
+ }
+ }
+
+ bool getDepth(Mat& output) {
+ m_depth_mutex.lock();
+ if(m_new_depth_frame) {
+ depthMat.copyTo(output);
+ m_new_depth_frame = false;
+ m_depth_mutex.unlock();
+ return true;
+ } else {
+ m_depth_mutex.unlock();
+ return false;
+ }
+ }
+
+private:
+ std::vector m_buffer_depth;
+ std::vector m_buffer_rgb;
+ std::vector m_gamma;
+ Mat depthMat;
+ Mat rgbMat;
+ Mat ownMat;
+ Mutex m_rgb_mutex;
+ Mutex m_depth_mutex;
+ bool m_new_rgb_frame;
+ bool m_new_depth_frame;
+};
+
diff --git a/new_cv/NewCV/main.cpp b/new_cv/NewCV/main.cpp
new file mode 100644
index 0000000..46c876f
--- /dev/null
+++ b/new_cv/NewCV/main.cpp
@@ -0,0 +1,457 @@
+#include "FreenectDevice.h"
+
+Scalar refineSegments(const Mat& img,
+ Mat& mask,
+ Mat& dst,
+ vector& contour,
+ vector& second_contour,
+ Point2i& previous)
+{
+ // int niters = 3;
+
+ vector > contours;
+ vector hierarchy;
+
+ Mat temp;
+
+ blur(mask, temp, Size(11,11));
+ temp = temp > 85.0;
+
+ findContours( temp, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE );
+
+ if(dst.data==NULL)
+ dst = Mat::zeros(img.size(), CV_8UC1);
+ else
+ dst.setTo(Scalar(0));
+
+ if( contours.size() == 0 )
+ return Scalar(-1,-1);
+
+ // iterate through all the top-level contours,
+ // draw each connected component with its own random color
+ int idx = 0, largestComp = -1, secondlargest = -1;
+ double maxWArea = 0, maxJArea = 0;
+ vector justarea(contours.size());
+ vector weightedarea(contours.size());
+
+ // for( ; idx >= 0; idx = hierarchy[idx][0] )
+ for (; idx& c = contours[idx];
+ Scalar _mean = mean(Mat(contours[idx]));
+ justarea[idx] = fabs(contourArea(Mat(c)));
+ weightedarea[idx] = fabs(contourArea(Mat(c))) /
+ ((previous.x >- 1) ? (1.0 + norm(Point(_mean[0],_mean[1])-previous)) : 1.0); //consider distance from last blob
+ }
+ for (idx = 0; idx maxWArea )
+ {
+ maxWArea = weightedarea[idx];
+ largestComp = idx;
+ }
+ }
+ for (idx = 0; idx < contours.size(); idx++) {
+ if ( justarea[idx] > maxJArea && idx != largestComp ) {
+ maxJArea = justarea[idx];
+ secondlargest = idx;
+ }
+ }
+
+ Scalar color( 255 );
+ // cout << "largest cc " << largestComp << endl;
+ // drawContours( dst, contours, largestComp, color, CV_FILLED); //, 8, hierarchy );
+ // for (idx=0; idx= 0) {
+ //find top-left values
+ int maxx = -INT_MAX,miny = INT_MAX;
+ int num = contours[largestComp].size();
+ for (int i=0; i maxx) maxx = contours[largestComp][i].x;
+ if(contours[largestComp][i].y < miny) miny = contours[largestComp][i].y;
+ }
+
+ //crop contour to 150x150 "window"
+ vector newblob;
+ int maxxp150 = MAX(maxx-200,0),minyp150 = MIN(miny+170,480);
+
+ for (int i=0; i maxxp150 && _p.y < minyp150) newblob.push_back(_p);
+ }
+
+ Point* pts = &(newblob[0]);
+ num = newblob.size();
+ fillPoly(dst, (const Point**)(&pts), &num, 1, color);
+
+ Scalar b = mean(Mat(newblob));
+ b[2] = justarea[largestComp];
+
+ contour.clear();
+ contour = newblob;
+
+ second_contour.clear();
+ if(secondlargest >= 0) {
+ second_contour = contours[secondlargest];
+ b[3] = maxJArea;
+ }
+
+ previous.x = b[0]; previous.y = b[1];
+ return b;
+ } else
+ return Scalar(-1,-1);
+
+}
+
+#define LABEL_GARBAGE 0
+#define LABEL_OPEN 1
+#define LABEL_FIST 2
+#define LABEL_THUMB 3
+
+
+int main(int argc, char **argv) {
+ bool die(false);
+ string filename("snapshot");
+ string suffix(".png");
+ int i_snap(0),iter(0);
+
+ Mat depthMat(Size(640,480),CV_16UC1);
+ Mat depthf (Size(640,480),CV_8UC1);
+ Mat rgbMat(Size(640,480),CV_8UC3,Scalar(0));
+ Mat ownMat(Size(640,480),CV_8UC3,Scalar(0));
+
+ Freenect::Freenect freenect;
+ MyFreenectDevice& device = freenect.createDevice(0);
+
+ bool registered = false;
+ Mat blobMaskOutput = Mat::zeros(Size(640,480),CV_8UC1),outC;
+ Point midBlob;
+
+ int startX = 200, sizeX = 180, num_x_reps = 20, num_y_reps = 50;
+ double height_over_num_y_reps = 480/num_y_reps,
+ width_over_num_x_reps = sizeX/num_x_reps;
+
+
+ vector _d(num_x_reps * num_y_reps); //the descriptor
+ Mat descriptorMat(_d);
+
+// CvNormalBayesClassifier classifier; //doesnt work
+ CvKNearest classifier;
+// CvSVM classifier; //doesnt work
+// CvBoost classifier; //only good for 2 classes
+// CvDTree classifier;
+
+
+ vector > training_data;
+ vector label_data;
+ PCA pca;
+ Mat labelMat, dataMat;
+ vector label_counts(4);
+
+ bool trained = false, loaded = false;
+
+ device.startVideo();
+ device.startDepth();
+ while (!die) {
+ device.getVideo(rgbMat);
+ device.getDepth(depthMat);
+// cv::imshow("rgb", rgbMat);
+ depthMat.convertTo(depthf, CV_8UC1, 255.0/2048.0);
+ cv::imshow("depth",depthf);
+
+ //interpolation & inpainting
+ {
+ Mat _tmp,_tmp1; // = (depthMat - 400.0); //minimum observed value is ~440. so shift a bit
+ Mat(depthMat - 400.0).convertTo(_tmp1,CV_64FC1);
+ _tmp.setTo(Scalar(2048), depthMat > 750.0); //cut off at 600 to create a "box" where the user interacts
+// _tmp.convertTo(depthf, CV_8UC1, 255.0/1648.0); //values are 0-2048 (11bit), account for -400 = 1648
+
+ //quadratic interpolation
+// cv::pow(_tmp,2.0,_tmp1);
+// _tmp1 = _tmp1 * 4.0;
+
+// try {
+// cv:log(_tmp,_tmp1);
+// }
+// catch (cv::Exception e) {
+// cerr << e.what() << endl;
+// exit(0);
+// }
+
+ Point minLoc; double minval,maxval;
+ minMaxLoc(_tmp1, &minval, &maxval, NULL, NULL);
+ _tmp1.convertTo(depthf, CV_8UC1, 255.0/maxval);
+
+ Mat small_depthf; resize(depthf,small_depthf,Size(),0.2,0.2);
+ cv::inpaint(small_depthf,(small_depthf == 255),_tmp1,5.0,INPAINT_TELEA);
+
+ resize(_tmp1, _tmp, depthf.size());
+ _tmp.copyTo(depthf, (depthf == 255));
+ }
+
+ cvtColor(depthf, outC, CV_GRAY2BGR);
+
+ Mat blobMaskInput = depthf < 120; //anything not white is "real" depth, TODO: inpainting invalid data
+ vector ctr,ctr2;
+
+ //closest point to the camera
+ Point minLoc; double minval,maxval;
+ minMaxLoc(depthf, &minval, &maxval, &minLoc, NULL, blobMaskInput);
+ circle(outC, minLoc, 5, Scalar(0,255,0), 3);
+
+ blobMaskInput = depthf < (minval + 18);
+
+ Scalar blb = refineSegments(Mat(),blobMaskInput,blobMaskOutput,ctr,ctr2,midBlob); //find contours in the foreground, choose biggest
+// if (blobMaskOutput.data != NULL) {
+// imshow("first", blobMaskOutput);
+// }
+ /////// blb :
+ //blb[0] = x, blb[1] = y, blb[2] = 1st blob size, blb[3] = 2nd blob size.
+
+
+
+ if(blb[0]>=0 && blb[2] > 500) { //1st blob detected, and is big enough
+ //cvtColor(depthf, outC, CV_GRAY2BGR);
+
+ Scalar mn,stdv;
+ meanStdDev(depthf,mn,stdv,blobMaskInput);
+
+ //cout << "min: " << minval << ", max: " << maxval << ", mean: " << mn[0] << endl;
+
+ //now refining blob by looking at the mean depth value it has...
+ blobMaskInput = depthf < (mn[0] + stdv[0]);
+
+ blb = refineSegments(Mat(),blobMaskInput,blobMaskOutput,ctr,ctr2,midBlob);
+
+ imshow("blob", blobMaskOutput);
+
+ if(blb[0] >= 0 && blb[2] > 300) {
+ //draw contour
+ Scalar color(0,0,255);
+ for (int idx=0; idx 0) { //second blob detected
+ Scalar color2(255,0,255);
+ for (int idx=0; idx, <#const int *channels#>, <#const Mat mask#>, <#MatND hist#>, <#int dims#>, <#const int *histSize#>, <#const float **ranges#>, <#bool uniform#>, <#bool accumulate#>)
+ */
+
+// Mat _tmp(logPolar.size(),CV_8UC1);
+// cvLogPolar(&((IplImage)logPolar), &((IplImage)_tmp),Point2f(blb[0],blb[1]), 80.0, CV_WARP_INVERSE_MAP);
+// imshow("descriptor", _tmp);
+// imshow("logpolar", logPolar);
+ }
+ }
+
+ if(trained) {
+ Mat results(1,1,CV_32FC1);
+ Mat samples; Mat(Mat(_d).t()).convertTo(samples,CV_32FC1);
+
+ Mat samplesAfterPCA = samples; //pca.project(samples);
+
+ classifier.find_nearest(&((CvMat)samplesAfterPCA), 1, &((CvMat)results));
+// ((float*)results.data)[0] = classifier.predict(&((CvMat)samples))->value;
+
+ Mat lc(label_counts); lc *= 0.9;
+
+// label_counts[(int)((float*)results.data)[0]] *= 0.9;
+ label_counts[(int)((float*)results.data)[0]] += 0.1;
+ Point maxLoc;
+ minMaxLoc(lc, NULL, NULL, NULL, &maxLoc);
+ int res = maxLoc.y;
+
+ stringstream ss; ss << "prediction: ";
+ if (res == LABEL_OPEN) {
+ ss << "Open hand";
+ }
+ if (res == LABEL_FIST) {
+ ss << "Fist";
+ }
+ if (res == LABEL_THUMB) {
+ ss << "Thumb";
+ }
+ if (res == LABEL_GARBAGE) {
+ ss << "Garbage";
+ }
+ putText(outC, ss.str(), Point(20,50), CV_FONT_HERSHEY_PLAIN, 3.0, Scalar(0,0,255), 2);
+ }
+
+ stringstream ss; ss << "samples: " << training_data.size();
+ putText(outC, ss.str(), Point(30,outC.rows - 30), CV_FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 1);
+
+ imshow("blobs", outC);
+
+ char k = cvWaitKey(5);
+ if( k == 27 ){
+ break;
+ }
+ if( k == 8 ) {
+ std::ostringstream file;
+ file << filename << i_snap << suffix;
+ cv::imwrite(file.str(),rgbMat);
+ i_snap++;
+ }
+ if (k == 'g') {
+ //put into training as 'garbage'
+ training_data.push_back(_d);
+ label_data.push_back(LABEL_GARBAGE);
+ cout << "learn grabage" << endl;
+ }
+ if(k == 'o') {
+ //put into training as 'open'
+ training_data.push_back(_d);
+ label_data.push_back(LABEL_OPEN);
+ cout << "learn open" << endl;
+ }
+ if(k == 'f') {
+ //put into training as 'fist'
+ training_data.push_back(_d);
+ label_data.push_back(LABEL_FIST);
+ cout << "learn fist" << endl;
+ }
+ if(k == 'h') {
+ //put into training as 'thumb'
+ training_data.push_back(_d);
+ label_data.push_back(LABEL_THUMB);
+ cout << "learn thumb" << endl;
+ }
+ if (k=='t') {
+ //train model
+ cout << "train model" << endl;
+ if(loaded != true) {
+ dataMat = Mat(training_data.size(),_d.size(),CV_32FC1); //descriptors as matrix rows
+ for (uint i=0; i> dataMat;
+ fs["labels"] >> labelMat;
+ fs["startX"] >> startX;
+ fs["sizeX"] >> sizeX;
+ fs["num_x_reps"] >> num_x_reps;
+ fs["num_y_reps"] >> num_y_reps;
+ height_over_num_y_reps = 480/num_y_reps;
+ width_over_num_x_reps = sizeX/num_x_reps;
+
+ loaded = true;
+ fs.release();
+ } else {
+ cerr << "can't open saved data" << endl;
+ }
+ }
+ }
+
+ device.stopVideo();
+ device.stopDepth();
+ return 0;
+}
diff --git a/safari-extension-mac/DepthJS.safariextension/content_script/root.js b/safari-extension-mac/DepthJS.safariextension/content_script/root.js
index 2caa7d7..cdc8aae 100644
--- a/safari-extension-mac/DepthJS.safariextension/content_script/root.js
+++ b/safari-extension-mac/DepthJS.safariextension/content_script/root.js
@@ -32,6 +32,7 @@ var DepthJS = {
panner: {},
depthose: {},
browser: {},
+ tabs: {},
MAX_HANDPLANE_WIDTH: 100,
MAX_HANDPLANE_HEIGHT: 100
};
@@ -52,16 +53,16 @@ function print() {
counts[msg.type] = counts[msg.type] + 1;
lastByType[msg.type] = msg.data;
});
-
+
var alphabeticalKeys = _.keys(counts).sort();
console.log("------" + (new Date() + ""));
_.each(alphabeticalKeys, function(type) {
console.log([" " + counts[type] + " " + type + "; last = ", lastByType[type]]);
});
-
+
lastMessages = [];
}
setTimeout(print, 1000);
})();
-}
\ No newline at end of file
+}
diff --git a/webkit-plugin-mac/DLog.h b/webkit-plugin-mac/DLog.h
new file mode 100644
index 0000000..f128a00
--- /dev/null
+++ b/webkit-plugin-mac/DLog.h
@@ -0,0 +1,20 @@
+/*
+ * DebugLog.h
+ * DebugLog
+ *
+ * Created by Karl Kraft on 3/22/09.
+ * Copyright 2009 Karl Kraft. All rights reserved.
+ *
+ */
+
+#ifndef __OPTIMIZE__
+
+#define DLog(args...) _DLog(__FILE__,__PRETTY_FUNCTION__,__LINE__,args);
+
+#else
+
+#define DLog(x...)
+
+#endif
+
+void _DLog(const char *file, const char *function, int lineNumber, NSString *format,...);
diff --git a/webkit-plugin-mac/DLog.mm b/webkit-plugin-mac/DLog.mm
new file mode 100644
index 0000000..f4ac7bc
--- /dev/null
+++ b/webkit-plugin-mac/DLog.mm
@@ -0,0 +1,39 @@
+/*
+ * DebugLog.m
+ * DebugLog
+ *
+ * Created by Karl Kraft on 3/22/09.
+ * Copyright 2009 Karl Kraft. All rights reserved.
+ *
+ */
+
+#include "DLog.h"
+
+//void _DebugLog(const char *function, int lineNumber, NSString *format,...) {
+// va_list ap;
+//
+// va_start (ap, format);
+// if (![format hasSuffix: @"\n"]) {
+// format = [format stringByAppendingString: @"\n"];
+// }
+// NSString *body = [[NSString alloc] initWithFormat: format arguments: ap];
+// va_end (ap);
+// NSString *fileName=[[NSString stringWithUTF8String:file] lastPathComponent];
+// fprintf(stderr,"%s:%d %s",[fileName UTF8String],lineNumber,[body UTF8String]);
+// [body release];
+//}
+
+
+void _DLog(const char *file, const char *function, int lineNumber, NSString *format,...){
+ va_list args;
+ va_start(args, format);
+
+ NSString *body = [[NSString alloc] initWithFormat:format arguments:args];
+ //NSString *filename = [[NSString stringWithFormat:@"%s",file] lastPathComponent];
+ NSString *logLine = [[NSString alloc] initWithFormat:@"%s %@\n", function, body];
+ va_end(args);
+
+ [[NSFileHandle fileHandleWithStandardOutput] writeData: [logLine dataUsingEncoding: NSNEXTSTEPStringEncoding]];
+ [logLine release];
+ [body release];
+}
diff --git a/webkit-plugin-mac/FreenectDevice.h b/webkit-plugin-mac/FreenectDevice.h
new file mode 100644
index 0000000..d70f10d
--- /dev/null
+++ b/webkit-plugin-mac/FreenectDevice.h
@@ -0,0 +1,119 @@
+/*
+ DepthJS
+ Copyright (C) 2010 Aaron Zinman, Doug Fritz, Roy Shilkrot, Greg Elliott
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see .
+ *
+ * FreenectDevice.h
+ * NewCV
+ *
+ * Created by Roy Shilkrot on 3/3/11.
+ *
+ */
+#include "libfreenect.hpp"
+#include
+#include
+#include
+#include
+
+using namespace cv;
+
+#include
+using namespace std;
+
+class Mutex {
+public:
+ Mutex() {
+ pthread_mutex_init( &m_mutex, NULL );
+ }
+ void lock() {
+ pthread_mutex_lock( &m_mutex );
+ }
+ void unlock() {
+ pthread_mutex_unlock( &m_mutex );
+ }
+private:
+ pthread_mutex_t m_mutex;
+};
+
+class MyFreenectDevice : public Freenect::FreenectDevice {
+public:
+ MyFreenectDevice(freenect_context *_ctx, int _index)
+ : Freenect::FreenectDevice(_ctx, _index), m_buffer_depth(FREENECT_DEPTH_11BIT_SIZE),m_buffer_rgb(FREENECT_VIDEO_RGB_SIZE), m_gamma(2048), m_new_rgb_frame(false), m_new_depth_frame(false),
+ depthMat(Size(640,480),CV_16UC1), rgbMat(Size(640,480),CV_8UC3,Scalar(0)), ownMat(Size(640,480),CV_8UC3,Scalar(0))
+ {
+ for( unsigned int i = 0 ; i < 2048 ; i++) {
+ float v = i/2048.0;
+ v = std::pow(v, 3)* 6;
+ m_gamma[i] = v*6*256;
+ }
+ }
+ // Do not call directly even in child
+ void VideoCallback(void* _rgb, uint32_t timestamp) {
+// std::cout << "RGB callback" << std::endl;
+ m_rgb_mutex.lock();
+ uint8_t* rgb = static_cast(_rgb);
+ rgbMat.data = rgb;
+ m_new_rgb_frame = true;
+ m_rgb_mutex.unlock();
+ };
+ // Do not call directly even in child
+ void DepthCallback(void* _depth, uint32_t timestamp) {
+// std::cout << "Depth callback" << std::endl;
+ m_depth_mutex.lock();
+ uint16_t* depth = static_cast(_depth);
+ depthMat.data = (uchar*) depth;
+ m_new_depth_frame = true;
+ m_depth_mutex.unlock();
+ }
+
+ bool getVideo(Mat& output) {
+ m_rgb_mutex.lock();
+ if(m_new_rgb_frame) {
+ cv::cvtColor(rgbMat, output, CV_RGB2BGR);
+ m_new_rgb_frame = false;
+ m_rgb_mutex.unlock();
+ return true;
+ } else {
+ m_rgb_mutex.unlock();
+ return false;
+ }
+ }
+
+ bool getDepth(Mat& output) {
+ m_depth_mutex.lock();
+ if(m_new_depth_frame) {
+ depthMat.copyTo(output);
+ m_new_depth_frame = false;
+ m_depth_mutex.unlock();
+ return true;
+ } else {
+ m_depth_mutex.unlock();
+ return false;
+ }
+ }
+
+private:
+ std::vector m_buffer_depth;
+ std::vector m_buffer_rgb;
+ std::vector m_gamma;
+ Mat depthMat;
+ Mat rgbMat;
+ Mat ownMat;
+ Mutex m_rgb_mutex;
+ Mutex m_depth_mutex;
+ bool m_new_rgb_frame;
+ bool m_new_depth_frame;
+};
+
diff --git a/webkit-plugin-mac/gesture_engine.cpp b/webkit-plugin-mac/gesture_engine.cpp
new file mode 100644
index 0000000..66cb24c
--- /dev/null
+++ b/webkit-plugin-mac/gesture_engine.cpp
@@ -0,0 +1,693 @@
+/*
+ DepthJS
+ Copyright (C) 2010 Aaron Zinman, Doug Fritz, Roy Shilkrot, Greg Elliott
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include "FreenectDevice.h"
+
+#define LABEL_GARBAGE 0
+#define LABEL_OPEN 1
+#define LABEL_FIST 2
+#define LABEL_THUMB 3
+
+extern void send_event(const string& etype, const string& edata);
+
+// Core computer-vision engine: pulls Kinect depth/RGB frames, segments the
+// closest blob (assumed to be the hand), classifies its pose with a
+// PCA + k-NN model, and emits high-level events (Register/Move/Swipe*/
+// HandClick) through the external send_event() hook.
+// NOTE(review): template arguments below were stripped by angle-bracket
+// mangling in the diff and are reconstructed — verify against upstream.
+class GestureEngine {
+private:
+ bool running;                          // true while RunEngine() loops
+
+ Mat depthMat;                          // raw depth frame (CV_16UC1, 640x480)
+ Mat depthf;                            // depth normalized/inpainted to 8 bit
+ Mat rgbMat;                            // last RGB frame
+ Mat ownMat;
+
+ Freenect::Freenect freenect;
+ MyFreenectDevice* device;
+
+ bool registered;                       // hand currently locked on?
+ Mat blobMaskOutput;                    // filled mask of the tracked blob
+ Mat outC;                              // color visualization image
+ Point midBlob;                         // last blob centroid; (-1,-1) = none
+
+ // descriptor parameters
+ int startX, sizeX, num_x_reps, num_y_reps;
+ double height_over_num_y_reps, width_over_num_x_reps;
+
+ vector<double> _d;                     // the descriptor
+ Mat descriptorMat;                     // same data viewed as a matrix
+
+ CvKNearest classifier;
+
+ vector<vector<double> > training_data;
+ vector<int> label_data;
+ PCA pca;
+ Mat labelMat, dataMat;
+ vector<float> label_counts;            // decayed per-label vote counts
+
+ bool trained;
+ bool loaded;
+
+ int mode;                              // gesture label active at registration
+
+ int register_ctr, register_secondbloc_ctr;
+
+ Point2i appear; double appearTS;       // first-appearance position/timestamp
+
+ Point2i lastMove;
+
+ int hcr_ctr;
+ vector<int> hc_stack;                  // ring buffer of recent counts
+ int hc_stack_ptr;
+
+ int pca_number_of_features;
+
+ Scalar _refineSegments(const Mat& img,
+                        Mat& mask,
+                        Mat& dst,
+                        vector<Point>& contour,
+                        vector<Point>& second_contour,
+                        Point2i& previous);
+ int TrainModel();
+ void SaveModelData();
+ int LoadModelData(const char* filename);
+ void InterpolateAndInpaint();
+ void ComputeDescriptor(Scalar);
+ string GetStringForGestureCode(int);
+ void CheckRegistered(Scalar, int, Scalar);
+ int GetMostLikelyGesture();
+
+public:
+ bool die;                              // set by kill_gesture_engine() to stop the loop
+
+ GestureEngine(): running(false),
+ registered(false),
+ startX(250),
+ sizeX(150),
+ num_x_reps(10),
+ num_y_reps(10),
+ height_over_num_y_reps(480/num_y_reps),
+ width_over_num_x_reps(sizeX/num_x_reps),
+ label_counts(vector<float>(4)),
+ trained(false),
+ loaded(false),
+ mode(LABEL_GARBAGE),
+ pca_number_of_features(50),
+ die(false)
+ {
+ depthMat = Mat(Size(640,480),CV_16UC1);
+ depthf = Mat(Size(640,480),CV_8UC1);
+ rgbMat = Mat(Size(640,480),CV_8UC3,Scalar(0));
+ ownMat = Mat(Size(640,480),CV_8UC3,Scalar(0));
+ blobMaskOutput = Mat(Size(640,480),CV_8UC1,Scalar(0));
+
+ _d = vector<double>(num_x_reps*num_y_reps);
+ descriptorMat = Mat(_d);
+
+ register_ctr = register_secondbloc_ctr = 0;
+ registered = false;
+
+ appear = Point2i(-1,-1);
+ appearTS = -1;
+
+ midBlob = Point2i(-1,-1);
+ lastMove = Point2i(-1,-1);
+
+ hcr_ctr = -1;
+ hc_stack = vector<int>(20);
+ hc_stack_ptr = 0;
+ };
+
+ void RunEngine();
+ bool getRunning() { return running; }
+ int InitializeFreenect(const char*);
+};
+
+// Segment the hand blob from a binary mask: blur+threshold the mask, find
+// external contours, choose the contour with the largest area weighted by
+// proximity to the previous blob position, crop it to a window near its
+// top-right extreme, fill it into 'dst', and return its statistics:
+// Scalar(centroid_x, centroid_y, area[, second_blob_area]) on success,
+// Scalar(-1,-1) when nothing usable was found. Updates 'previous' with the
+// new centroid.
+// NOTE(review): this function was garbled by angle-bracket stripping in the
+// diff; loop headers and template arguments below are reconstructed and
+// should be verified against the upstream DepthJS sources.
+Scalar GestureEngine::_refineSegments(const Mat& img,
+                                      Mat& mask,
+                                      Mat& dst,
+                                      vector<Point>& contour,
+                                      vector<Point>& second_contour,
+                                      Point2i& previous)
+{
+    vector<vector<Point> > contours;
+    vector<Vec4i> hierarchy;
+
+    Mat temp;
+
+    // Smooth and re-threshold the mask so nearby specks merge into one blob.
+    blur(mask, temp, Size(11,11));
+    temp = temp > 85.0;
+
+    findContours( temp, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE );
+
+    if(dst.data==NULL)
+        dst = Mat::zeros(img.size(), CV_8UC1);
+    else
+        dst.setTo(Scalar(0));
+
+    if( contours.size() == 0 )
+        return Scalar(-1,-1);
+
+    // Score every top-level contour: plain area, and area weighted down by
+    // distance from the previous blob position (temporal consistency).
+    int idx = 0, largestComp = -1, secondlargest = -1;
+    double maxWArea = 0, maxJArea = 0;
+    vector<double> justarea(contours.size());
+    vector<double> weightedarea(contours.size());
+
+    for (; idx < (int)contours.size(); idx++) {
+        const vector<Point>& c = contours[idx];
+        Scalar _mean = mean(Mat(contours[idx]));
+        justarea[idx] = fabs(contourArea(Mat(c)));
+        weightedarea[idx] = fabs(contourArea(Mat(c))) /
+            ((previous.x > -1) ? (1.0 + norm(Point(_mean[0],_mean[1])-previous)) : 1.0); //consider distance from last blob
+    }
+    for (idx = 0; idx < (int)contours.size(); idx++) {
+        if( weightedarea[idx] > maxWArea )
+        {
+            maxWArea = weightedarea[idx];
+            largestComp = idx;
+        }
+    }
+    // Second-largest by plain area — candidate for the "two hands" mode.
+    for (idx = 0; idx < (int)contours.size(); idx++) {
+        if ( justarea[idx] > maxJArea && idx != largestComp ) {
+            maxJArea = justarea[idx];
+            secondlargest = idx;
+        }
+    }
+
+    Scalar color( 255 );
+
+    if(largestComp >= 0) {
+        // Find the right-most / top-most extremes of the winning contour.
+        int maxx = -INT_MAX, miny = INT_MAX;
+        int num = contours[largestComp].size();
+        for (int i=0; i<num; i++) {
+            if(contours[largestComp][i].x > maxx) maxx = contours[largestComp][i].x;
+            if(contours[largestComp][i].y < miny) miny = contours[largestComp][i].y;
+        }
+
+        // Crop the contour to a fixed-size "window" anchored at that corner.
+        vector<Point> newblob;
+        int maxxp150 = MAX(maxx-200,0), minyp150 = MIN(miny+170,480);
+
+        circle(outC, Point(maxx,miny), 2, Scalar(0,255,0), 1);
+        circle(outC, Point(maxxp150,minyp150), 2, Scalar(0,255,0), 1);
+
+        for (int i=0; i<num; i++) {
+            Point _p = contours[largestComp][i];
+            if(_p.x > maxxp150 && _p.y < minyp150) newblob.push_back(_p);
+        }
+
+        // Fixed: cropping can leave the blob empty; taking &newblob[0] on an
+        // empty vector is undefined behavior.
+        if(newblob.empty())
+            return Scalar(-1,-1);
+
+        Point* pts = &(newblob[0]);
+        num = newblob.size();
+        fillPoly(dst, (const Point**)(&pts), &num, 1, color);
+
+        Scalar b = mean(Mat(newblob));
+        b[2] = justarea[largestComp];
+
+        contour.clear();
+        contour = newblob;
+
+        second_contour.clear();
+        if(secondlargest >= 0) {
+            second_contour = contours[secondlargest];
+            b[3] = maxJArea;
+        }
+
+        previous.x = b[0]; previous.y = b[1];
+        return b;
+    } else
+        return Scalar(-1,-1);
+}
+
+// Train the k-NN gesture classifier from the collected descriptors (only
+// when a saved model was not already loaded).
+// NOTE(review): this hunk is garbled — angle-bracket stripping fused the
+// training loop header ("for (uint i=0; i") with what appears to be the
+// tail of LoadModelData() (the FileStorage reads below), and the code in
+// between (descriptor-matrix fill, PCA, classifier.train, SaveModelData,
+// the head of LoadModelData) is missing from this view. Recover the full
+// function bodies from the upstream repository before editing.
+int GestureEngine::TrainModel() {
+ cout << "train model" << endl;
+ if(loaded != true) {
+ dataMat = Mat(training_data.size(),_d.size(),CV_32FC1); //descriptors as matrix rows
+ for (uint i=0; i> dataMat;
+ fs["labels"] >> labelMat;
+ fs["startX"] >> startX;
+ fs["sizeX"] >> sizeX;
+ fs["num_x_reps"] >> num_x_reps;
+ fs["num_y_reps"] >> num_y_reps;
+ // Derived descriptor-grid parameters recomputed from the loaded values.
+ height_over_num_y_reps = 480/num_y_reps;
+ width_over_num_x_reps = sizeX/num_x_reps;
+ _d = vector(num_x_reps*num_y_reps);
+ descriptorMat = Mat(_d);
+ loaded = true;
+ fs.release();
+ } else {
+ cerr << "can't open saved data" << endl;
+ return 0;
+ }
+ return 1;
+}
+
+// Normalize the raw depth map into depthf (8-bit) and fill its holes:
+// saturated (==255) pixels are inpainted on a downscaled copy and blended
+// back in, which keeps the inpainting cheap.
+void GestureEngine::InterpolateAndInpaint() {
+ Mat shifted64;
+ // Minimum observed depth is ~440, so shift down before converting.
+ Mat(depthMat - 400.0).convertTo(shifted64, CV_64FC1);
+
+ double minval, maxval;
+ minMaxLoc(shifted64, &minval, &maxval, NULL, NULL);
+ // Rescale so the farthest value maps to 255 in the 8-bit image.
+ shifted64.convertTo(depthf, CV_8UC1, 255.0/maxval);
+
+ // Inpaint the unknown (==255) pixels on a 1:5 downscaled image for speed.
+ Mat small_depthf;
+ resize(depthf, small_depthf, Size(), 0.2, 0.2);
+ cv::inpaint(small_depthf, (small_depthf == 255), shifted64, 5.0, INPAINT_TELEA);
+
+ // Upscale the inpainted result and paste it over the holes only.
+ Mat filled;
+ resize(shifted64, filled, depthf.size());
+ filled.copyTo(depthf, (depthf == 255));
+}
+
+// Build the gesture descriptor for the current blob: edge-detect the masked
+// depth image, then remap it to log-polar coordinates around the blob
+// centroid (blb[0], blb[1]) so the descriptor is rotation/scale tolerant.
+// NOTE(review): this hunk is garbled — the loop that actually fills the
+// descriptor vector _d is missing (it was fused into the commented line
+// containing Xcode "<#...#>" placeholders below). Recover the full body
+// from the upstream repository before editing.
+void GestureEngine::ComputeDescriptor(Scalar blb) {
+ Mat blobDepth,blobEdge;
+ depthf.copyTo(blobDepth,blobMaskOutput);
+ Laplacian(blobDepth, blobEdge, 8);
+ // equalizeHist(blobEdge, blobEdge);//just for visualization
+
+ Mat logPolar(depthf.size(),CV_8UC1);
+ cvLogPolar(&((IplImage)blobEdge), &((IplImage)logPolar), Point2f(blb[0],blb[1]), 80.0);
+
+ // for (int i=0; i, <#const int *channels#>, <#const Mat mask#>, <#MatND hist#>, <#int dims#>, <#const int *histSize#>, <#const float **ranges#>, <#bool uniform#>, <#bool accumulate#>)
+ /* NOTE(review): the terminator on the next line is a stray left over from
+    the garbled extraction; this comment block absorbs it so it parses.
+ */
+
+ // Mat _tmp(logPolar.size(),CV_8UC1);
+ // cvLogPolar(&((IplImage)logPolar), &((IplImage)_tmp),Point2f(blb[0],blb[1]), 80.0, CV_WARP_INVERSE_MAP);
+ // imshow("descriptor", _tmp);
+ // imshow("logpolar", logPolar);
+
+}
+
+// Map a classifier label to the mode string sent to the browser extension.
+// (The string values are part of the wire protocol — do not change them.)
+string GestureEngine::GetStringForGestureCode(int res) {
+ switch (res) {
+ case LABEL_OPEN:    return "openhand";
+ case LABEL_FIST:    return "theforce";
+ case LABEL_THUMB:   return "Thumb";
+ case LABEL_GARBAGE: return "Garbage";
+ default:            return "none";
+ }
+}
+
+// State machine driven once per frame: while a recognized (non-garbage)
+// gesture persists, "register" the hand and stream Move/HandClick events;
+// while only garbage is seen, watch the blob's trajectory for swipe
+// gestures, or de-register after enough idle time.
+// blb = blob stats (x, y, area, second-blob area); mn = mean depth of blob.
+void GestureEngine::CheckRegistered(Scalar blb, int recognized_gesture, Scalar mn) {
+ if(recognized_gesture != LABEL_GARBAGE) {
+ // Saturating confidence counters (capped at 60 frames).
+ register_ctr = MIN((register_ctr + 1),60);
+
+ if(blb[3] > 5000)
+ register_secondbloc_ctr = MIN((register_secondbloc_ctr + 1),60);
+
+ // Half a second (~30 frames) of consistent recognition locks on.
+ if (register_ctr > 30 && !registered) {
+ registered = true;
+ appear.x = -1;
+ lastMove.x = blb[0]; lastMove.y = blb[1];
+
+ cout << "blob size " << blb[2] << endl;
+
+ if(register_secondbloc_ctr < 30) {
+ cout << "register pointer" << endl;
+ stringstream ss; ss << "\"mode\":\""<< GetStringForGestureCode(recognized_gesture) <<"\"";
+ send_event("Register", ss.str());
+
+ mode = recognized_gesture;
+ } else {
+ cout << "register tab swithcer" << endl;
+ send_event("Register", "\"mode\":\"twohands\"");
+ }
+ }
+
+ if(registered) {
+ // Report position as percentages of the 640x480 frame, depth scaled x2.
+ stringstream ss;
+ ss << "\"x\":" << (int)floor(blb[0]*100.0/640.0)
+ << ",\"y\":" << (int)floor(blb[1]*100.0/480.0)
+ << ",\"z\":" << (int)(mn[0] * 2.0);
+ //cout << "move: " << ss.str() << endl;
+ send_event("Move", ss.str());
+
+ hc_stack.at(hc_stack_ptr) = hcr_ctr;
+ hc_stack_ptr = (hc_stack_ptr + 1) % hc_stack.size();
+
+ //if thumb recognized - send "hand click"
+ if (mode == LABEL_FIST && recognized_gesture == LABEL_THUMB) {
+ bool fireClick = false;
+ if (appearTS > 0) {
+ // Debounce: at most one click per second.
+ double timediff = ((double)getTickCount()-appearTS)/getTickFrequency();
+ fireClick = (timediff > 1.0);
+ } else {
+ fireClick = true;
+ }
+ if(fireClick) {
+ cout << "Hand click!" << endl;
+ send_event("HandClick", "");
+
+ appearTS = getTickCount();
+ }
+ } else {
+ appearTS = -1;
+ }
+ }
+ } else {
+ if(!registered) {
+ //not registered, look for gestures
+ if(appear.x<0) {
+ //first appearence of blob
+ appear = midBlob;
+ // update_bg_model = false;
+ appearTS = getTickCount();
+ // NOTE(review): line below is garbled — angle-bracket stripping fused
+ // the "appear" log line with the timediff computation and its
+ // "(timediff > .2 && ...)" condition. Recover from upstream.
+ cout << "appear ("< .2 && timediff < 1.0) {
+ //enough time passed from appearence
+ line(outC, appear, cv::Point(blb[0],blb[1]), Scalar(0,0,255), 3);
+ if (appear.x - blb[0] > 100) {
+ // NOTE(review): garbled — the "right" branch was fused with the
+ // following "up" comparison; SwipeRight/SwipeLeft handling is missing.
+ cout << "right"< 100) {
+ cout << "up" << endl; appear.x = -1;
+ send_event("SwipeUp", "");
+ register_ctr = 0;
+ } else if (appear.y - blb[1] < -100) {
+ cout << "down" << endl; appear.x = -1;
+ send_event("SwipeDown", "");
+ register_ctr = 0;
+ }
+ }
+ if(timediff >= 1.0) {
+ // NOTE(review): garbled — the end of CheckRegistered (the "ghost"
+ // timeout / de-register path) was fused with the head of
+ // InitializeFreenect(); everything from here down through the catch
+ // block belongs to InitializeFreenect. Recover from upstream.
+ cout << "a ghost..."<startVideo();
+ device->startDepth();
+ }
+ catch (std::runtime_error e) {
+ return 0;
+ }
+ // Load the saved model and train the classifier; 0 on any failure.
+ if(!LoadModelData(data)) return 0;
+ if(!TrainModel()) return 0;
+
+ return 1;
+}
+
+// Project the current descriptor (_d) through the PCA, ask the k-NN
+// classifier for the nearest label, and fold it into a decayed vote count
+// so the reported gesture is temporally smoothed. Returns the label index
+// with the highest accumulated vote.
+int GestureEngine::GetMostLikelyGesture() {
+ // The descriptor as a 1-row float sample, projected into PCA space.
+ Mat sampleRow;
+ Mat(Mat(_d).t()).convertTo(sampleRow, CV_32FC1);
+ Mat projected = pca.project(sampleRow);
+
+ Mat nearest(1, 1, CV_32FC1);
+ classifier.find_nearest(&((CvMat)projected), 1, &((CvMat)nearest));
+
+ // Exponentially decay old votes, then add weight to the fresh label.
+ Mat votes(label_counts); // wraps label_counts' storage, so *= mutates it
+ votes *= 0.9;
+ label_counts[(int)((float*)nearest.data)[0]] += 0.1;
+
+ Point maxLoc;
+ minMaxLoc(votes, NULL, NULL, NULL, &maxLoc);
+ return maxLoc.y; // row index == label id for the column vector
+}
+
+// Main per-frame loop: grab frames, normalize/inpaint depth, segment the
+// closest blob twice (coarse by nearest depth, then refined by the blob's
+// mean depth), and hand the result to the gesture/registration logic.
+// Runs until 'die' is set by kill_gesture_engine().
+void GestureEngine::RunEngine() {
+
+ running = true;
+
+ while (!die) {
+ device->getVideo(rgbMat);
+ device->getDepth(depthMat);
+
+ InterpolateAndInpaint();
+
+ cvtColor(depthf, outC, CV_GRAY2BGR);
+
+ Mat blobMaskInput = depthf < 120; //take closer values
+ // NOTE(review): garbled — element type stripped; presumably vector<Point>.
+ vector ctr,ctr2;
+
+ //closest point to the camera
+ Point minLoc; double minval,maxval;
+ minMaxLoc(depthf, &minval, &maxval, &minLoc, NULL, blobMaskInput);
+ circle(outC, minLoc, 5, Scalar(0,255,0), 3);
+
+ // Keep only pixels within 20 depth units of the closest point.
+ blobMaskInput = depthf < (minval + 20);
+
+ Scalar blb = _refineSegments(Mat(),blobMaskInput,blobMaskOutput,ctr,ctr2,midBlob); //find contours in the foreground, choose biggest
+ /////// blb :
+ //blb[0] = x, blb[1] = y, blb[2] = 1st blob size, blb[3] = 2nd blob size.
+ if(blb[0]>=0 && blb[2] > 500) { //1st blob detected, and is big enough
+ //cvtColor(depthf, outC, CV_GRAY2BGR);
+
+ Scalar mn,stdv;
+ meanStdDev(depthf,mn,stdv,blobMaskInput);
+
+ //cout << "min: " << minval << ", max: " << maxval << ", mean: " << mn[0] << endl;
+
+ //now refining blob by looking at the mean depth value it has...
+ blobMaskInput = depthf < (mn[0] + stdv[0]*.5);
+
+ blb = _refineSegments(Mat(),blobMaskInput,blobMaskOutput,ctr,ctr2,midBlob);
+
+// imshow("blob", blobMaskOutput);
+
+ if(blb[0] >= 0 && blb[2] > 300) {
+ //draw contour
+ Scalar color(0,0,255);
+ // NOTE(review): garbled — the contour-drawing loop was fused with the
+ // second-blob check; the descriptor/classification calls in between
+ // are missing from this view. Recover from upstream.
+ for (int idx=0; idx 0) { //second blob detected
+ Scalar color2(255,0,255);
+ // NOTE(review): garbled — this drawing loop was fused with the
+ // loop/brace closings and the shutdown calls below.
+ for (int idx=0; idxstopVideo();
+ device->stopDepth();
+
+ running = false;
+}
+
+// Singleton engine instance shared by the C-style wrapper API below.
+GestureEngine ge;
+
+// Thread entry point (pthread-style signature): runs the engine loop until
+// kill_gesture_engine() is called.
+// Fixed: the original flowed off the end of a value-returning function,
+// which is undefined behavior in C++ — always return a value.
+// NOTE(review): the declaration in gesture_engine.hpp says "int"; it should
+// match this void* definition.
+void* gesture_engine(void* _arg) {
+ ge.RunEngine();
+ return NULL;
+}
+
+// Ask the engine loop to exit at its next iteration.
+void kill_gesture_engine() {
+ ge.die = true;
+}
+
+// True once the engine loop is no longer running (or never started).
+bool is_gesture_engine_dead() { return !ge.getRunning(); }
+
+// Open the Kinect, load the saved model from 'data', train the classifier.
+// Returns 1 on success, 0 on failure.
+int init_gesture_engine(const char* data) { return ge.InitializeFreenect(data); }
\ No newline at end of file
diff --git a/webkit-plugin-mac/gesture_engine.hpp b/webkit-plugin-mac/gesture_engine.hpp
new file mode 100644
index 0000000..c3dbda7
--- /dev/null
+++ b/webkit-plugin-mac/gesture_engine.hpp
@@ -0,0 +1,34 @@
+/*
+ DepthJS
+ Copyright (C) 2010 Aaron Zinman, Doug Fritz, Roy Shilkrot, Greg Elliott
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+ * gesture_engine.hpp
+ * webkit-plugin-mac
+ *
+ * Created by Roy Shilkrot on 3/6/11.
+ *
+ */
+
+#ifndef _GESTURE_ENGINE_HPP
+#define _GESTURE_ENGINE_HPP
+
+// Thread entry point (pthread-style); blocks until kill_gesture_engine().
+// Fixed: was declared "int" here but defined as "void*" in
+// gesture_engine.cpp — mismatched declaration/definition would not link.
+void* gesture_engine(void* _arg);
+// Ask the engine loop to exit at its next iteration.
+void kill_gesture_engine();
+// True once the engine loop is no longer running (or never started).
+bool is_gesture_engine_dead();
+// Initialize Kinect + model from 'data'; returns 1 on success, 0 on failure.
+int init_gesture_engine(const char* data);
+
+#endif
\ No newline at end of file
diff --git a/webkit-plugin-mac/include/libfreenect.hpp b/webkit-plugin-mac/include/libfreenect.hpp
new file mode 100644
index 0000000..98b4e8a
--- /dev/null
+++ b/webkit-plugin-mac/include/libfreenect.hpp
@@ -0,0 +1,158 @@
+/*
+ * This file is part of the OpenKinect Project. http://www.openkinect.org
+ *
+ * Copyright (c) 2010 individual OpenKinect contributors. See the CONTRIB file
+ * for details.
+ *
+ * This code is licensed to you under the terms of the Apache License, version
+ * 2.0, or, at your option, the terms of the GNU General Public License,
+ * version 2.0. See the APACHE20 and GPL2 files for the text of the licenses,
+ * or the following URLs:
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.gnu.org/licenses/gpl-2.0.txt
+ *
+ * If you redistribute this file in source form, modified or unmodified, you
+ * may:
+ * 1) Leave this header intact and distribute it under the same terms,
+ * accompanying it with the APACHE20 and GPL20 files, or
+ * 2) Delete the Apache 2.0 clause and accompany it with the GPL2 file, or
+ * 3) Delete the GPL v2 clause and accompany it with the APACHE20 file
+ * In all cases you must keep the copyright notice intact and include a copy
+ * of the CONTRIB file.
+ *
+ * Binary distributions must follow the binary distribution requirements of
+ * either License.
+ */
+
+#pragma once
+
+#include
+#include
+#include