How to get the hash key? - hash

When solving this question (149. Max Points on a Line) on LeetCode, my code has a bug with this test case:
Input [[0,0],[94911151,94911150],[94911152,94911151]]
Output 3
Expected 2
This is my code:
/**
* Definition for a point.
* struct Point {
* int x;
* int y;
* Point() : x(0), y(0) {}
* Point(int a, int b) : x(a), y(b) {}
* };
*/
class Solution {
public:
int maxPoints(vector<Point>& points) {
int size = points.size();
int ans = 0;
if (size == 0) return 0;
unordered_map<double, int> mp;
double k;
for (int i = 0; i < size; ++i) {
int num = 0;
for (int j = i + 1; j < size; ++j) {
if (points[i].x == points[j].x && points[i].y == points[j].y) {
num++;
continue;
}
// my question is about the code below:
// how can I compute the hash key from the slope?
if (points[j].x - points[i].x != 0)
k = (double)(points[j].y - points[i].y) / (double)(points[j].x - points[i].x); // calculate the slope.
else k = INT_MAX;
mp[k]++;
}
if (mp[k] == 0) mp[k] = 1, num--;
for (auto it = mp.begin(); it != mp.end(); ++it) {
if (it->second > ans) {
ans = it->second;
ans += num;
}
}
mp.clear();
}
return ans+1;
}
};
In the above test case, when it calculates the slope between [0,0] and [94911151,94911150], it comes back as k = 1 because of double precision. So I want to know: how can I get the right hash key to solve this problem?
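(Not from the original post: one common way around the precision issue is to avoid floating-point slopes altogether and key the map on the slope as a reduced integer fraction. A minimal sketch, assuming the Point struct from the problem statement and C++17 for std::gcd:)

#include <cstdlib>   // std::abs
#include <numeric>   // std::gcd (C++17)
#include <utility>

// Represent the slope dy/dx as a reduced, sign-normalized fraction so that
// 94911150/94911151 and 94911151/94911152 produce different keys.
std::pair<int, int> slopeKey(const Point& a, const Point& b) {
    int dx = b.x - a.x, dy = b.y - a.y;
    if (dx == 0) return {1, 0};              // vertical line
    if (dy == 0) return {0, 1};              // horizontal line
    int g = std::gcd(std::abs(dx), std::abs(dy));
    dx /= g; dy /= g;
    if (dx < 0) { dx = -dx; dy = -dy; }      // make the sign convention unique
    return {dy, dx};
}

Since std::pair has no standard std::hash, store the counts in a std::map<std::pair<int,int>, int>, or supply a custom hash if you want to keep using unordered_map.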

Related

Unity-How to Keep the Player out of the Walls

So I put together a procedurally generated map, and when I click it, the map randomly changes. What it does is loop through the plane and randomly generate a mesh depending on the square's placement compared to randomFillPercent. If the map coordinate is marked 1 it's a wall, and if it's 0 it's open space. I'm having an issue where, if I click the map, the player sphere ends up inside a randomly generated wall. I want to make it so that if the player's position is equal to a map coordinate that's a wall, the player gets moved down the map until it reaches open space. Unfortunately, I keep getting null reference errors. If anyone could give me some ideas, I would appreciate it. Here are my variables and my RandomFillMap function; I'm not showing the whole code. If there's something you need to see, let me know. Thank you.
public class MapGeneratorCave : MonoBehaviour {
public int width;
public int height;
public string seed;
public bool useRandomSeed;
public GameObject player;
int[,] playerPosition;
[Range(0, 100)]
public int randomFillPercent;
int[,] map;
void Start()
{
GenerateMap();
}
void Update()
{
if (Input.GetMouseButtonDown(0))
{
GenerateMap();
}
}
void GenerateMap()
{
map = new int[width, height];
RandomFillMap();
for (int i = 0; i < 5; i++)
{
SmoothMap();
}
ProcessMap();
int borderSize = 1;
int[,] borderedMap = new int[width + borderSize * 2, height + borderSize * 2];
for (int x = 0; x < borderedMap.GetLength(0); x++)
{
for (int y = 0; y < borderedMap.GetLength(1); y++)
{
if (x >= borderSize && x < width + borderSize && y >= borderSize && y < height + borderSize)
{
borderedMap[x, y] = map[x - borderSize, y - borderSize];
}
else
{
borderedMap[x, y] = 1;
}
}
}
MeshGenerator meshGen = GetComponent<MeshGenerator>();
meshGen.GenerateMesh(borderedMap, 1);
}
void RandomFillMap()
{
int playerX = (int)player.transform.position.x;
int playerY = (int)player.transform.position.y;
if (useRandomSeed)
{
seed = Time.time.ToString();
}
System.Random pseudoRandom = new System.Random(seed.GetHashCode());
for (int x = 0; x < width; x++)
{
for (int y = 0; y < height; y++)
{
if (x == 0 || x == width - 1 || y == 0 || y == height - 1)
{
map[x, y] = 1;
}
else
{
map[x, y] = (pseudoRandom.Next(0, 100) < randomFillPercent) ? 1 : 0;
}
if (playerPosition[playerX, playerY] == map[x, y] && map[x, y] == 1)
{
playerPosition[playerX, playerY] = map[x, y - 1];
}
}
}
}
You're probably getting null references because playerPosition[playerX, playerY] doesn't exist.
What you should be using instead of a multi-dimensional array (int[,]) is a Vector2.
Then you would do something like this:
for (int x = 0; x < width; x++)
{
for (int y = 0; y < height; y++)
{
if (x == 0 || x == width - 1 || y == 0 || y == height - 1)
{
map[x, y] = 1;
}
else
{
map[x, y] = (pseudoRandom.Next(0, 100) < randomFillPercent) ? 1 : 0;
}
}
}
while (map[(int)playerPosition.x, (int)playerPosition.y] == 1) {
(int)playerPosition.y --; // keep moving the player down until open space
// make sure you aren't out of bounds and such
}

iRPROP+ Multilayer Perceptron

Hello everyone. This is the code of the iRPROP+ algorithm for my MLP. When I try to train my network, the standard deviation decreases for 1500 epochs (very slowly: from ~0.5 to 0.4732), but then it suddenly starts to increase.
Can someone tell me what I did wrong?
public void RPROP()
{
double a = 1.2, b = 0.5, nMax = 50, nMin = 0.000001;
for (int l = Network.Length - 1; l > 0; l--)
{
for (int i = 0; i < Network[l].getSize(); i++)
{
Neuron n = Network[l].Neurons[i];
double sum = 0;
if (l == Network.Length - 1) n.Delta = (n.Output - DesiredOutput[i]) * ActFunc.calcDeprivateFunction(n.Output);
else
{
for (int k = 0; k < Network[l + 1].getSize(); k++)
{
sum += Network[l + 1].Neurons[k].getWeight(i) * Network[l + 1].Neurons[k].Delta;
}
n.Delta = sum * ActFunc.calcDeprivateFunction(n.Output);
}
}
}
for (int l = 1; l < Network.Length; l++)
{
for (int i = 0; i < Network[l].getSize(); i++)
{
Neuron n = Network[l].Neurons[i];
if ((n.PrevDelta * n.Delta) > 0)
{
n.N = Math.Min(a * n.PrevN, nMax);
n.Bias -= n.N * Math.Sign(n.Delta);
for (int j = 0; j < Network[l - 1].getSize(); j++)
{
n.setWeight(j, n.getWeight(j) - n.N * Math.Sign(n.Delta));
}
n.PrevDelta = n.Delta;
}
else if ((n.PrevDelta * n.Delta) < 0)
{
n.N = Math.Max(b * n.PrevN, nMin);
if (this.CurrentError > this.LastError)
{
n.Bias += n.PrevN * Math.Sign(n.PrevDelta);
for (int j = 0; j < Network[l - 1].getSize(); j++)
{
n.setWeight(j, n.getWeight(j) + n.PrevN * Math.Sign(n.PrevDelta));
}
}
n.Delta = 0;
}
else if ((n.PrevDelta * n.Delta) == 0)
{
n.Bias -= n.N * Math.Sign(n.Delta);
for (int j = 0; j < Network[l - 1].getSize(); j++)
{
n.setWeight(j, n.getWeight(j) - n.N * Math.Sign(n.Delta));
}
n.PrevDelta = n.Delta;
}
n.PrevN = n.N;
}
}
}
At first glance: you calculate the error for a single training element and immediately apply it to the network. Try running over the full training set without changing the weights, just accumulating the deltas. After that, update the weights once, set the previous delta, and start over.
Also, there is no update for the neuron threshold.
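As a rough, self-contained illustration of that accumulate-then-update pattern on a toy one-parameter model (this is only a sketch of the idea, not the poster's Neuron/Network classes):

#include <cstdio>
#include <utility>
#include <vector>

int main() {
    // Toy model y = w * x fitted to y = 2x: sum the gradient over the whole
    // training set first, then apply a single update per epoch.
    std::vector<std::pair<double, double>> data = {{1, 2}, {2, 4}, {3, 6}};
    double w = 0.0, lr = 0.05;
    for (int epoch = 0; epoch < 100; ++epoch) {
        double grad = 0.0;
        for (const auto& [x, y] : data)   // 1) accumulate, no weight changes yet
            grad += (w * x - y) * x;
        w -= lr * grad;                   // 2) one update per epoch
    }
    std::printf("learned w = %f\n", w);   // converges near 2.0
    return 0;
}

The same idea applies to the RPROP update: sum each neuron's Delta over all training samples, then run the sign-based weight step once per epoch.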

How to make the blackboard text appear clearer using MATLAB?

What sequence of filters should I apply if I want the final image to be clearer, with a digital-looking result? I mean only two distinct colors: one for the board and one for the chalk writing.
When it comes to identifying text in images, you are better off using the Stroke Width Transform.
Here's a little result I obtained on your image (the basic transform + connected components without filtering):
My mex implementation is based on code from here:
#include "mex.h"
#include <vector>
#include <map>
#include <set>
#include <algorithm>
#include <math.h>
using namespace std;
#define PI 3.14159265
struct Point2d {
int x;
int y;
float SWT;
};
struct Point2dFloat {
float x;
float y;
};
struct Ray {
Point2d p;
Point2d q;
std::vector<Point2d> points;
};
void strokeWidthTransform(const float * edgeImage,
const float * gradientX,
const float * gradientY,
bool dark_on_light,
float * SWTImage,
int h, int w,
std::vector<Ray> & rays) {
// First pass
float prec = .05f;
for( int row = 0; row < h; row++ ){
const float* ptr = edgeImage + row*w;
for ( int col = 0; col < w; col++ ){
if (*ptr > 0) {
Ray r;
Point2d p;
p.x = col;
p.y = row;
r.p = p;
std::vector<Point2d> points;
points.push_back(p);
float curX = (float)col + 0.5f;
float curY = (float)row + 0.5f;
int curPixX = col;
int curPixY = row;
float G_x = gradientX[ col + row*w ];
float G_y = gradientY[ col + row*w ];
// normalize gradient
float mag = sqrt( (G_x * G_x) + (G_y * G_y) );
if (dark_on_light){
G_x = -G_x/mag;
G_y = -G_y/mag;
} else {
G_x = G_x/mag;
G_y = G_y/mag;
}
while (true) {
curX += G_x*prec;
curY += G_y*prec;
if ((int)(floor(curX)) != curPixX || (int)(floor(curY)) != curPixY) {
curPixX = (int)(floor(curX));
curPixY = (int)(floor(curY));
// check if pixel is outside boundary of image
if (curPixX < 0 || (curPixX >= w) || curPixY < 0 || (curPixY >= h)) {
break;
}
Point2d pnew;
pnew.x = curPixX;
pnew.y = curPixY;
points.push_back(pnew);
if ( edgeImage[ curPixY*w+ curPixX ] > 0) {
r.q = pnew;
// dot product
float G_xt = gradientX[ curPixY*w + curPixX ];
float G_yt = gradientY[ curPixY*w + curPixX ];
mag = sqrt( (G_xt * G_xt) + (G_yt * G_yt) );
if (dark_on_light){
G_xt = -G_xt/mag;
G_yt = -G_yt/mag;
} else {
G_xt = G_xt/mag;
G_yt = G_yt/mag;
}
if (acos(G_x * -G_xt + G_y * -G_yt) < PI/2.0 ) {
float length = sqrt( ((float)r.q.x - (float)r.p.x)*((float)r.q.x - (float)r.p.x) + ((float)r.q.y - (float)r.p.y)*((float)r.q.y - (float)r.p.y));
for (std::vector<Point2d>::iterator pit = points.begin(); pit != points.end(); pit++) {
float* pSWT = SWTImage + w * pit->y + pit->x;
if (*pSWT < 0) {
*pSWT = length;
} else {
*pSWT = std::min(length, *pSWT);
}
}
r.points = points;
rays.push_back(r);
}
break;
}
}
}
}
ptr++;
}
}
}
bool Point2dSort(const Point2d &lhs, const Point2d &rhs) {
return lhs.SWT < rhs.SWT;
}
void SWTMedianFilter(float * SWTImage, int h, int w,
std::vector<Ray> & rays, float maxWidth = -1 ) {
for (std::vector<Ray>::iterator rit = rays.begin(); rit != rays.end(); rit++) {
for (std::vector<Point2d>::iterator pit = rit->points.begin(); pit != rit->points.end(); pit++) {
pit->SWT = SWTImage[ w*pit->y + pit->x ];
}
std::sort(rit->points.begin(), rit->points.end(), &Point2dSort);
//std::nth_element( rit->points.begin(), rit->points.end(), rit->points.size()/2, &Point2dSort );
float median = (rit->points[rit->points.size()/2]).SWT;
if ( maxWidth > 0 && median >= maxWidth ) {
median = -1;
}
for (std::vector<Point2d>::iterator pit = rit->points.begin(); pit != rit->points.end(); pit++) {
SWTImage[ w*pit->y + pit->x ] = std::min(pit->SWT, median);
}
}
}
typedef std::vector< std::set<int> > graph_t; // graph as a list of neighbors per node
void connComp( const graph_t& g, std::vector<int>& c, int i, int l ) {
// starting from node i, label this conn-comp with label l
if ( i < 0 || i >= (int)g.size() ) {
return;
}
std::vector< int > stack;
// push i
stack.push_back(i);
c[i] = l;
while ( ! stack.empty() ) {
// pop
i = stack.back();
stack.pop_back();
// go over all neighbors
for ( std::set<int>::const_iterator it = g[i].begin(); it != g[i].end(); it++ ) {
if ( c[*it] < 0 ) {
stack.push_back( *it );
c[ *it ] = l;
}
}
}
}
int findNextToLabel( const graph_t& g, const vector<int>& c ) {
for ( int i = 0 ; i < c.size(); i++ ) {
if ( c[i] < 0 ) {
return i;
}
}
return c.size();
}
int connected_components(const graph_t& g, vector<int>& c) {
// check for empty graph!
if ( g.empty() ) {
return 0;
}
int i = 0;
int num_conn = 0;
do {
connComp( g, c, i, num_conn );
num_conn++;
i = findNextToLabel( g, c );
} while ( i < g.size() );
return num_conn;
}
std::vector< std::vector<Point2d> >
findLegallyConnectedComponents(const float* SWTImage, int h, int w,
std::vector<Ray> & rays) {
std::map<int, int> Map;
std::map<int, Point2d> revmap;
std::vector<std::vector<Point2d> > components; // empty
int num_vertices = 0, idx = 0;
graph_t g;
// Number vertices for graph. Associate each point with number
for( int row = 0; row < h; row++ ){
for (int col = 0; col < w; col++ ){
idx = col + w * row;
if (SWTImage[idx] > 0) {
Map[idx] = num_vertices;
Point2d p;
p.x = col;
p.y = row;
revmap[num_vertices] = p;
num_vertices++;
std::set<int> empty;
g.push_back(empty);
}
}
}
if ( g.empty() ) {
return components; // nothing to do with an empty graph...
}
for( int row = 0; row < h; row++ ){
for (int col = 0; col < w; col++ ){
idx = col + w * row;
if ( SWTImage[idx] > 0) {
// check pixel to the right, right-down, down, left-down
int this_pixel = Map[idx];
float thisVal = SWTImage[idx];
if (col+1 < w) {
float right = SWTImage[ w*row + col + 1 ];
if (right > 0 && (thisVal/right <= 3.0 || right/thisVal <= 3.0)) {
g[this_pixel].insert( Map[ w*row + col + 1 ] );
g[ Map[ w*row + col + 1 ] ].insert( this_pixel );
//boost::add_edge(this_pixel, map.at(row * SWTImage->width + col + 1), g);
}
}
if (row+1 < h) {
if (col+1 < w) {
float right_down = SWTImage[ w*(row+1) + col + 1 ];
if (right_down > 0 && (thisVal/right_down <= 3.0 || right_down/thisVal <= 3.0)) {
g[ this_pixel ].insert( Map[ w*(row+1) + col + 1 ] );
g[ Map[ w*(row+1) + col + 1 ] ].insert(this_pixel);
// boost::add_edge(this_pixel, map.at((row+1) * SWTImage->width + col + 1), g);
}
}
float down = SWTImage[ w*(row+1) + col ];
if (down > 0 && (thisVal/down <= 3.0 || down/thisVal <= 3.0)) {
g[ this_pixel ].insert( Map[ w*(row+1) + col ] );
g[ Map[ w*(row+1) + col ] ].insert( this_pixel );
//boost::add_edge(this_pixel, map.at((row+1) * SWTImage->width + col), g);
}
if (col-1 >= 0) {
float left_down = SWTImage[ w*(row+1) + col - 1 ];
if (left_down > 0 && (thisVal/left_down <= 3.0 || left_down/thisVal <= 3.0)) {
g[ this_pixel ].insert( Map[ w*(row+1) + col - 1 ] );
g[ Map[ w*(row+1) + col - 1 ] ].insert( this_pixel );
//boost::add_edge(this_pixel, map.at((row+1) * SWTImage->width + col - 1), g);
}
}
}
}
}
}
std::vector<int> c(num_vertices, -1);
int num_comp = connected_components(g, c);
components.reserve(num_comp);
//std::cout << "Before filtering, " << num_comp << " components and " << num_vertices << " vertices" << std::endl;
for (int j = 0; j < num_comp; j++) {
std::vector<Point2d> tmp;
components.push_back( tmp );
}
for (int j = 0; j < num_vertices; j++) {
Point2d p = revmap[j];
(components[c[j]]).push_back(p);
}
return components;
}
enum {
EIN = 0,
GXIN,
GYIN,
DOLFIN,
MAXWIN,
NIN };
void mexFunction( int nout, mxArray* pout[], int nin, const mxArray* pin[] ) {
//
// make sure images are input transposed so that they are arranged row-major in memory
//
mxAssert( nin == NIN, "wrong number of inputs" );
mxAssert( nout > 1, "only one output" );
int h = mxGetN( pin[EIN] ); // inputs are transposed!
int w = mxGetM( pin[EIN] );
mxAssert( mxIsClass( pin[EIN], mxSINGLE_CLASS ) && h == mxGetN( pin[EIN] ) && w == mxGetM( pin[EIN] ), "edge map incorrect");
mxAssert( mxIsClass( pin[GXIN], mxSINGLE_CLASS ) && h == mxGetN( pin[GXIN] ) && w == mxGetM( pin[GXIN] ), "edge map incorrect");
mxAssert( mxIsClass( pin[GYIN], mxSINGLE_CLASS ) && h == mxGetN( pin[GYIN] ) && w == mxGetM( pin[GYIN] ), "edge map incorrect");
const float * edgeImage = (float*) mxGetData( pin[EIN] );
const float * gradientX = (float*) mxGetData( pin[GXIN] );
const float * gradientY = (float*) mxGetData( pin[GYIN] );
bool dark_on_light = mxGetScalar( pin[DOLFIN] ) != 0 ;
float maxWidth = mxGetScalar( pin[MAXWIN] );
// allocate output
pout[0] = mxCreateNumericMatrix( w, h, mxSINGLE_CLASS, mxREAL );
float * SWTImage = (float*) mxGetData( pout[0] );
// set SWT to -1
for ( int i = 0 ; i < w*h; i++ ) {
SWTImage[i] = -1;
}
std::vector<Ray> rays;
strokeWidthTransform ( edgeImage, gradientX, gradientY, dark_on_light, SWTImage, h, w, rays );
SWTMedianFilter ( SWTImage, h, w, rays, maxWidth );
// connected components
if ( nout > 1 ) {
// Calculate legally connect components from SWT and gradient image.
// return type is a vector of vectors, where each outer vector is a component and
// the inner vector contains the (y,x) of each pixel in that component.
std::vector<std::vector<Point2d> > components = findLegallyConnectedComponents(SWTImage, h, w, rays);
pout[1] = mxCreateNumericMatrix( w, h, mxSINGLE_CLASS, mxREAL );
float* pComp = (float*) mxGetData( pout[1] );
for ( int i = 0 ; i < w*h; i++ ) {
pComp[i] = 0;
}
for ( int ci = 0 ; ci < components.size(); ci++ ) {
for ( std::vector<Point2d>::iterator it = components[ci].begin() ; it != components[ci].end(); it++ ) {
pComp[ w * it->y + it->x ] = ci + 1;
}
}
}
}
Matlab function calling stroke-width-transform (SWT) mex-file:
function [swt swtcc] = SWT( img, dol, maxWidth )
if size( img, 3 ) == 3
img = rgb2gray(img);
end
img = im2single(img);
edgeMap = single( edge( img, 'canny', .15 ) );
img = imfilter( img, fspecial('gauss',[5 5], 0.3*(2.5-1)+.8) );
gx = imfilter( img, fspecial('prewitt')' );
gy = imfilter( img, fspecial('prewitt') );
gx = single(medfilt2( gx, [3 3] ));
gy = single(medfilt2( gy, [3 3] ));
[swt swtcc] = swt_mex( edgeMap.', gx.', gy.', dol, maxWidth );
swt = swt';
swtcc = double(swtcc');
Try this:
I = imread('...'); % Your board image
ThreshConstant = 1; % Try to vary this constant.
bw = im2bw(I , ThreshConstant * graythresh(I)); % Black-white image
SegmentedImg = I.*repmat(uint8(bw), [1 1 3]);
Just do imshow(bw); and you will normally get a well-segmented two-color image.
If the threshold is too strong, try varying ThreshConstant between 0.5 and 1.5.
Or you could try this:
im = imread('http://i.imgur.com/uJIXp13.jpg'); %the image posted above
im2=rgb2gray(im);
maxp=uint16(max(max(im2)));
minp=uint16(min(min(im2)));
bw=im2bw(im2,(double(minp+maxp))/(2*255)); % the threshold as Alexandre said, but using the min/max intensity as the threshold
bw=~bw; % invert so the white chalk letters become black letters on a white background
imshow(bw)
This should be the result.
Keep in mind that you can apply this technique adaptively with a window, computing the threshold for each window, for best results.
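As a sketch of that windowed idea using OpenCV in C++ rather than MATLAB (the file name, window size, and offset below are only placeholder choices):

#include <opencv2/opencv.hpp>

int main() {
    // Threshold each pixel against the mean of its local window instead of
    // one global threshold, so uneven board lighting matters less.
    cv::Mat gray = cv::imread("board.jpg", cv::IMREAD_GRAYSCALE); // placeholder path
    if (gray.empty()) return 1;
    cv::Mat bw;
    // 35x35 window; C = -10 means a pixel must be ~10 levels brighter than its
    // local mean to count as chalk (white in the output).
    cv::adaptiveThreshold(gray, bw, 255, cv::ADAPTIVE_THRESH_MEAN_C,
                          cv::THRESH_BINARY, 35, -10);
    cv::imwrite("board_bw.png", bw);
    return 0;
}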

imregionalmax matlab function's equivalent in opencv

I have an image of connected components (filled circles). If I want to segment them I can use the watershed algorithm. I prefer writing my own function for watershed instead of using the built-in function in OpenCV. I have successfully... How do I find the regional maxima of objects using OpenCV?
I wrote a function myself. My results were quite similar to MATLAB, although not exact. This function is implemented for CV_32F but it can easily be modified for other types.
I mark all the points that are not part of a minimum region by checking all the neighbors. The remaining regions are either minima, maxima or areas of inflection.
I use connected components to label each region.
I check each region for any point belonging to a maxima, if yes then I push that label into a vector.
Finally, I sort the bad labels, erase duplicates, and mark every point belonging to one of those labels in the output as not a minimum.
All that remains are the regions of minima.
Here is the code:
// output is a binary image
// 1: not a min region
// 0: part of a min region
// 2: not sure if min or not
// 3: uninitialized
void imregionalmin(cv::Mat& img, cv::Mat& out_img)
{
// pad the border of img with 1 and copy to img_pad
cv::Mat img_pad;
cv::copyMakeBorder(img, img_pad, 1, 1, 1, 1, IPL_BORDER_CONSTANT, 1);
// initialize the output to 3 (uninitialized; see the legend above)
out_img = cv::Mat::ones(img.rows, img.cols, CV_8U)+2;
// initialize pointers to matrices
float* in = (float *)(img_pad.data);
uchar* out = (uchar *)(out_img.data);
// size of matrix
int in_size = img_pad.cols*img_pad.rows;
int out_size = img.cols*img.rows;
int x, y;
for (int i = 0; i < out_size; i++) {
// find x, y indexes
y = i % img.cols;
x = i / img.cols;
neighborCheck(in, out, i, x, y, img_pad.cols); // all regions are either min or max
}
cv::Mat label;
cv::connectedComponents(out_img, label);
int* lab = (int *)(label.data);
in = (float *)(img.data);
in_size = img.cols*img.rows;
std::vector<int> bad_labels;
for (int i = 0; i < out_size; i++) {
// find x, y indexes
y = i % img.cols;
x = i / img.cols;
if (lab[i] != 0) {
if (neighborCleanup(in, out, i, x, y, img.rows, img.cols) == 1) {
bad_labels.push_back(lab[i]);
}
}
}
std::sort(bad_labels.begin(), bad_labels.end());
bad_labels.erase(std::unique(bad_labels.begin(), bad_labels.end()), bad_labels.end());
for (int i = 0; i < out_size; ++i) {
if (lab[i] != 0) {
if (std::find(bad_labels.begin(), bad_labels.end(), lab[i]) != bad_labels.end()) {
out[i] = 0;
}
}
}
}
int inline neighborCleanup(float* in, uchar* out, int i, int x, int y, int x_lim, int y_lim)
{
int index;
for (int xx = x - 1; xx < x + 2; ++xx) {
for (int yy = y - 1; yy < y + 2; ++yy) {
if (((xx == x) && (yy==y)) || xx < 0 || yy < 0 || xx >= x_lim || yy >= y_lim)
continue;
index = xx*y_lim + yy;
if ((in[i] == in[index]) && (out[index] == 0))
return 1;
}
}
return 0;
}
void inline neighborCheck(float* in, uchar* out, int i, int x, int y, int x_lim)
{
int indexes[8], cur_index;
indexes[0] = x*x_lim + y;
indexes[1] = x*x_lim + y+1;
indexes[2] = x*x_lim + y+2;
indexes[3] = (x+1)*x_lim + y+2;
indexes[4] = (x + 2)*x_lim + y+2;
indexes[5] = (x + 2)*x_lim + y + 1;
indexes[6] = (x + 2)*x_lim + y;
indexes[7] = (x + 1)*x_lim + y;
cur_index = (x + 1)*x_lim + y+1;
for (int t = 0; t < 8; t++) {
if (in[indexes[t]] < in[cur_index]) {
out[i] = 0;
break;
}
}
if (out[i] == 3)
out[i] = 1;
}
The following listing is a function similar to Matlab's "imregionalmax". It looks for at most nLocMax local maxima above threshold, where the found local maxima are at least minDistBtwLocMax pixels apart. It returns the actual number of local maxima found. Notice that it uses OpenCV's minMaxLoc to find global maxima. It is "opencv-self-contained" except for the (easy to implement) function vdist, which computes the Euclidean distance between points (r,c) and (row,col).
input is one-channel CV_32F matrix, and locations is nLocMax (rows) by 2 (columns) CV_32S matrix.
int imregionalmax(Mat input, int nLocMax, float threshold, float minDistBtwLocMax, Mat locations)
{
Mat scratch = input.clone();
int nFoundLocMax = 0;
for (int i = 0; i < nLocMax; i++) {
Point location;
double maxVal;
minMaxLoc(scratch, NULL, &maxVal, NULL, &location);
if (maxVal > threshold) {
nFoundLocMax += 1;
int row = location.y;
int col = location.x;
locations.at<int>(i,0) = row;
locations.at<int>(i,1) = col;
int r0 = (row-minDistBtwLocMax > -1 ? row-minDistBtwLocMax : 0);
int r1 = (row+minDistBtwLocMax < scratch.rows ? row+minDistBtwLocMax : scratch.rows-1);
int c0 = (col-minDistBtwLocMax > -1 ? col-minDistBtwLocMax : 0);
int c1 = (col+minDistBtwLocMax < scratch.cols ? col+minDistBtwLocMax : scratch.cols-1);
for (int r = r0; r <= r1; r++) {
for (int c = c0; c <= c1; c++) {
if (vdist(Point2DMake(r, c),Point2DMake(row, col)) <= minDistBtwLocMax) {
scratch.at<float>(r,c) = 0.0;
}
}
}
} else {
break;
}
}
return nFoundLocMax;
}
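The helper functions vdist and Point2DMake are not shown in the answer; a minimal guess at what they might look like, based only on how they are used above:

#include <cmath>
// (cv::Point2f comes from the OpenCV headers the function above already needs)

// Point2DMake builds a point from (row, col); vdist is the Euclidean distance
// between two such points. These are assumptions, not the answer's original code.
static inline cv::Point2f Point2DMake(float r, float c) {
    return cv::Point2f(r, c);
}

static inline float vdist(const cv::Point2f& a, const cv::Point2f& b) {
    const float dr = a.x - b.x;
    const float dc = a.y - b.y;
    return std::sqrt(dr * dr + dc * dc);
}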
I do not know if it is what you want, but in my answer to this post, I gave some code to find local maxima (peaks) in a grayscale image (resulting from distance transform).
The approach relies on subtracting the original image from the dilated image and finding the zero pixels.
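For illustration, here is a minimal OpenCV/C++ sketch of that dilate-and-compare idea (my own sketch, not the code from the linked answer):

#include <opencv2/opencv.hpp>

// A pixel is a local maximum if it equals the maximum (dilation) of its
// neighborhood. Assumes a single-channel input image.
cv::Mat localMaxima(const cv::Mat& img) {
    cv::Mat dilated, peaks;
    cv::dilate(img, dilated, cv::Mat());           // 3x3 max filter by default
    cv::compare(img, dilated, peaks, cv::CMP_GE);  // 255 where img equals the local max
    return peaks;                                  // CV_8U mask of peak pixels
}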
I hope it helps. Good luck.
I had the same problem some time ago, and the solution was to reimplement the imregionalmax algorithm in OpenCV/C++. It is not that complicated, because you can find the C++ source code of the function in the Matlab distribution (somewhere in the toolbox). All you have to do is read it carefully and understand the algorithm described there. Then rewrite it, or remove the Matlab-specific checks, and you'll have it.

Looking for SLAB6 implementation

I'm looking to implement SLAB6 in my raycaster, especially the kv6 support for voxel models. However, the SLAB6 source by Ken Silverman is totally unreadable (mostly ASM), so I was hoping someone could point me to a proper C / Java source that loads kv6 models, or preferably explain the workings in some pseudocode (since I want to support kv6, I'd like to know how it works). Thanks, Kaj
EDIT: the implementation would be in Java.
I found some code in an application called VoxelGL (author not mentioned in the source code):
void CVoxelWorld::generateSlabFromData(unsigned char *data, VoxelData *vdata, Slab *slab)
{
int currentpattern = 1;
int i = 0;
int n, totalcount, v, count;
n = 0;
v = 0;
while (1)
{
while (data[i] == currentpattern)
{
if (currentpattern == 1)
v++;
i++;
if (i == 256)
break;
}
n++;
if (i == 256)
{
if (currentpattern == 0)
n--;
break;
}
currentpattern ^= 1;
}
slab->nentries = n;
if (slab->description != 0)delete [] slab->description;
if (slab->data != 0)delete [] slab->data;
slab->description = new int[n];
slab->data = new VoxelData[v];
totalcount = 0;
v = 0;
currentpattern = 1;
for (i = 0; i < n; i++)
{
count = 0;
while (data[totalcount] == currentpattern)
{
count++;
totalcount++;
if (totalcount == 256)
break;
}
slab->description[i] = count-1;
if (i % 2 == 0)
{
memcpy(slab->data + v, vdata + totalcount - count, 3 * count);
v += count;
}
currentpattern ^= 1;
}
}
And:
#define clustersize 8
Slab *CVoxelWorld::getSlab(int x, int z)
{
int xgrid = x / clustersize;
int ygrid = z / clustersize;
int clusteroffset = xgrid * 1024 * clustersize + ygrid * clustersize * clustersize;
return &m_data[clusteroffset + (x & (clustersize - 1)) + (z & (clustersize - 1)) * clustersize];
}
And:
int CVoxelWorld::isSolid(int x, int y, int z)
{
Slab *slab;
if (y < 0 || y > 256)
return 0;
slab = getSlab(x, z);
int counter = 0;
for (int i = 0; i < slab->nentries; i++)
{
int height = slab->description[i] + 1;
if (i % 2 == 0)
{
if (y >= counter && y < counter + height)
return 1;
}
counter += height;
}
return 0;
}
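From my reading of the code above (not an authoritative description of the kv6/slab format): each Slab encodes one 256-voxel column as alternating solid/empty run lengths, starting with a solid run, where description[i] stores the run length minus one and data holds VoxelData for the solid voxels only. A small sketch that expands a slab back into a flat 0/1 column, using the Slab struct from the code above:

#include <vector>

// Decode a Slab into a flat 256-entry solid(1)/empty(0) column.
// Even-indexed runs are solid, odd-indexed runs are empty, matching isSolid().
std::vector<int> expandSlab(const Slab& slab) {
    std::vector<int> column(256, 0);
    int y = 0;
    for (int i = 0; i < slab.nentries && y < 256; ++i) {
        int runLength = slab.description[i] + 1;   // stored as count - 1
        int value = (i % 2 == 0) ? 1 : 0;          // first run is solid
        for (int k = 0; k < runLength && y < 256; ++k)
            column[y++] = value;
    }
    return column;
}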