I want to use pixel coordinates in my shader

From https://webgl2fundamentals.org/webgl/lessons/webgl-image-processing.html:
"WebGL2 adds the ability to read a texture using pixel coordinates as well. Which way is best is up to you. I feel like it's more common to use texture coordinates than pixel coordinates."
Nowhere is this explained other than by passing a uniform with the texture dimensions in pixels and calculating from there. Is there a way to access these pixel coordinates without that calculation, as the quote suggests?

You can read individual pixels/texels from a texture in WebGL2 with texelFetch
vec4 color = texelFetch(someUniformSampler, ivec2(pixelX, pixelY), intMipLevel);
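If the texture is the same size as the render target you can even combine this with gl_FragCoord, which is already in pixels, and skip uniforms entirely (a one-line sketch; the ivec2() constructor truncates the half-pixel offsets in gl_FragCoord):
vec4 color = texelFetch(someUniformSampler, ivec2(gl_FragCoord.xy), 0);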
For example, compute the average color of a texture by reading each pixel
const vs = `#version 300 es
void main() {
  gl_Position = vec4(0, 0, 0, 1);
  gl_PointSize = 1.0;
}
`;
const fs = `#version 300 es
precision highp float;
uniform sampler2D tex;
out vec4 outColor;
void main() {
  int level = 0;
  ivec2 size = textureSize(tex, level);
  vec4 color = vec4(0);
  for (int y = 0; y < size.y; ++y) {
    for (int x = 0; x < size.x; ++x) {
      color += texelFetch(tex, ivec2(x, y), level);
    }
  }
  outColor = color / float(size.x * size.y);
}
`;
function main() {
  const gl = document.createElement('canvas').getContext('webgl2');
  if (!gl) {
    return alert('need webgl2');
  }
  const prg = twgl.createProgram(gl, [vs, fs]);
  gl.canvas.width = 1;
  gl.canvas.height = 1;
  gl.viewport(0, 0, 1, 1);
  const tex = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D, tex);
  // so we don't need mips
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
  // so we can pass a non multiple of 4 bytes
  gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1);
  const values = new Uint8Array([10, 255, 13, 70, 56, 45, 89]);
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.R8, values.length, 1, 0, gl.RED, gl.UNSIGNED_BYTE, values);
  gl.useProgram(prg);
  gl.drawArrays(gl.POINTS, 0, 1);
  const gpuAverage = new Uint8Array(4);
  gl.readPixels(0, 0, 1, 1, gl.RGBA, gl.UNSIGNED_BYTE, gpuAverage);
  const jsAverage = values.reduce((s, v) => s + v) / values.length;
  console.log('gpuAverage:', gpuAverage[0]);
  console.log('jsAverage:', jsAverage);
}
main();
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
Notes: since the canvas is RGBA8 we can only get an integer result. We could switch to some float format, but that would complicate the example, which is about texelFetch, not rendering.
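For completeness, here is a minimal sketch of such a float setup, assuming the EXT_color_buffer_float extension is available (everything else as in the example above):
// a sketch, assuming the WebGL2 context `gl` and texture `tex` from the example above
const ext = gl.getExtension('EXT_color_buffer_float');
if (!ext) { /* can't render to float textures on this machine */ }
const floatTex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, floatTex);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA32F, 1, 1, 0, gl.RGBA, gl.FLOAT, null);
const fb = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, floatTex, 0);
gl.bindTexture(gl.TEXTURE_2D, tex);  // make the data texture current again before drawing
// draw as before, then read the float result:
const result = new Float32Array(4);
gl.readPixels(0, 0, 1, 1, gl.RGBA, gl.FLOAT, result);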
Of course, just by changing the data from R8 to RGBA8 we can average 4 arrays at once if we interleave the values:
const vs = `#version 300 es
void main() {
  gl_Position = vec4(0, 0, 0, 1);
  gl_PointSize = 1.0;
}
`;
const fs = `#version 300 es
precision highp float;
uniform sampler2D tex;
out vec4 outColor;
void main() {
  int level = 0;
  ivec2 size = textureSize(tex, level);
  vec4 color = vec4(0);
  for (int y = 0; y < size.y; ++y) {
    for (int x = 0; x < size.x; ++x) {
      color += texelFetch(tex, ivec2(x, y), level);
    }
  }
  outColor = color / float(size.x * size.y);
}
`;
function main() {
  const gl = document.createElement('canvas').getContext('webgl2');
  if (!gl) {
    return alert('need webgl2');
  }
  const prg = twgl.createProgram(gl, [vs, fs]);
  gl.canvas.width = 1;
  gl.canvas.height = 1;
  gl.viewport(0, 0, 1, 1);
  const tex = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D, tex);
  // so we don't need mips
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
  // so we can pass a non multiple of 4 bytes
  gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1);
  // note: every value must fit in a byte (0..255) or the Uint8Array will wrap it
  const values = new Uint8Array([
    10, 100, 200, 1,
    12, 150, 231, 9,
    50, 129, 240, 3,
    45, 141, 250, 2,
    12, 123, 212, 4,
  ]);
  const width = 1;
  const height = values.length / 4;
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA8, width, height, 0, gl.RGBA, gl.UNSIGNED_BYTE, values);
  gl.useProgram(prg);
  gl.drawArrays(gl.POINTS, 0, 1);
  const gpuAverages = new Uint8Array(4);
  gl.readPixels(0, 0, 1, 1, gl.RGBA, gl.UNSIGNED_BYTE, gpuAverages);
  let jsAverages = [0, 0, 0, 0];
  for (let i = 0; i < height; ++i) {
    for (let j = 0; j < 4; ++j) {
      jsAverages[j] += values[i * 4 + j];
    }
  }
  jsAverages = jsAverages.map(v => v / height);
  console.log('gpuAverage:', gpuAverages.join(', '));
  console.log('jsAverage:', jsAverages.join(', '));
}
main();
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
To do more requires figuring out some way to arrange the data and using an input to the fragment shader to work out where the data is. For example, we can again interleave the data, this time 5 arrays, so the data goes 0,1,2,3,4,0,1,2,3,4,0,1,2,3,4.
Let's go back to R8 and do 5 separate arrays. We need to draw 5 pixels. We can tell which pixel is being drawn by looking at gl_FragCoord. We can use that to offset which texels we look at, and pass in how many to skip.
const vs = `#version 300 es
void main() {
  gl_Position = vec4(0, 0, 0, 1);
  gl_PointSize = 100.0;  // large enough to cover the whole canvas
}
`;
const fs = `#version 300 es
precision highp float;
uniform sampler2D tex;
uniform int numArrays;
out vec4 outColor;
void main() {
  int level = 0;
  int start = int(gl_FragCoord.x);
  ivec2 size = textureSize(tex, level);
  vec4 color = vec4(0);
  for (int y = 0; y < size.y; ++y) {
    for (int x = start; x < size.x; x += numArrays) {
      color += texelFetch(tex, ivec2(x, y), level);
    }
  }
  outColor = color / float(size.x / numArrays * size.y);
}
`;
function main() {
  const gl = document.createElement('canvas').getContext('webgl2');
  if (!gl) {
    return alert('need webgl2');
  }
  const prg = twgl.createProgram(gl, [vs, fs]);
  const numArraysLoc = gl.getUniformLocation(prg, "numArrays");
  const tex = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D, tex);
  // so we don't need mips
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
  // so we can pass a non multiple of 4 bytes
  gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1);
  const numArrays = 5;
  // note: every value must fit in a byte (0..255) or the Uint8Array will wrap it
  const values = new Uint8Array([
    10, 100, 200, 1, 70,
    12, 150, 231, 9, 71,
    50, 129, 240, 3, 90,
    45, 141, 250, 2, 80,
    12, 123, 212, 4, 75,
  ]);
  const width = values.length;
  const height = 1;
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.R8, width, height, 0, gl.RED, gl.UNSIGNED_BYTE, values);
  gl.canvas.width = numArrays;
  gl.canvas.height = 1;
  gl.viewport(0, 0, numArrays, 1);
  gl.useProgram(prg);
  gl.uniform1i(numArraysLoc, numArrays);
  gl.drawArrays(gl.POINTS, 0, 1);
  const gpuData = new Uint8Array(4 * numArrays);
  gl.readPixels(0, 0, numArrays, 1, gl.RGBA, gl.UNSIGNED_BYTE, gpuData);
  const gpuAverages = [];
  for (let i = 0; i < numArrays; ++i) {
    gpuAverages.push(gpuData[i * 4]); // because we're only using the RED channel
  }
  const jsAverages = (new Array(numArrays)).fill(0);
  const numValues = (width / numArrays) * height;
  for (let i = 0; i < width / numArrays; ++i) {
    for (let j = 0; j < numArrays; ++j) {
      jsAverages[j] += values[i * numArrays + j] / numValues;
    }
  }
  console.log('gpuAverage:', gpuAverages.join(', '));
  console.log('jsAverage:', jsAverages.join(', '));
}
main();
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>

Related

Building a Perspective Projection Matrix

My first post here, but hopefully I can explain my dilemma with building a perspective projection matrix similar to the one in OpenGL. Being new to 3D graphics, I'm having trouble understanding what to do after multiplying my points by the perspective projection matrix. I'm attempting this in Flutter, but that should be a moot point, as I believe my conversion is off.
Here is what I have:
var center = {
  'x': size.width / 2,
  'y': size.height / 2
};
List points = [];
points.add(createVector(-50, -50, -50, center));
points.add(createVector(50, -50, -50, center));
points.add(createVector(50, 50, -50, center));
points.add(createVector(-50, 50, -50, center));
points.add(createVector(-50, -50, 50, center));
points.add(createVector(50, -50, 50, center));
points.add(createVector(50, 50, 50, center));
points.add(createVector(-50, 50, 50, center));
for (int i = 0; i < points.length; i++) {
  var matrix = matmul(projection, points[i]);
  var w = matrix[3][0];
  projected.add(
    Offset(
      (matrix[0][0] / w),
      (matrix[1][0] / w)
    )
  );
}
And these are the 2 custom functions I've created:
List createVector(x, y, z, center) {
  return [
    [center['x'] + x],
    [center['y'] + y],
    [z],
    [0]
  ];
}

List matmul(a, b) {
  int colsA = a[0].length;
  int rowsA = a.length;
  int colsB = b[0].length;
  int rowsB = b.length;
  if (colsA != rowsB) {
    return null;
  }
  List result = [];
  for (int j = 0; j < rowsA; j++) {
    result.add([]);
    for (int i = 0; i < colsB; i++) {
      double sum = 0.0;
      for (int n = 0; n < colsA; n++) {
        sum += a[j][n] * b[n][i];
      }
      result[j].add(sum);
    }
  }
  return result;
}
My projection matrix that I'm multiplying each point with is:
var aspect = size.width / size.height;
var fov = 100;
var near = 200;
var far = 300;
List projection = [
  [1 / (aspect * tan(fov / 2)), 0, 0, 0],
  [0, 1 / (tan(fov / 2)), 0, 0],
  [0, 0, (near + far) / (near - far), (2 * near * far) / (near - far)],
  [0, 0, -1, 0]
];
I believe I am using the correct projection matrix to multiply each vector point that I have. The only thing is, after I get the result from this multiplication, I'm not entirely sure what to do with the resultant vector. I've read about the perspective divide, so I am dividing the x, y and z values by the 4th value (w), but I could be incorrect.
Any insight or help is much appreciated. Have been stumped for a long time as I have been learning this online on my own.
In OpenGL the projection matrix switches from a right-handed system to a left-handed system (see Right-hand rule). This is accomplished by mirroring the z axis.
The two terms in the 3rd row have to be negated (-(near+far)/(near-far) and -(2*near*far)/(near-far) respectively):
List projection = [
  [1 / (aspect * tan(fov/2)), 0, 0, 0],
  [0, 1 / (tan(fov/2)), 0, 0],
  [0, 0, - (near+far) / (near-far), - (2*near*far) / (near-far)],
  [0, 0, -1, 0]
];
The perspective projection matrix defines a viewing frustum. It defines a 3-dimensional space (clip space) which is projected onto the 2-dimensional viewport.
In OpenGL all geometry which is not in clip space is clipped. You have to ensure that the geometry is between the near and far planes.
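Two more things in the question's code are worth flagging: createVector sets the 4th (w) component to 0, but points need w = 1 for the translation terms of a 4x4 matrix to have any effect, and fov = 100 looks like degrees while tan() expects radians. A minimal sketch of the corrected pipeline in JavaScript (the Dart version is analogous; matmul is the question's function, and perspective/project are hypothetical helper names):
// a sketch, assuming fovY is in radians and points are column vectors [[x], [y], [z], [1]]
function perspective(fovY, aspect, near, far) {
  const f = 1 / Math.tan(fovY / 2);
  return [
    [f / aspect, 0, 0, 0],
    [0, f, 0, 0],
    [0, 0, -(far + near) / (far - near), -(2 * far * near) / (far - near)],
    [0, 0, -1, 0],
  ];
}

function project(projection, point) {
  const clip = matmul(projection, point); // matmul as defined in the question
  const w = clip[3][0];                   // equals -z_eye for this matrix
  return [clip[0][0] / w, clip[1][0] / w, clip[2][0] / w]; // the perspective divide
}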

How to generate non-repeating Random Numbers in Unity

I am trying to create a simple Bingo game and want to make sure the numbers are not repeating on the bingo card. I have a random number generator, but for some reason the code I'm using doesn't work as the same numbers will constantly repeat. Could somebody please take a look at my code below and either tell me what I need to fix or fix the code for me?
public Grid(int width, int height, float cellSize)
{
    this.width = width;
    this.height = height;
    this.cellSize = cellSize;
    gridArray = new int[width, height];
    debugTextArray = new TextMesh[width, height];
    for (int x = 0; x < gridArray.GetLength(0); x++)
    {
        for (int y = 0; y < gridArray.GetLength(1); y++)
        {
            debugTextArray[x, y] = UtilsClass.CreateWorldText(gridArray[x, y].ToString(), null, GetWorldPosition(x, y) + new Vector3(cellSize, cellSize) * .5f, 20, Color.white, TextAnchor.MiddleCenter);
            Debug.DrawLine(GetWorldPosition(x, y), GetWorldPosition(x, y + 1), Color.white, 100f);
            Debug.DrawLine(GetWorldPosition(x, y), GetWorldPosition(x + 1, y), Color.white, 100f);
        }
    }
    Debug.DrawLine(GetWorldPosition(0, height), GetWorldPosition(width, height), Color.white, 100f);
    Debug.DrawLine(GetWorldPosition(width, 0), GetWorldPosition(width, height), Color.white, 100f);
    for (int x = 0; x <= 4; x++)
    {
        RandomValue(0, x);
        RandomValue(1, x);
        RandomValue(2, x);
        RandomValue(3, x);
        RandomValue(4, x);
    }
}

private Vector3 GetWorldPosition(int x, int y)
{
    return new Vector3(x, y) * cellSize;
}

public void RandomValue(int x, int y)
{
    if (x >= 0 && y >= 0 && x < width && y < height)
    {
        list = new List<int>(new int[Lenght]);
        for (int j = 0; j < 25; j++)
        {
            Rand = UnityEngine.Random.Range(1, 50);
            while (list.Contains(Rand))
            {
                Rand = UnityEngine.Random.Range(1, 50);
            }
            list[j] = Rand;
            gridArray[x, y] = list[j];
        }
        debugTextArray[x, y].text = gridArray[x, y].ToString();
        debugTextArray[2, 2].text = "Free";
    }
}
Basically your concept in RandomValue() is correct, but the problem is that it only checks within a single call (one column), so you have to lift the uniqueness check up to the Grid() level: keep one list containing all approved values, and check Contains() against it in Grid().
But in fact you can do it all in one go.
Make sure width * height is not larger than the number of possible values, or the while loop below will never finish.
Dictionary<Vector2Int, int> CreateBingoGrid(int width, int height, int maxValue)
{
    var grid = new Dictionary<Vector2Int, int>();
    for (int x = 0; x < width; x++)
    {
        for (int y = 0; y < height; y++)
        {
            var num = Random.Range(1, maxValue);
            while (grid.ContainsValue(num))
            {
                num = Random.Range(1, maxValue);
            }
            grid.Add(new Vector2Int(x, y), num);
        }
    }
    return grid;
}
As mentioned in the comment on your question, it's probably easiest to just shuffle the numbers in the range [1, 50] and then take the first 25, or however many you want.
The reason your code isn't working properly and you see a lot of repeats is that you're calling RandomValue() multiple separate times, and the list you compare against (to see whether a value is already on the card) lives inside that function. It only ever checks the values generated in that one call, in this case one row.
Also, if you make a list that you know will always be the same size, use an array instead. Lists are for when you want the size to be adjustable.
Solution 1:
A very simple way to generate an array with the numbers 1-50 would be to do this:
// Initialize array with the numbers 1-50
int[] numbers = new int[50];
for (int i = 0; i < numbers.Length; i++)
{
    numbers[i] = i + 1;
}
// Shuffle array (Fisher-Yates)
for (int i = 0; i < numbers.Length; i++)
{
    int tmp = numbers[i];
    int r = Random.Range(i, numbers.Length);
    numbers[i] = numbers[r];
    numbers[r] = tmp;
}
// Get first 'n' numbers
int[] result = new int[n];
Array.Copy(numbers, result, n);
return result;
I'm not sure if it's the most efficient way, but it would work.
Solution 2:
To change your code to check against the entire list, I would change this section:
for (int x = 0; x <= 4; x++)
{
    RandomValue(0, x);
    RandomValue(1, x);
    RandomValue(2, x);
    RandomValue(3, x);
    RandomValue(4, x);
}
To something like this:
List<int> values = new List<int>();
for (int y = 0; y < height; y++)
{
    for (int x = 0; x < width; x++)
    {
        int r = RandomValue(1, 50);
        while (values.Contains(r))
        {
            r = RandomValue(1, 50);
        }
        values.Add(r);
        gridArray[x, y] = r;
    }
}

int RandomValue(int min, int max)
{
    return UnityEngine.Random.Range(min, max);
}
Hope this helps!

Transformation of coordinates, Concept

I want to convert x, y, z coordinates to polar (azimuth/elevation/radius) coordinates, but I am getting a minus sign on the y coordinates. Can someone explain why? It would be a great help.
I am reading these values (xyz, az_elev_r) from a software package and they can't be changed. I am just not sure of the order of the angles (az and elev). Using my code I get -y instead of y, which means there is a 180° rotation somewhere. My code is:
xyz = [-0.564  3.689 -0.735;
        2.011  5.067 -1.031;
       -1.181  3.943 -1.825];   % Reference values

%% az_elev_r --> xyz
az_elev_r = [261.30  -11.24    3.80;
             291.65  -10.692   5.548;
             253.34  -23.897   4.50];   % Also reference (degrees)
az_elev_r(:,1:2) = deg2rad(az_elev_r(:,1:2));
r    = az_elev_r(:,3);
az   = az_elev_r(:,1);
elev = az_elev_r(:,2);
x = r.*cos(az).*cos(elev)
y = r.*sin(az).*cos(elev)
z = r.*sin(elev)
Your az_elev_r matrix is not consistent with your xyz reference.
>> [az, el, r] = cart2sph(xyz(:,1), xyz(:,2), xyz(:,3));
>> rad2deg(az)
ans =
98.6924675475501
68.3527736950233
106.673911589314
Your answers are consistent with the values returned by the sph2cart function. (This example starts with your original input, before the deg2rad conversion.)
>> [x, y, z] = sph2cart(deg2rad(az_elev_r(:,1)), deg2rad(az_elev_r(:,2)), az_elev_r(:,3))
x =
-0.563766229670505
2.01131973806906
-1.17951822049783
y =
-3.68422880893852
-5.06709019311118
-3.94153436658676
z =
-0.740692730942158
-1.02931719412937
-1.82292172199717
Incidentally, your code will be more readable if you just use the sph2cart function, and work in radians, unless you are trying to understand the conversions for their own sake.
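For reference, cart2sph/sph2cart measure azimuth counterclockwise from the +x axis in the xy-plane and elevation up from the xy-plane. If the software that produced your az_elev_r measures azimuth differently (say, clockwise, or from the y axis), you get exactly this kind of sign flip. A tiny JavaScript sketch of the MATLAB convention, for checking values outside MATLAB:
// MATLAB's sph2cart convention; angles in radians
function sph2cart(az, elev, r) {
  return {
    x: r * Math.cos(elev) * Math.cos(az),
    y: r * Math.cos(elev) * Math.sin(az),
    z: r * Math.sin(elev),
  };
}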
OpenCV has code for conversion to polar coordinates and back. This conversion is useful for finding object rotation through correlation, or otherwise creating an object-centred, 'rotation-independent' representation of objects. It is useful to visualize each of the polar coordinates as well as their joint image. The images below should be self-explanatory. The polar plot has angle as the horizontal axis and radius as the vertical axis, so the 4 peaks correspond to the 4 corners of the input image. The code (C++ with OpenCV) is attached.
//================================
// Name        : PolarCoord.cpp
// Author      : V.Ivanchenko cudassimo#gmail.com
// Version     :
// Copyright   : Your copyright notice
// Description : Cartesian-to-polar conversion with OpenCV
//======================================
#include <iostream>
#include <limits>
#include "opencv.hpp"

using namespace std;
using namespace cv;

#define VALID(x, y, w, h) ((x)>=0 && (y)>=0 && (x)<(w) && (y)<(h)) // validates index

/*
 * 1. Original binary image HxW CV_8U
 *        |
 *        V
 * 2. Two coordinate Mats HxW CV_32F
 *        |
 *        V
 * 3. Visualization CV_8U
 *    a. gray HxW for a single coordinate image
 *    b. binary Rx360 for two coordinate images
 */

// convert a binary 2D image into two Mats with float coordinates
void imageToCoord(const Mat& img, Mat& X, Mat& Y, bool centered = true) {
    if (img.empty())
        return;
    int h = img.rows;
    int w = img.cols;
    X.create(h, w, CV_32F);
    Y.create(h, w, CV_32F);
    float Cx = w/2.0f;
    float Cy = h/2.0f;
    for (int i=0; i<h; ++i) {
        const uchar* img_row = img.ptr<uchar>(i);
        float* x_row = X.ptr<float>(i);
        float* y_row = Y.ptr<float>(i);
        for (int j=0; j<w; ++j) {
            if (img_row[j]>0) {
                float x = j;
                float y = i;
                if (centered) {
                    x -= Cx;
                    y -= Cy;
                }
                x_row[j] = x;
                y_row[j] = y;
            }
        } // j
    } // i
} // imageToCoord()

// convert a single float polar coord Mat to a gray image
void polarToImg(const Mat& PolarCoord, Mat& img) {
    if (PolarCoord.empty())
        return;
    int h = PolarCoord.rows;
    int w = PolarCoord.cols;
    img.create(h, w, CV_8U);
    float maxVal = std::numeric_limits<float>::min();
    // find maxVal
    for (int i=0; i<h; ++i) {
        const float* x_row = PolarCoord.ptr<float>(i);
        for (int j=0; j<w; ++j) {
            if (maxVal < x_row[j])
                maxVal = x_row[j];
        } // j
    } // i
    // create an image
    if (maxVal>0) {
        float k = 255.0/maxVal;
        for (int i=0; i<h; ++i) {
            uchar* img_row = img.ptr<uchar>(i);
            const float* x_row = PolarCoord.ptr<float>(i);
            for (int j=0; j<w; ++j) {
                img_row[j] = saturate_cast<uchar>(k*x_row[j]);
            } // j
        } // i
    } // if
} // polarToImg()

// convert two polar coord Mats to a binary image
void polarToImg(const Mat& radius, const Mat& angle, Mat& img) {
    if (angle.empty() || radius.empty())
        return;
    int h = angle.rows;
    int w = angle.cols;
    assert(radius.cols==w && radius.rows==h);
    const int imgH = sqrt(h*h+w*w)+0.5f; // radius
    const int imgW = 360; // angle, deg
    img.create(imgH, imgW, CV_8U);
    img.setTo(0); // create() does not zero the buffer
    // create an image
    for (int i=0; i<h; ++i) {
        const float* ang_row = angle.ptr<float>(i);
        const float* r_row = radius.ptr<float>(i);
        for (int j=0; j<w; ++j) {
            int x = ang_row[j] + 0.5f;
            int y = r_row[j] + 0.5f;
            if (x>0) {
                cout<<x<<endl;
            }
            if (VALID(x, y, imgW, imgH))
                img.at<uchar>(y, x) = 255;
            else {
                cout<<"Invalid x, y: "<<x<<", "<<y<<endl;
            }
        } // j
    } // i
} // polarToImg()

int main() {
    cout << "Cartesian to polar" << endl;
    const int W=400, H=400;
    Mat Minput(H, W, CV_8U);
    Minput(Rect(W/4, H/4, W/2, H/2)) = 255;
    Mat X, Y, Angle, Radius, Mr, Mang, Mpolar;
    // processing
    imageToCoord(Minput, X, Y);             // extract coordinates
    cartToPolar(X, Y, Radius, Angle, true); // convert coordinates
    // visualize
    polarToImg(Radius, Mr);
    polarToImg(Angle, Mang);
    polarToImg(Radius, Angle, Mpolar);
    // debug
    //cout<<Mpolar<<endl;
    namedWindow("input", 0);
    namedWindow("angle", 0);
    namedWindow("radius", 0);
    namedWindow("Polar", 0);
    const int winw=200, winh=200;
    resizeWindow("input", winw, winh);
    resizeWindow("angle", winw, winh);
    resizeWindow("radius", winw, winh);
    resizeWindow("Polar", 360, (int)sqrt(H*H + W*W));
    moveWindow("input", 0, 0);
    moveWindow("angle", winw, 0);
    moveWindow("radius", 2*winw, 0);
    moveWindow("Polar", 3*winw, 0);
    imshow("input", Minput);
    imshow("angle", Mang);
    imshow("radius", Mr);
    imshow("Polar", Mpolar);
    waitKey(-1);
    return 0;
}

Ray Tracing question, how to map screen coordinates to world coordinates?

I was studying Ray Tracing on http://www.devmaster.net/articles/raytracing_series/part1.php when I came across this piece of code:
void Engine::InitRender()
{
    // set first line to draw to
    m_CurrLine = 20;
    // set pixel buffer address of first pixel
    m_PPos = 20 * m_Width;
    // screen plane in world space coordinates
    m_WX1 = -4, m_WX2 = 4, m_WY1 = m_SY = 3, m_WY2 = -3;
    // calculate deltas for interpolation
    m_DX = (m_WX2 - m_WX1) / m_Width;
    m_DY = (m_WY2 - m_WY1) / m_Height;
    m_SY += 20 * m_DY;
    // allocate space to store pointers to primitives for previous line
    m_LastRow = new Primitive*[m_Width];
    memset( m_LastRow, 0, m_Width * 4 );
}
I'm quite confused about how the author maps screen coordinates to world coordinates.
Can anyone tell me how the author derived these lines, or how one would map screen coordinates to world coordinates in general?
// screen plane in world space coordinates
m_WX1 = -4, m_WX2 = 4, m_WY1 = m_SY = 3, m_WY2 = -3;
Thank you in advance!
EDIT: Here is the relevant code from raytracer.cpp:
// render scene
vector3 o( 0, 0, -5 );
// initialize timer
int msecs = GetTickCount();
// reset last found primitive pointer
Primitive* lastprim = 0;
// render remaining lines
for (int y = m_CurrLine; y < (m_Height - 20); y++)
{
    m_SX = m_WX1;
    // render pixels for current line
    for ( int x = 0; x < m_Width; x++ )
    {
        // fire primary ray
        Color acc( 0, 0, 0 );
        vector3 dir = vector3( m_SX, m_SY, 0 ) - o;
        NORMALIZE( dir );
        Ray r( o, dir );
        float dist;
        Primitive* prim = Raytrace( r, acc, 1, 1.0f, dist );
        int red = (int)(acc.r * 256);
        int green = (int)(acc.g * 256);
        int blue = (int)(acc.b * 256);
        if (red > 255) red = 255;
        if (green > 255) green = 255;
        if (blue > 255) blue = 255;
        m_Dest[m_PPos++] = (red << 16) + (green << 8) + blue;
        m_SX += m_DX;
    }
    m_SY += m_DY;
    // see if we've been working too long already
    if ((GetTickCount() - msecs) > 100)
    {
        // return control to windows so the screen gets updated
        m_CurrLine = y + 1;
        return false;
    }
}
return true;
Therefore the camera is at (0,0,-5) and the screen onto which the world is being projected has top-left corner (-4,3,0) and bottom-right corner (4,-3,0).
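To spell out the mapping: m_DX = (m_WX2 - m_WX1) / m_Width and m_DY = (m_WY2 - m_WY1) / m_Height are the world-space width and height of one pixel, so pixel (x, y) lands at worldX = m_WX1 + x * m_DX, worldY = m_WY1 + y * m_DY on the z = 0 screen plane (the render loop carries these along incrementally in m_SX and m_SY). Each primary ray then starts at o = (0, 0, -5) and is aimed through that point on the plane.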

OpenGL ES 2.0 (specifically for the iPhone) rendering is slightly off. Best guess is it's a projection matrix problem

So I bought O'Reilly's iPhone 3D Programming and found what I believe to be a bug in their code. However, I can't figure out what the problem is, and until I do I can't move forward with my own code.
I will paste what I consider to be the relevant code into this post, but luckily all the code is available online at:
http://examples.oreilly.com/9780596804831/HelloCone/
The problem I am having is with their OpenGL ES 2.0 renderer; it does not show up in their ES 1.1 renderer.
What I have been noticing is that the cone does not render in exactly the correct position. To test this I changed the model-view matrix to render exactly on the frustum's near plane, so the cone should appear cut completely in two. With the ES 1.1 renderer this is the case; with OpenGL ES 2.0, however, it is not. The cone is for the most part there, but slightly shaved off, meaning it is not landing exactly on the frustum's near face.
Here is the initialization code where the projection matrix is created and set up:
void RenderingEngine2::Initialize(int width, int height)
{
    const float coneRadius = 0.5f;
    const float coneHeight = 1.0f;
    const int coneSlices = 40;
    {
        // Allocate space for the cone vertices.
        m_cone.resize((coneSlices + 1) * 2);
        // Initialize the vertices of the triangle strip.
        vector<Vertex>::iterator vertex = m_cone.begin();
        const float dtheta = TwoPi / coneSlices;
        for (float theta = 0; vertex != m_cone.end(); theta += dtheta) {
            // Grayscale gradient
            float brightness = abs(sin(theta));
            vec4 color(brightness, brightness, brightness, 1);
            // Apex vertex
            vertex->Position = vec3(0, 1, 0);
            vertex->Color = color;
            vertex++;
            // Rim vertex
            vertex->Position.x = coneRadius * cos(theta);
            vertex->Position.y = 1 - coneHeight;
            vertex->Position.z = coneRadius * sin(theta);
            vertex->Color = color;
            vertex++;
        }
    }
    {
        // Allocate space for the disk vertices.
        m_disk.resize(coneSlices + 2);
        // Initialize the center vertex of the triangle fan.
        vector<Vertex>::iterator vertex = m_disk.begin();
        vertex->Color = vec4(0.75, 0.75, 0.75, 1);
        vertex->Position.x = 0;
        vertex->Position.y = 1 - coneHeight;
        vertex->Position.z = 0;
        vertex++;
        // Initialize the rim vertices of the triangle fan.
        const float dtheta = TwoPi / coneSlices;
        for (float theta = 0; vertex != m_disk.end(); theta += dtheta) {
            vertex->Color = vec4(0.75, 0.75, 0.75, 1);
            vertex->Position.x = coneRadius * cos(theta);
            vertex->Position.y = 1 - coneHeight;
            vertex->Position.z = coneRadius * sin(theta);
            vertex++;
        }
    }
    // Create the depth buffer.
    glGenRenderbuffers(1, &m_depthRenderbuffer);
    glBindRenderbuffer(GL_RENDERBUFFER, m_depthRenderbuffer);
    glRenderbufferStorage(GL_RENDERBUFFER,
                          GL_DEPTH_COMPONENT16,
                          width,
                          height);
    // Create the framebuffer object; attach the depth and color buffers.
    glGenFramebuffers(1, &m_framebuffer);
    glBindFramebuffer(GL_FRAMEBUFFER, m_framebuffer);
    glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                              GL_COLOR_ATTACHMENT0,
                              GL_RENDERBUFFER,
                              m_colorRenderbuffer);
    glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                              GL_DEPTH_ATTACHMENT,
                              GL_RENDERBUFFER,
                              m_depthRenderbuffer);
    // Bind the color buffer for rendering.
    glBindRenderbuffer(GL_RENDERBUFFER, m_colorRenderbuffer);
    // Set up some GL state.
    glViewport(0, 0, width, height);
    glEnable(GL_DEPTH_TEST);
    // Build the GLSL program.
    m_simpleProgram = BuildProgram(SimpleVertexShader, SimpleFragmentShader);
    glUseProgram(m_simpleProgram);
    // Set the projection matrix.
    GLint projectionUniform = glGetUniformLocation(m_simpleProgram, "Projection");
    mat4 projectionMatrix = mat4::Frustum(-1.6f, 1.6, -2.4, 2.4, 5, 10);
    glUniformMatrix4fv(projectionUniform, 1, 0, projectionMatrix.Pointer());
}
And here is the render code. As you can see, I have changed the model-view matrix to place the cone on the bottom-left corner of the near frustum face.
void RenderingEngine2::Render() const
{
    GLuint positionSlot = glGetAttribLocation(m_simpleProgram, "Position");
    GLuint colorSlot = glGetAttribLocation(m_simpleProgram, "SourceColor");
    glClearColor(0.5f, 0.5f, 0.5f, 1);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glEnableVertexAttribArray(positionSlot);
    glEnableVertexAttribArray(colorSlot);
    mat4 rotation(m_animation.Current.ToMatrix());
    mat4 translation = mat4::Translate(-1.6, -2.4, -5);
    // Set the model-view matrix.
    GLint modelviewUniform = glGetUniformLocation(m_simpleProgram, "Modelview");
    mat4 modelviewMatrix = rotation * translation;
    glUniformMatrix4fv(modelviewUniform, 1, 0, modelviewMatrix.Pointer());
    // Draw the cone.
    {
        GLsizei stride = sizeof(Vertex);
        const GLvoid* pCoords = &m_cone[0].Position.x;
        const GLvoid* pColors = &m_cone[0].Color.x;
        glVertexAttribPointer(positionSlot, 3, GL_FLOAT, GL_FALSE, stride, pCoords);
        glVertexAttribPointer(colorSlot, 4, GL_FLOAT, GL_FALSE, stride, pColors);
        glDrawArrays(GL_TRIANGLE_STRIP, 0, m_cone.size());
    }
    // Draw the disk that caps off the base of the cone.
    {
        GLsizei stride = sizeof(Vertex);
        const GLvoid* pCoords = &m_disk[0].Position.x;
        const GLvoid* pColors = &m_disk[0].Color.x;
        glVertexAttribPointer(positionSlot, 3, GL_FLOAT, GL_FALSE, stride, pCoords);
        glVertexAttribPointer(colorSlot, 4, GL_FLOAT, GL_FALSE, stride, pColors);
        glDrawArrays(GL_TRIANGLE_FAN, 0, m_disk.size());
    }
    glDisableVertexAttribArray(positionSlot);
    glDisableVertexAttribArray(colorSlot);
}
Looks like I found the answer to my own question.
The projection matrix in the O'Reilly code is being calculated incorrectly.
In their code they have:
T a = 2 * near / (right - left);
T b = 2 * near / (top - bottom);
T c = (right + left) / (right - left);
T d = (top + bottom) / (top - bottom);
T e = - (far + near) / (far - near);
T f = -2 * far * near / (far - near);
Matrix4 m;
m.x.x = a; m.x.y = 0; m.x.z = 0; m.x.w = 0;
m.y.x = 0; m.y.y = b; m.y.z = 0; m.y.w = 0;
m.z.x = c; m.z.y = d; m.z.z = e; m.z.w = -1;
m.w.x = 0; m.w.y = 0; m.w.z = f; m.w.w = 1;
return m;
However this is not a correct perspective projection matrix: m.w.w should be 0, not 1.
Matrix4 m;
m.x.x = a; m.x.y = 0; m.x.z = 0; m.x.w = 0;
m.y.x = 0; m.y.y = b; m.y.z = 0; m.y.w = 0;
m.z.x = c; m.z.y = d; m.z.z = e; m.z.w = -1;
m.w.x = 0; m.w.y = 0; m.w.z = f; m.w.w = 0;
return m;
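A note on why that one element matters (assuming, as the book's vector library does, row vectors multiplied as v' = v * M): clip-space w works out to w' = -z_eye + m.w.w. With m.w.w = 1 that is -z_eye + 1 instead of the required -z_eye, so every perspective divide is slightly off, which fits the 'slightly shaved off' cone in the question.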