Using if with difference in OpenSCAD

I want to create a model in OpenSCAD and then optionally cut a hole into it (using difference()), so I can do something like:
module model_with_hole(hole = false) {
    difference() {
        //the_model()
        if (hole) {
            //the_hole()
        }
    }
}
But this is actually saying something like "always cut something out of the model except that what you cut might be nothing if a hole is not required".
Another alternative would be:
module model_with_hole(hole = false) {
    if (hole) {
        difference() {
            //the_model()
            //the_hole()
        }
    } else {
        //the_model()
    }
}
But this is actually saying something like "if you need a hole then render the model and remove the hole, otherwise just render the model".
Is there a way to code this such that the call to render the model would only exist once and the difference action would only happen if required?
if (hole) {the_hole()} the_model();
So the code would feel more like saying: render the model, and if required, cut the hole?

Perhaps this is what you want:
Add the parameters of the holes to a vector and use this vector in a for loop inside difference(). If the vector is empty, nothing is subtracted from the model. Try the four examples:
module model(l) {
    cube(size = l, center = true);
}

// dim = [height, radius]
module hole(pos, dim) {
    translate(pos) cylinder(h = dim[0] + 0.1, r = dim[1], center = true);
}

// Each entry of the vector is [position, [height, radius]]:
// holes = [];
// holes = [[[0,0,0],[10, 1]]];
holes = [[[-2.5,-2.5,0],[10, 0.5]], [[0,0,0],[10, 1]], [[2.5,2.5,0],[10, 1.5]]];
// holes = [[[-2.5,-2.5,0],[10, 1]], [[-2.5,2.5,0],[10, 1]], [[2.5,2.5,0],[10, 1]], [[2.5,-2.5,0],[10, 1]], [[0,0,0],[10, 1]]];

difference() {
    model(10);
    for (h = holes) hole(h[0], h[1]);
}
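To get back to the shape of the original question, this can be wrapped in a module that takes the hole list as a parameter with an empty default, so the model is instantiated exactly once and the subtraction only happens when holes are actually passed in. A minimal sketch (module and parameter names are illustrative):
module model_with_holes(holes = []) {
    difference() {
        model(10);
        for (h = holes) hole(h[0], h[1]);
    }
}

model_with_holes();                                            // no holes: plain model
translate([20, 0, 0]) model_with_holes([[[0,0,0],[10, 1]]]);   // the same model with one hole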


How to convert lat/lon to correct pixel location in GridLayer Tile

I'm playing with creating a Konva-based GridLayer for Leaflet (basically an abstraction around canvas elements to try and render tens of thousands of features efficiently). I have some code that seems to work to some degree (the lines in my sample data seem to line up with what I would expect), but I am getting strange behavior. Specifically, features will seem to visibly "teleport" or disappear completely. Additionally, it is not uncommon to see breaks in lines at the edges of the tiles. I suspect this means I'm calculating the pixel location within each tile incorrectly (although it's certainly possible something else is wrong). I am basically identifying the pixel location of the tile (x, y in renderStage()), and am translating the map pixel position by that many pixels (pt.x and pt.y, generated by projecting the lat/lon). This is intended to create an array of [x1, y1, x2, y2, ...], which can be rendered to the individual tile. Everything is expected to be in EPSG:4326.
Does anyone know how to properly project lat/lon to pixel coordinates within individual tiles of a GridLayer? There are plenty of examples for doing it for the entire map, but this doesn't seem to translate cleanly into how to find those same pixel locations in tiles offset from the upper left of the map.
import { GridLayer, withLeaflet } from "react-leaflet";
import { GridLayer as LeafletGridLayer } from "leaflet";
import { Stage, Line, FastLayer } from "konva";
import * as Util from 'leaflet/src/core/Util';
import _ from "lodash";
export const CollectionLayer = LeafletGridLayer.extend({
options: {
tileSize: 256
},
initialize: function(collection, props) {
Util.setOptions(this, props)
this.collection = collection;
this.stages = new Map();
this.shapes = {};
this.cached = {};
this.on('tileunload', (e) => {
const stage = this.stages[e.coords]
if (stage) {
this.stages.delete(e.coords)
stage.destroy()
}
})
},
renderStage: function(stage, coords, tileBounds) {
const x = coords.x * this._tileSize.x
const y = coords.y * this._tileSize.y
const z = coords.z;
const layer = stage.getLayers()[0]
if (!layer || !tileBounds) return;
_.each(this.collection.data, (entity, id) => {
if (entity.bounds && tileBounds.intersects(entity.bounds)) {
let shape = this.shapes[id]
if (!shape) {
shape = new Line()
shape.shadowForStrokeEnabled(false)
this.shapes[id] = shape
}
layer.add(shape);
const points = entity.position.reduce((pts, p) => {
const pt = this._map.project([p.value[1], p.value[0]], this._tileZoom)
pts.push(pt.x - x);
pts.push(pt.y - y);
return pts
}, [])
shape.points(points);
shape.stroke('red');
shape.strokeWidth(2);
this.shapes[id] = shape
}
})
layer.batchDraw()
},
createTile: function(coords) {
const tile = document.createElement("div");
const tileSize = this.getTileSize();
const stage = new Stage({
container: tile,
width: tileSize.x,
height: tileSize.y
});
const bounds = this._tileCoordsToBounds(coords);
const layer = new FastLayer();
stage.add(layer);
this.stages[coords] = stage
this.renderStage(stage, coords, bounds);
return tile;
}
});
class ReactCollectionLayer extends GridLayer {
createLeafletElement(props) {
console.log("PROPS", props);
return new CollectionLayer(props.collection.data, this.getOptions(props));
}
updateLeafletElement(fromProps, toProps) {
super.updateLeafletElement(fromProps, toProps);
if (this.leafletElement.collection !== toProps.collection) {
this.leafletElement.collection = toProps.collection
this.leafletElement.redraw();
}
}
}
export default withLeaflet(ReactCollectionLayer);
Everything is expected to be in EPSG:4326.
No.
Once you are dealing with raster data (image tiles), everything is expected to be either in the map's display CRS, which is (by default) EPSG:3857, or in pixels relative to the CRS origin. These concepts are explained a bit more in-depth in one of Leaflet's tutorials.
In fact, you seem to be working in pixels here, at least for your points:
const pt = this._map.project([p.value[1], p.value[0]], this._tileZoom)
However, your calculation of the pixel offset for each tile is too naïve:
const x = coords.x * this._tileSize.x
const y = coords.y * this._tileSize.y
That should instead rely on the private method _getTiledPixelBounds of L.GridLayer, e.g.:
const tilePixelBounds = this._getTiledPixelBounds();
const x = tilePixelBounds.min.x;
const y = tilePixelBounds.min.y;
And use these bounds to add some sanity checks while looping through the points:
const pt = this._map.project([p.value[1], p.value[0]], this._tileZoom);
if (!tilePixelBounds.contains(pt)) { console.error(....); }
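Putting the two snippets together, the per-point loop inside renderStage could look roughly like this (a sketch using the question's variable names; _getTiledPixelBounds is a private Leaflet method, so its exact signature may differ between Leaflet versions):
const tilePixelBounds = this._getTiledPixelBounds();
const points = entity.position.reduce((pts, p) => {
  const pt = this._map.project([p.value[1], p.value[0]], this._tileZoom);
  if (!tilePixelBounds.contains(pt)) {
    console.warn("point falls outside this tile", coords, pt); // sanity check
  }
  pts.push(pt.x - tilePixelBounds.min.x); // offset into this tile's pixel space
  pts.push(pt.y - tilePixelBounds.min.y);
  return pts;
}, []);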
On the other hand:
[...] an abstraction around canvas elements to try and render tens of thousands of features efficiently
I don't think using Konva to actually draw items on a <canvas> is going to improve the performance - the methods are the same ones used by Leaflet (and, if we're talking about tiling vector data, the same ones used by Leaflet.VectorGrid). Ten thousand calls to canvas draw functions are going to take the same time no matter what library sits on top. If you have time to consider other alternatives, Leaflet.GLMarkers and its WebGL rendering might offer better performance at the price of less compatibility and higher integration costs.

Unable to draw in Processing when I integrated it with Eclipse

My setup method looks like the one below. I want to read a location file (city names with x and y coordinates) and then build a hash map of all cities so that I can draw them all (as points) on the canvas.
public void setup() {
    background(0);
    PFont title = createFont("Georgia", 16);
    textFont(title);
    text("This is a visualization of A* algorithm", 240, 20);
    stroke(255);
    line(0, 25, 800, 25);
    selectInput("Select a file for Locations:", "locFileSelected");
}
The locFileSelected method (locFilePath is a global variable):
public void locFileSelected(File locFile) {
    locFilePath = locFile.toString();
    this.readLocFileAndDraw();
}
Now control is transferred to readLocFileAndDraw (each line in the file has 3 space-separated words: the city name followed by the x and y coordinates):
private void readLocFileAndDraw() {
try (Stream<String> lines = Files.lines(Paths.get(locFilePath))) {
for (String line : (Iterable<String>) lines::iterator){
// Last line in file is END, skip it
if(!line.equalsIgnoreCase("END")) {
List<Double> list = new ArrayList<Double>();
String[] arr= line.split(" ");
// adding coordinates into the list
list.add(Double.valueOf(arr[1]));
list.add(Double.valueOf(arr[2]));
// adding the list into the map with key as city name
locationsMap.put(arr[0], list);
}
}
} catch (IOException e) {
e.printStackTrace();
System.exit(0);
}
// Draw cities on map
// Draw graph of all cities
int w=1, h=1;
Set<Entry<String, List<Double>>> locationKeyEntries = locationsMap.entrySet();
for(Entry<String, List<Double>> currEntry: locationKeyEntries) {
String currCity = currEntry.getKey();
List<Double> currLocationList = currEntry.getValue();
int x = currLocationList.get(0).intValue();
int y = currLocationList.get(1).intValue();
stroke(255);
ellipse(x, y, w, h);
if(x>755)
x = x-(8*currCity.length());
if(y>755)
y=y-(8*currCity.length());
text(currCity, x,y);
}
return;
}
I tried to debug it; control is reaching the ellipse method but nothing is getting drawn. Any idea? As far as I understand, I am missing passing a reference to the PApplet, but I don't know how to do it...
Like you've mentioned, you really need to debug your program. Verifying that you're calling the ellipse() function is a great first step, but now you should be asking yourself more questions:
What is the value of x, y, w, and h being passed into the ellipse() function?
What is the value of currEntry in the for loop? What is the value of line when you're reading it in?
What are the fill, stroke, and background colors when you're drawing?
Note that I'm not asking you to tell me the answer to these questions. I'm pointing out these questions because they're what you should be asking yourself when you debug your program.
If you still can't figure it out, I really recommend breaking your problem down into smaller pieces and approaching each of those steps one at a time. For example, can you just show a single circle at a hard-coded point? Then work your way up from there. Can you read a single point in from a file and draw that to the screen? Then read two points. Work your way forward in small incremental steps, and post a MCVE if you get stuck. Good luck.
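For example, a stripped-down version of your sketch along these lines (hypothetical, just to isolate the drawing step) answers the first of those smaller questions: does a single hard-coded, clearly visible circle show up at all?
public void setup() {
    background(0);
}

public void draw() {
    stroke(255);
    fill(255);
    ellipse(50, 50, 10, 10);   // one visible circle at a hard-coded point
}
Once that works, swap the hard-coded values for a single point read from your file, and keep going from there.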

Eliminate sudden additions/deletions in D3 line chart transition

You can see this code in action here: http://bl.ocks.org/2626142
This code draws a line chart, then transitions between 3 sample data sets. When moving from a small data set to a larger one, the extra data points suddenly appear instead of smoothly unfolding from the existing line.
When moving from a larger data set to a smaller one, the line is suddenly truncated before transitioning to fill the whole chart.
With this code there are sudden additions and deletions to the line and gridlines. How do I eliminate those?
var data = [
[0,2,3,2,8],
[2,4,1,5,3],
];
var data2 = [
[0,1,2,3,4,5],
[9,8,7,6,5,6],
];
var data3 = [
[1,3,2],
[0,8,5],
];
var w = 300,
h = 100;
var chart = d3.select('body').append('div')
.attr('class', 'chart')
.append('svg:svg')
.attr('width', w)
.attr('height', h);
var color = d3.scale.category10();
function drawdata(data, chart) {
var num = data[0].length-1;
var x = d3.scale.linear().domain([0, num]).range([0,w]);
var y = d3.scale.linear().domain([0, 10]).range([h, 0]);
var line = d3.svg.line()
.x(function(d, i) { return x(i); })
.y(function(d) { return y(d); });
var flat = d3.svg.line()
.x(function(d, i) { return x(i); })
.y(y(-1));
var lines = chart.selectAll('.line')
.data(data);
lines.enter().append('path')
.attr('class', 'line')
.style('stroke', function(d,i) { return color(i); })
.attr('d', line);
lines.transition()
.ease('linear')
.duration(500)
.attr('d', line);
lines.exit().remove();
// legend
var ticks = chart.selectAll('line')
.data(x.ticks(num));
ticks.enter().append('line')
.attr('x1', x)
.attr('x2', x)
.attr('y1', 0)
.attr('y2', h)
.attr('class', 'rule');
ticks.transition()
.ease('linear')
.duration(500)
.attr('x1', x)
.attr('x2', x)
.attr('y1', 0)
.attr('y2', h);
ticks.exit().remove();
}
var dats = [data, data2, data3];
function next() {
var it = dats.shift();
dats.push(it);
drawdata(it, chart);
}
setInterval(next, 2000);
next();
I faced a similar problem recently, and solved it using a custom interpolator for paths:
// Add path interpolator to d3
d3.interpolators.push(function(a, b) {
var isPath, isArea, interpolator, ac, bc, an, bn;
// Create a new array of a given length and fill it with the given value
function fill(value, length) {
return d3.range(length)
.map(function() {
return value;
});
}
// Extract an array of coordinates from the path string
function extractCoordinates(path) {
return path.substr(1, path.length - (isArea ? 2 : 1)).split('L');
}
// Create a path from an array of coordinates
function makePath(coordinates) {
return 'M' + coordinates.join('L') + (isArea ? 'Z' : '');
}
// Buffer the smaller path with coordinates at the same position
function bufferPath(p1, p2) {
var d = p2.length - p1.length;
// Paths created by d3.svg.area() wrap around such that the 'end'
// of the path is in the middle of the list of coordinates
if (isArea) {
return fill(p1[0], d/2).concat(p1, fill(p1[p1.length - 1], d/2));
} else {
return fill(p1[0], d).concat(p1);
}
}
// Regex for matching the 'd' attribute of SVG paths
isPath = /M-?\d*\.?\d*,-?\d*\.?\d*(L-?\d*\.?\d*,-?\d*\.?\d*)*Z?/;
if (isPath.test(a) && isPath.test(b)) {
// A path is considered an area if it closes itself, indicated by a trailing 'Z'
isArea = a[a.length - 1] === 'Z';
ac = extractCoordinates(a);
bc = extractCoordinates(b);
an = ac.length;
bn = bc.length;
// Buffer the ending path if it is smaller than the first
if (an > bn) {
bc = bufferPath(bc, ac);
}
// Or, buffer the starting path if the reverse is true
if (bn > an) {
ac = bufferPath(ac, bc);
}
// Create an interpolator with the buffered paths (if both paths are of the same length,
// the function will end up being the default string interpolator)
interpolator = d3.interpolateString(bn > an ? makePath(ac) : a, an > bn ? makePath(bc) : b);
// If the ending value changed, make sure the final interpolated value is correct
return bn > an ? interpolator : function(t) {
return t === 1 ? b : interpolator(t);
};
}
});
Here's what the original gist looks like with the new interpolator: http://bl.ocks.org/4535474.
Its approach is to 'buffer' the smaller dataset's path by inserting zero-length line segments at the beginning. The effect is that new segments expand out of a single point at the start of the line, and unused segments similarly collapse down to a single point.
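As an illustration with made-up coordinates: going from a 3-point line (a) to a 5-point line (b), bufferPath pads the shorter path with repeated copies of its first coordinate so both strings contain the same number of segments:
var a = "M0,0L10,5L20,3";
var b = "M0,0L5,1L10,5L15,4L20,3";
// buffered a: "M0,0L0,0L0,0L10,5L20,3"  (same shape, now five coordinates)
// d3.interpolateString can then pair the coordinates one-to-one, so the two
// extra segments grow out of the line's first point instead of appearing at once.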
Transitioning between datasets of different sizes (apparently) isn't a common problem, and doesn't have a universal solution. Because I was visualizing time-series data and transitioning between daily/weekly/monthly intervals, I needed the segments towards the end of the path to maintain visual continuity. I can imagine a case in which you'd want to do the same for the beginning of the path, or perhaps expand/contract the path by uniformly buffering segments throughout. Either way the same approach will work.
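For instance, buffering at the end of the path instead of the beginning amounts to a small change in the non-area branch of bufferPath (a hypothetical variant, reusing the fill helper defined above):
function bufferPath(p1, p2) {
    var d = p2.length - p1.length;
    // repeat the final coordinate so new segments expand from (or collapse into) the end
    return p1.concat(fill(p1[p1.length - 1], d));
}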

Drag, drop and shape rotation with Raphael JS

I'm using RaphaelJS 2.0 to create several shapes in a div. Each shape needs to be able to be dragged and dropped within the bounds of the div, independently. Upon double clicking a shape, that shape needs to rotate 90 degrees. It may then be dragged and dropped and rotated again.
I've loaded some code onto jsFiddle: http://jsfiddle.net/QRZMS/. It's basically this:
window.onload = function () {
var angle = 0;
var R = Raphael("paper", "100%", "100%"),
shape1 = R.rect(100, 100, 100, 50).attr({ fill: "red", stroke: "none" }),
shape2 = R.rect(200, 200, 100, 50).attr({ fill: "green", stroke: "none" }),
shape3 = R.rect(300, 300, 100, 50).attr({ fill: "blue", stroke: "none" }),
shape4 = R.rect(400, 400, 100, 50).attr({ fill: "black", stroke: "none" });
var start = function () {
this.ox = this.attr("x");
this.oy = this.attr("y");
},
move = function (dx, dy) {
this.attr({ x: this.ox + dx, y: this.oy + dy });
},
up = function () {
};
R.set(shape1, shape2, shape3, shape4).drag(move, start, up).dblclick(function(){
angle -= 90;
shape1.stop().animate({ transform: "r" + angle }, 1000, "<>");
});
}
The drag and drop is working and also one of the shapes rotates on double click. However, there are two issues/questions:
How can I attach the rotation onto each shape automatically without having to hard-code each item reference into the rotate method? I.e. I just want to draw the shapes once, then have them all automatically exposed to the same behaviour, so they can each be dragged/dropped/rotated independently without having to explicitly apply that behaviour to each shape.
After a shape has been rotated, it no longer drags correctly - as if the drag mouse movement relates to the original orientation of the shape rather than updating when the shape is rotated. How can I get this to work correctly so that shapes can be dragged and rotated many times, seamlessly?
Many thanks for any pointers!
I've tried several times to wrap my head around the new transform engine, to no avail. So, I've gone back to first principles.
I've finally managed to correctly drag and drop an object that's undergone several transformations, after trying to work out the impact of the different transformations - t, T, ...t, ...T, r, R etc.
So, here's the crux of the solution:
var ox = 0;
var oy = 0;

function drag_start(e) {
}

function drag_move(dx, dy, posx, posy) {
    r1.attr({ fill: "#fa0" });
    //
    // Here's the interesting part, apply an absolute transform
    // with the dx,dy coordinates minus the previous value for dx and dy
    //
    r1.attr({
        transform: "...T" + (dx - ox) + "," + (dy - oy)
    });
    //
    // store the previous versions of dx,dy for use in the next move call.
    //
    ox = dx;
    oy = dy;
}

function drag_up(e) {
    // nothing here
}
That's it. Stupidly simple, and I'm sure it's occurred to loads of people already, but maybe someone might find it useful.
Here's a fiddle for you to play around with.
... and this is a working solution for the initial question.
I solved the drag/rotate issue by re-applying all transformations when a value changes. I created a plugin for it.
https://github.com/ElbertF/Raphael.FreeTransform
Demo here:
http://alias.io/raphael/free_transform/
As amadan suggests, it's usually a good idea to create functions when multiple things have the same (initial) attributes/properties. That is indeed the answer to your first question. As for the second question, that is a little more tricky.
When a Raphael object is rotated, so is its coordinate plane. For some reason, Dmitry and a few other sources on the web seem to agree that this is the correct way to implement it. I, like you, disagree. I've not managed to find an all-round good solution, but I did manage to create a workaround. I'll briefly explain and then show the code.
Create a custom attribute to store the current state of rotation
Depending on that attribute you decide how to handle the move.
Providing that you are only going to be rotating shapes by 90 degrees (if not it becomes a lot more difficult) you can determine how the coordinates should be manipulated.
var R = Raphael("paper", "100%", "100%");
//create the custom attribute which will hold the current rotation of the object {0,1,2,3}
R.customAttributes.rotPos = function (num) {
this.node.rotPos = num;
};
var shape1 = insert_rect(R, 100, 100, 100, 50, { fill: "red", stroke: "none" });
var shape2 = insert_rect(R, 200, 200, 100, 50, { fill: "green", stroke: "none" });
var shape3 = insert_rect(R, 300, 300, 100, 50, { fill: "blue", stroke: "none" });
var shape4 = insert_rect(R, 400, 400, 100, 50, { fill: "black", stroke: "none" });
//Generic insert rectangle function
function insert_rect(paper,x,y, w, h, attr) {
var angle = 0;
var rect = paper.rect(x, y, w, h);
rect.attr(attr);
//on creation of the object, set the rotation position to 0
rect.attr({rotPos: 0});
rect.drag(drag_move(), drag_start, drag_up);
//Each time you dbl click the shape, it gets rotated. So increment its rotated state (looping round 4)
rect.dblclick(function(){
var pos = this.attr("rotPos");
pos = (pos + 1) % 4;
this.attr({rotPos: pos});
angle -= 90;
rect.stop().animate({transform: "r" + angle}, 1000, "<>");
});
return rect;
}
//ELEMENT/SET Dragger functions.
function drag_start(e) {
this.ox = this.attr("x");
this.oy = this.attr("y");
};
//Now here is the complicated bit
function drag_move() {
return function(dx, dy) {
//default position, treat drag and drop as normal
if (this.attr("rotPos") == 0) {
this.attr({x: this.ox + dx, y: this.oy + dy});
}
//The shape has now been rotated -90
else if (this.attr("rotPos") == 1) {
this.attr({x:this.ox-dy, y:this.oy + dx});
}
else if (this.attr("rotPos") == 2) {
this.attr({x: this.ox - dx, y: this.oy - dy});
}
else if (this.attr("rotPos") == 3) {
this.attr({x:this.ox+dy, y:this.oy - dx});
}
}
};
function drag_up(e) {
}
I can't really think of a clear, concise way to explain how drag_move works. I think it's probably best that you look at the code and see how it works. Basically, you just need to work out how the x and y variables are now treated from this new rotated state. For example, with rotPos == 1 (the shape rotated by -90 degrees), dragging the mouse 10px to the right gives dx = 10 and dy = 0, so the code sets y to oy + 10; because the element's local axes are rotated by -90 degrees, that change along its local y axis shows up on screen as a 10px move to the right, which is what the user expects. Without drawing lots of graphics I'm not sure I can be clearer than that. (I did a lot of turning my head sideways to work out what it should be doing.)
There are a few drawbacks to this method though:
It only works for 90-degree rotations (a huge amount of extra calculation would be needed to do 45 degrees, never mind an arbitrary angle).
There is a slight movement on drag start after a rotation. This is because the drag takes the old x and y values, which have been rotated. This isn't a massive problem for a shape of this size, but with bigger shapes you will really start to notice them jumping across the canvas.
I'm assuming the reason that you are using transform is that you can animate the rotation. If this isn't necessary then you could use the .rotate() function which always rotates around the center of the element and so would eliminate the 2nd drawback I mentioned.
This isn't a complete solution, but it should definitely get you going along the correct path. I would be interested to see a full working version.
I've also created a version of this on jsfiddle which you can view here: http://jsfiddle.net/QRZMS/3/
Good luck.
I usually create an object for my shape and write the event handling into the object.
function shape(x, y, width, height, a) {
    var that = this;
    that.angle = 0;
    that.rect = R.rect(x, y, width, height).attr(a);
    that.rect.dblclick(function() {
        that.angle -= 90;
        that.rect.stop().animate({
            transform: "r" + that.angle
        }, 1000, "<>");
    });
    return that;
}
In the above, the constructor not only creates the rectangle, but sets up the double click event.
One thing to note is that a reference to the object is stored in "that". This is because the "this" reference changes depending on the scope. In the dblClick function I need to refer to the rect and angle values from my object, so I use the stored reference that.rect and that.angle
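Creating the four rectangles from the question would then look something like this (a hypothetical usage sketch, assuming R is the Raphael paper from your code):
var shape1 = new shape(100, 100, 100, 50, { fill: "red", stroke: "none" });
var shape2 = new shape(200, 200, 100, 50, { fill: "green", stroke: "none" });
var shape3 = new shape(300, 300, 100, 50, { fill: "blue", stroke: "none" });
var shape4 = new shape(400, 400, 100, 50, { fill: "black", stroke: "none" });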
See this example (updated from a slightly dodgy previous instance)
There may be better ways of doing what you need, but this should work for you.
Hope it helps,
Nick
Addendum: Dan, if you're really stuck on this, and can live without some of the things that Raphael2 gives you, I'd recommend moving back to Raphael 1.5.x. Transforms were just added to Raphael2, the rotation/translation/scale code is entirely different (and easier) in 1.5.2.
Look at me, updating my post, hoping for karma...
If you don't want to use ElbertF's library, you can transform the Cartesian coordinates into polar coordinates.
Then add or subtract the angle and transform back into Cartesian coordinates.
You can see this in the example below, where a rect is rotated 45 degrees (into a rhombus shape) and then dragged.
HTML
<div id="foo">
</div>
JAVASCRIPT
var paper = Raphael(40, 40, 400, 400);
var c = paper.rect(40, 40, 40, 40).attr({
    fill: "#CC9910",
    stroke: "none",
    cursor: "move"
});
c.transform("t0,0r45t0,0");

var start = function () {
        this.ox = this.type == "rect" ? this.attr("x") : this.attr("cx");
        this.oy = this.type == "rect" ? this.attr("y") : this.attr("cy");
    },
    move = function (dx, dy) {
        var r = Math.sqrt(Math.pow(dx, 2) + Math.pow(dy, 2));
        var ang = Math.atan2(dy, dx);
        ang = ang - Math.PI / 4;
        dx = r * Math.cos(ang);
        dy = r * Math.sin(ang);
        var att = this.type == "rect" ? { x: this.ox + dx, y: this.oy + dy } : { cx: this.ox + dx, cy: this.oy + dy };
        this.attr(att);
    },
    up = function () {
    };

c.drag(move, start, up);
DEMO
http://jsfiddle.net/Ef83k/74/
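A rough sketch of the same idea generalized to an arbitrary rotation (hypothetical; it assumes the element's current rotation in degrees is kept in a variable, here called angle):
move = function (dx, dy) {
    var r = Math.sqrt(dx * dx + dy * dy);                 // length of the drag vector
    var a = Math.atan2(dy, dx) - angle * Math.PI / 180;   // undo the current rotation
    this.attr({ x: this.ox + r * Math.cos(a), y: this.oy + r * Math.sin(a) });
};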
My first thought was to use getBBox(false) to capture the x,y coordinates of the object after the transform, then removeChild() the original Raphael object from the canvas, then redraw the object using the coordinate data from getBBox(false). A hack, but I have it working.
One note though: since getBBox(false) returns the corner coordinates (x, y) of the object, you need to calculate the center of the re-drawn object by doing...
x = box['x'] + ( box['width'] / 2 );
y = box['y'] + ( box['height'] / 2 );
where
box = shapeObj.getBBox( false );
Another way to solve the same problem.

Touches on transparent PNGs

I have a PNG in a UIImageView with alpha around the edges (let's say a circle). When I tap it, I want it to register as a tap for the circle if I'm touching the opaque bit, but a tap for the view behind if I touch the transparent bit.
(BTW: On another forum, someone said PNGs automatically do this, and a transparent PNG should pass the click on to the view below, but I've tested it and it doesn't, at least not in my case.)
Is there a flag I just haven't flipped, or do I need to create some kind of formula: "if tapped { get location; calculate distance from centre; if < r { touched circle } else { pass it on } }"?
-k.
I don't believe that PNGs automatically do this, but can't find any references that definitively say one way or the other.
Your radius calculation is probably simpler, but you could also manually check the alpha value of the touched pixel in your image to determine whether to count it as a hit. This code is targeted at OS X 10.5+, but with some minor modifications it should run on iPhone: Getting the pixel data from a CGImage object. Here is some related discussion on retrieving data from a UIImage: Getting data from an UIImage.
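A rough sketch of that idea in Objective-C (an illustration of the approach, not drop-in code; it assumes a UIImageView subclass with userInteractionEnabled set to YES): render just the touched pixel of the image into a 1x1 bitmap and look at its alpha value.
- (BOOL)pointInside:(CGPoint)point withEvent:(UIEvent *)event {
    unsigned char pixel[4] = {0, 0, 0, 0};
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(pixel, 1, 1, 8, 4, colorSpace,
        kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGColorSpaceRelease(colorSpace);

    // Shift the context so the touched point lands on the single pixel,
    // then draw the image the same way the view draws it.
    CGContextTranslateCTM(context, -point.x, -point.y);
    UIGraphicsPushContext(context);
    [self.image drawInRect:self.bounds];
    UIGraphicsPopContext();
    CGContextRelease(context);

    // Count the touch as a hit only if the pixel is (mostly) opaque;
    // otherwise let it fall through to the view behind.
    return pixel[3] > 10;
}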
I figured it out... the PNG bounding-box transparency issue and being able to click through to another image behind:
var hitTestPoint1:Boolean = false;
var myHitTest1:Boolean = false;
var objects:Array;
clip.addEventListener(MouseEvent.MOUSE_DOWN, doHitTest);
clip.addEventListener(MouseEvent.MOUSE_UP, stopDragging);
clip.buttonMode = true;
clip.mouseEnabled = true;
clip.mouseChildren = true;
clip2.addEventListener(MouseEvent.MOUSE_DOWN, doHitTest);
clip2.addEventListener(MouseEvent.MOUSE_UP, stopDragging);
clip2.buttonMode = true;
clip2.mouseEnabled = true;
clip2.mouseChildren = true;
clip.rotation = 60;
function doHitTest(event:MouseEvent):void
{
objects = stage.getObjectsUnderPoint(new Point(event.stageX, event.stageY));
trace("Which one: " + event.target.name);
trace("What's under point: " + objects);
for(var i:int=0; i
function stopDragging(event:MouseEvent):void
{
event.target.stopDrag();
}
function realHitTest(object:DisplayObject, point:Point):Boolean
{
/* If we're already dealing with a BitmapData object then we just use the hitTest
* method of that BitmapData.
*/
if(object is BitmapData)
{
return (object as BitmapData).hitTest(new Point(0,0), 0, object.globalToLocal(point));
}
else {
/* First we check if the hitTestPoint method returns false. If it does, that
* means that we definitely do not have a hit, so we return false. But if this
* returns true, we still don't know 100% that we have a hit because it might
* be a transparent part of the image.
*/
if(!object.hitTestPoint(point.x, point.y, true))
{
return false;
}
else {
/* So now we make a new BitmapData object and draw the pixels of our object
* in there. Then we use the hitTest method of that BitmapData object to
* really find out if we have a hit or not.
*/
var bmapData:BitmapData = new BitmapData(object.width, object.height, true, 0x00000000);
bmapData.draw(object, new Matrix());
var returnVal:Boolean = bmapData.hitTest(new Point(0,0), 0, object.globalToLocal(point));
bmapData.dispose();
return returnVal;
}
}
}