I have a route that combines vehicle and pedestrian modes. When the HERE SDK creates it, I want to render the pedestrian parts with dashed lines or a different color so users can tell them apart.
I parsed the route's sections for sectionTransportMode like this:
_routeCalculator.calculatePedestrianRoute(waypoints,
    (HERE.RoutingError? routingError, List<HERE.Route>? routeList) async {
  if (routingError == null) {
    HERE.Route _calculatedRoute = routeList!.first;
    _calculatedRoute.sections.forEach((element) {
      print('TransportMode: ' + element.sectionTransportMode.toString());
    });
    _showRouteOnMap(_calculatedRoute);
    _startNavigationOnRoute(isSimulated, _calculatedRoute);
  } else {
    final error = routingError.toString();
    _showDialog('Error', 'Error while calculating a pedestrian route: $error');
  }
});
But how can I do that after this code snippet?
A MapPolyline consists of three elements:
A list of two or more geographic coordinates that define where to place the polyline on the map.
A GeoPolyline that contains this list of coordinates.
Style parameters such as DashPattern or LineCap to define how to visualize the polyline.
https://developer.here.com/documentation/android-sdk-navigate/4.8.3.0/dev_guide/topics/map-items.html#add-map-polylines
When calling showRouteOnMap, you can create the MapPolyline with dashed lines. Here is an example:
private void showRouteOnMap(Route route) {
    // Show route as polyline.
    GeoPolyline routeGeoPolyline;
    try {
        routeGeoPolyline = new GeoPolyline(route.getPolyline());
    } catch (InstantiationErrorException e) {
        // It should never happen that a route polyline contains less than two vertices.
        return;
    }

    float widthInPixels = 20;
    // Dark blue, for example:
    // Color lineColor = Color.valueOf(0f, 0f, 0.55f, 1f); // RGBA
    MapPolyline routeMapPolyline = new MapPolyline(routeGeoPolyline,
            widthInPixels,
            Color.valueOf(0, 0.56f, 0.54f, 0.63f)); // RGBA
    // Render the polyline with a dash pattern.
    routeMapPolyline.setDashPattern(new DashPattern(10));
    // routeMapPolyline.setDashFillColor(lineColor);
    mapView.getMapScene().addMapPolyline(routeMapPolyline);
    mapPolylines.add(routeMapPolyline);

    // Draw a circle to indicate starting point and destination.
    addCircleMapMarker(startGeoCoordinates, R.drawable.green_dot);
    addCircleMapMarker(destinationGeoCoordinates, R.drawable.green_dot);

    // Log maneuver instructions per route section.
    List<Section> sections = route.getSections();
    for (Section section : sections) {
        logManeuverInstructions(section);
    }
}
Please refer to this example available on GitHub:
https://github.com/heremaps/here-sdk-examples/tree/master/examples/latest/navigate/flutter/routing_hybrid_app
I'm playing with creating a Konva-based GridLayer for Leaflet (basically an abstraction around canvas elements to try and render tens of thousands of features efficiently). I have some code that seems to work to some degree (the lines in my sample data seem to line up with what I would expect), but I am getting strange behavior. Specifically, features will seem to visibly "teleport" or disappear completely. Additionally, it is not uncommon to see breaks in lines at the edges of the tiles. I suspect this means I'm calculating the pixel location within each tile incorrectly (although it's certainly possible something else is wrong).

I am basically identifying the pixel location of the tile (x, y in renderStage()), and am translating the map pixel position by that many pixels (pt.x and pt.y, generated by projecting the lat/lon). This is intended to create an array of [x1, y1, x2, y2, ...], which can be rendered to the individual tile. Everything is expected to be in EPSG:4326.
Does anyone know how to properly project lat/lon to pixel coordinates within individual tiles of a GridLayer? There are plenty of examples for doing it for the entire map, but this doesn't seem to translate cleanly into how to find those same pixel locations in tiles offset from the upper left of the map.
import { GridLayer, withLeaflet } from "react-leaflet";
import { GridLayer as LeafletGridLayer } from "leaflet";
import { Stage, Line, FastLayer } from "konva";
import * as Util from "leaflet/src/core/Util";
import _ from "lodash";

export const CollectionLayer = LeafletGridLayer.extend({
  options: {
    tileSize: 256
  },

  initialize: function(collection, props) {
    Util.setOptions(this, props);
    this.collection = collection;
    this.stages = new Map();
    this.shapes = {};
    this.cached = {};
    this.on("tileunload", (e) => {
      const stage = this.stages[e.coords];
      if (stage) {
        this.stages.delete(e.coords);
        stage.destroy();
      }
    });
  },

  renderStage: function(stage, coords, tileBounds) {
    const x = coords.x * this._tileSize.x;
    const y = coords.y * this._tileSize.y;
    const z = coords.z;
    const layer = stage.getLayers()[0];
    if (!layer || !tileBounds) return;
    _.each(this.collection.data, (entity, id) => {
      if (entity.bounds && tileBounds.intersects(entity.bounds)) {
        let shape = this.shapes[id];
        if (!shape) {
          shape = new Line();
          shape.shadowForStrokeEnabled(false);
          this.shapes[id] = shape;
        }
        layer.add(shape);
        const points = entity.position.reduce((pts, p) => {
          const pt = this._map.project([p.value[1], p.value[0]], this._tileZoom);
          pts.push(pt.x - x);
          pts.push(pt.y - y);
          return pts;
        }, []);
        shape.points(points);
        shape.stroke("red");
        shape.strokeWidth(2);
        this.shapes[id] = shape;
      }
    });
    layer.batchDraw();
  },

  createTile: function(coords) {
    const tile = document.createElement("div");
    const tileSize = this.getTileSize();
    const stage = new Stage({
      container: tile,
      width: tileSize.x,
      height: tileSize.y
    });
    const bounds = this._tileCoordsToBounds(coords);
    const layer = new FastLayer();
    stage.add(layer);
    this.stages[coords] = stage;
    this.renderStage(stage, coords, bounds);
    return tile;
  }
});

class ReactCollectionLayer extends GridLayer {
  createLeafletElement(props) {
    console.log("PROPS", props);
    return new CollectionLayer(props.collection.data, this.getOptions(props));
  }

  updateLeafletElement(fromProps, toProps) {
    super.updateLeafletElement(fromProps, toProps);
    if (this.leafletElement.collection !== toProps.collection) {
      this.leafletElement.collection = toProps.collection;
      this.leafletElement.redraw();
    }
  }
}

export default withLeaflet(ReactCollectionLayer);
class ReactCollectionLayer extends GridLayer {
createLeafletElement(props) {
console.log("PROPS", props);
return new CollectionLayer(props.collection.data, this.getOptions(props));
}
updateLeafletElement(fromProps, toProps) {
super.updateLeafletElement(fromProps, toProps);
if (this.leafletElement.collection !== toProps.collection) {
this.leafletElement.collection = toProps.collection
this.leafletElement.redraw();
}
}
}
export default withLeaflet(ReactCollectionLayer);
Everything is expected to be in EPSG:4326.
No.
Once you are dealing with raster data (image tiles), everything is expected to be either in the map's display CRS, which is (by default) EPSG:3857, or in pixels relative to the CRS origin. These concepts are explained a bit more in-depth in one of Leaflet's tutorials.
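For illustration, here is a minimal sketch of those conversions (the map instance name and the coordinates are placeholders, not from the question):
// project() converts a LatLng into pixel coordinates relative to the CRS
// origin at the given zoom level; unproject() converts back.
const px = map.project([51.505, -0.09], map.getZoom()); // an L.Point {x, y}
const ll = map.unproject(px, map.getZoom());            // back to an L.LatLng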
In fact, you seem to be working in pixels here, at least for your points:
const pt = this._map.project([p.value[1], p.value[0]], this._tileZoom)
However, your calculation of the pixel offset for each tile is too naïve:
const x = coords.x * this._tileSize.x
const y = coords.y * this._tileSize.y
That should instead rely on the private method _getTiledPixelBounds of L.GridLayer, e.g.:
const tilePixelBounds = this._getTiledPixelBounds(this._map.getCenter());
const x = tilePixelBounds.min.x;
const y = tilePixelBounds.min.y;
And use these bounds to add some sanity checks while looping through the points:
const pt = this._map.project([p.value[1], p.value[0]], this._tileZoom);
if (!tilePixelBounds.contains(pt)) { console.error(....); }
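Putting these suggestions together, the point loop inside renderStage() might look like the following sketch (variable names are taken from the question; note that _getTiledPixelBounds is a private Leaflet method, so its signature may change between versions):
// Pixel bounds of the currently loaded tile range, relative to the CRS origin.
const tilePixelBounds = this._getTiledPixelBounds(this._map.getCenter());
const x = tilePixelBounds.min.x;
const y = tilePixelBounds.min.y;
// Project each lat/lon pair and offset it into tile-local pixel space.
const points = entity.position.reduce((pts, p) => {
  const pt = this._map.project([p.value[1], p.value[0]], this._tileZoom);
  if (!tilePixelBounds.contains(pt)) {
    console.error('Point falls outside the tile pixel bounds:', pt);
  }
  pts.push(pt.x - x);
  pts.push(pt.y - y);
  return pts;
}, []);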
On the other hand:
[...] an abstraction around canvas elements to try and render tens of thousands of features efficiently
I don't think using Konva to actually draw items on a <canvas> is going to improve performance - the methods used are the same ones used by Leaflet (and, if we're talking about tiling vector data, the same ones used by Leaflet.VectorGrid). Ten thousand calls to canvas draw functions are going to take the same time no matter which library sits on top. If you have time to consider other alternatives, Leaflet.GLMarkers and its WebGL rendering might offer better performance, at the price of less compatibility and higher integration costs.
I would like to be able to zoom into an ILNumerics scene viewed by a camera (as in scene.Camera), with the center point of the zoom determined by where the mouse pointer is located when I start spinning the mouse scroll wheel. The default zoom behavior is for the zoom center to be at the scene.Camera.LookAt point. So I guess this would require the mouse to be tracked in (X, Y) continuously and for that point to be used as the new LookAt point? This seems similar to this post on getting the 3D coordinates from a mouse click, but in my case there's no click to indicate the location of the mouse.
Tips would be greatly appreciated!
BTW, this kind of zoom method is standard operating procedure in CAD software to zoom in and out on an assembly of parts. It's super convenient for the user.
One approach is to overload the MouseWheel event handler. The current coordinates of the mouse are available here, too.
Use the mouse screen coordinates to acquire (to "pick") the world coordinate corresponding to the primitive under the mouse.
Adjust the Camera.Position and Camera.ZoomFactor to 'move' the camera closer to the point under the mouse and to achieve the required 'directional zoom' effect.
Here is a complete example from the ILNumerics website:
using System;
using System.Windows.Forms;
using ILNumerics;
using ILNumerics.Drawing;
using ILNumerics.Drawing.Plotting;
using static ILNumerics.Globals;
using static ILNumerics.ILMath;

namespace ILNumerics.Examples.DirectionalZoom {
    public partial class Form1 : Form {
        public Form1() {
            InitializeComponent();
        }

        private void panel2_Load(object sender, EventArgs e) {
            Array<float> X = 0, Y = 0, Z = CreateData(X, Y);
            var surface = new Surface(Z, X, Y, colormap: Colormaps.Winter);
            surface.UseLighting = true;
            surface.Wireframe.Visible = false;
            panel2.Scene.Camera.Add(surface);
            // setup mouse handlers
            panel2.Scene.Camera.Projection = Projection.Orthographic;
            panel2.Scene.Camera.MouseDoubleClick += Camera_MouseDoubleClick;
            panel2.Scene.Camera.MouseWheel += Camera_MouseWheel;
            // initial zoom all
            ShowAll(panel2.Scene.Camera);
        }

        private void Camera_MouseWheel(object sender, Drawing.MouseEventArgs e) {
            // Update: added comments.
            // The next conditionals help to sort out some calls not needed. Helpful for performance.
            if (!e.DirectionUp) return;
            if (!(e.Target is Triangles)) return;
            // Make sure to start with the SceneSyncRoot - the copy of the scene which receives
            // user interaction and is eventually used for rendering. See: https://ilnumerics.net/scene-management.html
            var cam = panel2.SceneSyncRoot.First<Camera>();
            if (Equals(cam, null)) return; // TODO: error handling. (Should not happen in regular setup, though.)
            // In case the user has configured limited interaction:
            if (!cam.AllowZoom) return;
            if (!cam.AllowPan) return; // this kind of directional zoom "comprises" a pan operation, to some extent.
            // Find mouse coordinates. Works only if the mouse is over a Triangles shape (surfaces, but not wireframes):
            using (var pick = panel2.PickPrimitiveAt(e.Target as Drawable, e.Location)) {
                if (pick.NextVertex.IsEmpty) return;
                // Acquire the target vertex coordinates (world coordinates) of the mouse...
                Array<float> vert = pick.VerticesWorld[pick.NextVertex[0], r(0, 2), 0];
                // ... and transform them into a Vector3 for easier computations.
                var vertVec = new Vector3(vert.GetValue(0), vert.GetValue(1), vert.GetValue(2));
                // Perform zoom: we move the camera closer to the target.
                float scale = Math.Sign(e.Delta) * (e.ShiftPressed ? 0.01f : 0.2f); // adjust for faster / slower zoom
                var offs = (cam.Position - vertVec) * scale; // direction on the line cam.Position -> target vertex
                cam.Position += offs; // move the camera on that line
                cam.LookAt += offs;   // keep the camera orientation
                cam.ZoomFactor *= (1 + scale);
                // TODO: consider adding: the LookAt point has now moved away from the center / the surface due to our zoom.
                // In order for better rotations it makes sense to place the LookAt point back onto the surface,
                // by adjusting cam.LookAt appropriately. Otherwise, one could use cam.RotationCenter.
                e.Cancel = true;  // don't execute common mouse wheel handlers
                e.Refresh = true; // immediate redraw at the end of event handling
            }
        }

        private void Camera_MouseDoubleClick(object sender, Drawing.MouseEventArgs e) {
            var cam = panel2.Scene.Camera;
            ShowAll(cam);
            e.Cancel = true;
            e.Refresh = true;
        }

        // Some sample data. Replace this with your own data!
        private static RetArray<float> CreateData(OutArray<float> Xout, OutArray<float> Yout) {
            using (Scope.Enter()) {
                Array<float> x_ = linspace<float>(0, 20, 100);
                Array<float> y_ = linspace<float>(0, 18, 80);
                Array<float> Y = 1, X = meshgrid(x_, y_, Y);
                Array<float> Z = abs(sin(sin(X) + cos(Y))) + .01f * abs(sin(X * Y));
                if (!isnull(Xout)) {
                    Xout.a = X;
                }
                if (!isnull(Yout)) {
                    Yout.a = Y;
                }
                return -Z;
            }
        }

        // See: https://ilnumerics.net/examples.php?exid=7b0b4173d8f0125186aaa19ee8e09d2d
        public static double ShowAll(Camera cam) {
            // Update: adjusts the camera Position too.
            // This example works only with orthographic projection. You will need to take the view frustum
            // into account if you want to make this method work with perspective projection also. However,
            // the general functioning would be similar...
            if (cam.Projection != Projection.Orthographic) {
                throw new NotImplementedException();
            }
            // Get the overall extent of the camera's scene content.
            var limits = cam.GetLimits();
            // Take the maximum of width / height.
            var maxExt = limits.HeightF > limits.WidthF ? limits.HeightF : limits.WidthF;
            // Make sure the camera looks at the unrotated bounding box.
            cam.Reset();
            // Center the camera view.
            cam.LookAt = limits.CenterF;
            cam.Position = cam.LookAt + Vector3.UnitZ * 10;
            // Apply the zoom factor: the zoom factor will scale the 'left', 'top', 'bottom', 'right' limits
            // of the view. In order to fit exactly, we must take the "radius".
            cam.ZoomFactor = maxExt * .50;
            return cam.ZoomFactor;
        }
    }
}
Note that the new handler performs the directional zoom only when the mouse is located over an object held by this Camera! If, instead, the mouse is placed on the background of the scene or over some other Camera / plot cube object, no effect will be visible and the common zoom feature is performed (zooming in/out to the look-at point).
I have an app with Bing Maps and I need to draw the route of the user dynamically.
Right now I can only trace the user's position with a pushpin. Is there some method to draw the route?
UPDATE 1:
I'm using this handler assigned to geolocator.positionChanged:
private void geolocator_DrawRoute(Geolocator sender, PositionChangedEventArgs args)
{
    // Need to get back onto UI thread before updating location information
    this.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, new DispatchedHandler(
        () =>
        {
            // Get the current location
            Location location = new Location(args.Position.Coordinate.Point.Position.Latitude,
                                             args.Position.Coordinate.Point.Position.Longitude);
            _rotta.Add(location);
            if (_rotta.Count > 1)
            {
                var polyline = new MapPolyline { Locations = _rotta, Color = Colors.Blue, Width = 3 };
                _shapeLayer.Shapes.Add(polyline);
            }
            // Update the position of the GPS pushpin
            MapLayer.SetPosition(GpsPushpin, location);
            // Update the map view to the current GPS location
            MyMap.SetView(location, 18);
        }));
}
UPDATE 2:
private void geolocator_DrawRoute(Geolocator sender, PositionChangedEventArgs args)
{
    // Need to get back onto UI thread before updating location information
    Dispatcher.RunAsync(CoreDispatcherPriority.Normal, new DispatchedHandler(
        () =>
        {
            // Get the current location
            var location = new Location(args.Position.Coordinate.Point.Position.Latitude,
                                        args.Position.Coordinate.Point.Position.Longitude);
            _rottaLoc.Add(location);
            if (_rotta == null)
            {
                _rotta = new MapPolyline { Locations = _rottaLoc, Color = Colors.Blue, Width = 4 };
            }
            else
            {
                _rotta.Locations = _rottaLoc;
            }
            _shapeLayer.Shapes.Add(_rotta);
            // Update the position of the GPS pushpin
            MapLayer.SetPosition(GpsPushpin, location);
            // Update the map view to the current GPS location
            MyMap.SetView(location, 18);
        }));
}
To do this, the first step would be to get the user's position. If you are using JavaScript, you can use the Geolocation API to get the user's location and to monitor it as they move. Here is some documentation on how to do this: http://www.html5rocks.com/en/tutorials/geolocation/trip_meter/
Once you have this done, you can show the user's location fairly easily. If you want to draw a polyline to show their path, then the first time the user's position is grabbed you can create a polyline object where the first and second coordinates are both this first coordinate. You can then add the polyline to the map. Each subsequent time the user's location is grabbed, you simply add the new location to the polyline's array of locations, as sketched below.
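For illustration, here is a minimal sketch of that approach using the Geolocation API together with the Bing Maps JavaScript control (the map variable, color, and thickness are placeholder assumptions):
// Assumes an existing Bing Maps JavaScript map instance named `map`.
var path = [];          // locations collected so far
var routeLine = null;   // polyline showing the user's path

navigator.geolocation.watchPosition(function (pos) {
    var loc = new Microsoft.Maps.Location(pos.coords.latitude, pos.coords.longitude);
    path.push(loc);
    if (!routeLine) {
        // First fix: create the polyline with the first coordinate used twice,
        // then add it to the map once.
        routeLine = new Microsoft.Maps.Polyline([loc, loc],
            { strokeColor: new Microsoft.Maps.Color(255, 0, 0, 255), strokeThickness: 3 });
        map.entities.push(routeLine);
    } else {
        // Later fixes: extend the existing polyline's array of locations.
        routeLine.setLocations(path);
    }
});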
I've been playing with the Google Earth API. I thought it would be neat to draw some lines between places from a relative 3D viewpoint. I've searched through the GE documentation and searched on Google for answers, but didn't find anything that led me down the correct path, so I thought I'd post some code and perhaps get some insight.
The following code plots two places and then draws a line between them. Unfortunately, the line that gets drawn slices through the earth. Is there a method to make it wrap to the contour of the earth when drawn in 3D like this? I've attempted to vary the line's height placement with a varying degree of success, but at the cost of accuracy and overall visual appeal when the line doesn't appear to connect the places.
var ge;

function init() {
    google.earth.createInstance('map3d', initCB, failureCB);
}

function initCB(instance) {
    ge = instance;
    ge.getWindow().setVisibility(true);

    //--------------------------------- PLACES
    // Create the placemark.
    var placemark = ge.createPlacemark('');
    placemark.setName("Location 1");
    // Set the placemark's location.
    var point = ge.createPoint('');
    point.setLatitude(39.96028);
    point.setLongitude(-82.979736);
    placemark.setGeometry(point);
    // Add the placemark to Earth.
    ge.getFeatures().appendChild(placemark);

    // Create the placemark.
    var placemark2 = ge.createPlacemark('');
    placemark2.setName("Hop #2");
    // Set the placemark's location.
    var point2 = ge.createPoint('');
    point2.setLatitude(25.7615);
    point2.setLongitude(-80.2939);
    placemark2.setGeometry(point2);
    // Add the placemark to Earth.
    ge.getFeatures().appendChild(placemark2);

    //--------------------------------- FOCUS
    var lookAt = ge.createLookAt('');
    lookAt.setLatitude(39.96028);
    lookAt.setLongitude(-82.979736);
    lookAt.setRange(1000000.0);
    lookAt.setAltitude(0);
    lookAt.setTilt(45);
    ge.getView().setAbstractView(lookAt);

    //--------------------------------- LINES
    // Create the placemark.
    var lineStringPlacemark = ge.createPlacemark('');
    // Create the LineString.
    var lineString = ge.createLineString('');
    lineStringPlacemark.setGeometry(lineString);
    // Add LineString points.
    lineString.getCoordinates().pushLatLngAlt(39.96028, -82.979736, 0);
    lineString.getCoordinates().pushLatLngAlt(25.7615, -80.2939, 0);
    //lineString.setAltitudeMode(ge.ALTITUDE_CLAMP_TO_GROUND);
    //lineString.setAltitudeMode(ge.ALTITUDE_RELATIVE_TO_GROUND);
    lineString.setAltitudeMode(ge.ALTITUDE_ABSOLUTE);
    // Create a style and set width and color of the line.
    lineStringPlacemark.setStyleSelector(ge.createStyle(''));
    var lineStyle = lineStringPlacemark.getStyleSelector().getLineStyle();
    lineStyle.setWidth(2);
    lineStyle.getColor().set('9900ffff'); // aabbggrr format
    // Add the feature to Earth.
    ge.getFeatures().appendChild(lineStringPlacemark);
}

function failureCB(errorCode) {
}

google.setOnLoadCallback(init);
You will want to set tessellation, and optionally extrusion, on your LineString to true.
See https://developers.google.com/kml/documentation/kmlreference#tessellate and https://developers.google.com/kml/documentation/kmlreference#extrude for details
For the API, your syntax would be something like the following (note that tessellate and extrude are properties of the LineString geometry, not of the placemark):
lineString.setTessellate(true);
lineString.setExtrude(true);
There's some additional API examples on this at https://developers.google.com/earth/documentation/geometries
I just started coding with Google Earth using the GEPlugin control for .NET and still have a lot to learn.
What has got me puzzled is when I try to drag a polygon.
The method below is called whenever the mousemove event fires and should move each point of the polygon while retaining the original shape of the polygon. The lat/long for each point is changed, but the polygon does not move position on the map.
Will moving a point in a polygon cause it to redraw? Do I need to call a method to force a redraw, or perhaps do something else entirely?
Thanks!
private void DoMouseMove(IKmlMouseEvent mouseEvent)
{
    if (isDragging)
    {
        mouseEvent.preventDefault();
        var placemark = mouseEvent.getTarget() as IKmlPlacemark;
        if (placemark == null)
        {
            return;
        }

        IKmlPolygon polygon = placemark.getGeometry() as IKmlPolygon;
        if (polygon != null)
        {
            float latOffset = (float)(startLatLong.Latitude - mouseEvent.getLatitude());
            float longOffset = (float)(startLatLong.Longitude - mouseEvent.getLongitude());
            KmlLinearRingCoClass outer = polygon.getOuterBoundary();
            KmlCoordArrayCoClass coordsArray = outer.getCoordinates();

            for (int i = 0; i < coordsArray.getLength(); i++)
            {
                KmlCoordCoClass currentPoint = coordsArray.get(i);
                currentPoint.setLatLngAlt(currentPoint.getLatitude() + latOffset,
                    currentPoint.getLongitude() + longOffset, 0);
            }
        }
    }
}
Consider voting for these issues to be resolved:
http://code.google.com/p/earth-api-utility-library/issues/detail?id=33
http://code.google.com/p/earth-api-samples/issues/detail?id=167
You may find some hints at the following link:
http://earth-api-utility-library.googlecode.com/svn/trunk/extensions/examples/ruler.html
UPDATE:
I've released the extension library: https://bitbucket.org/mutopia/earth
See https://bitbucket.org/mutopia/earth/src/master/sample/index.html to run it.
See the drag() method in the sample code class, which calls setDragMode() and addDragEvent() to enable dragging of the KmlPolygon.
I successfully implemented this using takeOverCamera in the earth-api-utility-library and three events:
setDragMode: function (mode) {
    // summary:
    //      Sets dragging mode on and off
    if (mode == this.dragMode) {
        Log.info('Drag mode is already', mode);
    } else {
        this.dragMode = mode;
        Log.info('Drag mode set', mode);
        if (mode) {
            this.addEvent(this.ge.getGlobe(), 'mousemove', this.dragMouseMoveCallback);
            this.addEvent(this.ge.getGlobe(), 'mouseup', this.dragMouseUpCallback);
            this.addEvent(this.ge.getView(), 'viewchange', this.dragViewChange, false);
        } else {
            this.removeEvent(this.ge.getGlobe(), 'mousemove', this.dragMouseMoveCallback);
            this.removeEvent(this.ge.getGlobe(), 'mouseup', this.dragMouseUpCallback);
            this.removeEvent(this.ge.getView(), 'viewchange', this.dragViewChange, false);
        }
    }
},
This is in a utility library within a much larger project. dragMode is a boolean flag that controls whether the events are added or removed. These three events control what happens when you drag. addEvent and removeEvent are my own wrapper functions:
addEvent: function (targetObject, eventID, listenerCallback, capture) {
    // summary:
    //      Convenience method for google.earth.addEventListener
    capture = setDefault(capture, true);
    google.earth.addEventListener(targetObject, eventID, listenerCallback, capture);
},

removeEvent: function (targetObject, eventID, listenerCallback, capture) {
    // summary:
    //      Convenience method for google.earth.removeEventListener
    capture = setDefault(capture, true);
    google.earth.removeEventListener(targetObject, eventID, listenerCallback, capture);
},
Ignoring the minor details, all the important stuff is in the callbacks to those events. The mousedown event locks the camera and sets the polygon I'm dragging as the dragObject (it's just a variable I'm using). It also saves the original lat/long coordinates.
this.dragMouseDownCallback = lang.hitch(this, function (event) {
    var obj = event.getTarget();
    this.lockCamera(true);
    this.setSelected(obj);
    this.dragObject = obj;
    this.dragLatOrigin = this.dragLatLast = event.getLatitude();
    this.dragLngOrigin = this.dragLngLast = event.getLongitude();
});
The mousemove callback updates to the latest lat/long coordinates:
this.dragMouseMoveCallback = lang.hitch(this, function (event) {
    if (this.dragObject) {
        var lat = event.getLatitude();
        var lng = event.getLongitude();
        var latDiff = lat - this.dragLatLast;
        var lngDiff = lng - this.dragLngLast;
        if (Math.abs(latDiff) > this.dragSensitivity || Math.abs(lngDiff) > this.dragSensitivity) {
            this.addPolyCoords(this.dragObject, [latDiff, lngDiff]);
            this.dragLatLast = lat;
            this.dragLngLast = lng;
        }
    }
});
Here I'm using some fancy sensitivity values to prevent updating this too often. Finally, addPolyCoords is also my own function, which adds lat/long values to the existing coordinates of the polygon - effectively moving it across the globe. I do this with the built-in setLatitude() and setLongitude() functions for each coordinate. You can get the coordinates like so (a sketch of such a helper follows below), where polygon is a KmlPolygon object:
polygon.getGeometry().getOuterBoundary().getCoordinates()
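For reference, here is a sketch of what such an addPolyCoords helper might look like (this is an assumption reconstructed from the calls above, not the author's actual code; note that get() returns a copy of the coordinate, so the modified value has to be written back with set()):
// Hypothetical helper: shifts every outer-boundary coordinate of `polygon`
// by offset = [latDiff, lngDiff]. Not the author's actual implementation.
addPolyCoords: function (polygon, offset) {
    var coords = polygon.getGeometry().getOuterBoundary().getCoordinates();
    for (var i = 0; i < coords.getLength(); i++) {
        var coord = coords.get(i);                           // returns a copy
        coord.setLatitude(coord.getLatitude() + offset[0]);
        coord.setLongitude(coord.getLongitude() + offset[1]);
        coords.set(i, coord);                                // write it back
    }
},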
And of course, the mouseup callback turns off the drag mode so that moving the mouse doesn't continue to drag the polygon:
this.dragMouseUpCallback = lang.hitch(this, function (event) {
    if (this.dragObject) {
        Log.info('Stop drag', this.dragObject.getType());
        setTimeout(lang.hitch(this, function () {
            this.lockCamera(false);
            this.setSelected(null);
        }), 100);
        this._dragEvent(event);
        this.dragObject = this.dragLatOrigin = this.dragLngOrigin = this.dragLatLast = this.dragLngLast = null;
    }
});
And finally, _dragEvent is called to ensure that the final coordinates are the actual coordinates the mouse event finished with (and not the latest mousemove call):
_dragEvent: function (event) {
    // summary:
    //      Helper function for moving the drag object
    var latDiff = event.getLatitude() - this.dragLatLast;
    var lngDiff = event.getLongitude() - this.dragLngLast;
    if (!(latDiff == 0 && lngDiff == 0)) {
        this.addPolyCoords(this.dragObject, [latDiff, lngDiff]);
        Log.info('Moved ' + latDiff + ', ' + lngDiff);
    }
},
The mousemove callback isn't too important and can actually be ignored - the only reason I use it is to show the polygon moving as the user moves their mouse. Removing it will result in the object being moved when they lift their mouse up.
Hopefully this incredibly long answer gives you some insights into how to implement dragging in the Google Earth API. And I also plan to release my library in the future when I've ironed out the kinks :)