I have 15 boxes, and I add an image to each box node as its first material. How can I load the images from Firebase and apply one to each node? I already know how to load images from Firebase; I want to know the best way to apply an image to each of the 15 nodes. Thanks.
Here's the view controller:
class newsVC: UIViewController, presentVCProtocol {
@IBOutlet var scnView: SCNView!
var newsScene = NewsScene(create: true)
var screenSize: CGRect!
var screenWidth: CGFloat!
var screenHeight: CGFloat!
var posts = [ArtModel]()
var post: ArtModel!
var arts = [ArtModel]()
static var imageCache: NSCache<NSString, UIImage> = NSCache()
var type: String!
override func viewDidLoad() {
super.viewDidLoad()
let scnView = self.scnView!
let scene = newsScene
scnView.scene = scene
scnView.autoenablesDefaultLighting = true
scnView.backgroundColor = UIColor.white
DataService.ds.REF_USERS.child((FIRAuth.auth()?.currentUser?.uid)!).child("arts").observe(.value) { (snapshot: FIRDataSnapshot) in
self.posts = []
if let snapshot = snapshot.children.allObjects as? [FIRDataSnapshot] {
for snap in snapshot {
if let postDict = snap.value as? Dictionary<String, AnyObject> {
let key = snap.key
let post = ArtModel(key: key, artData: postDict)
self.posts.insert(post, at: 0)
}
}
}
}
}
func configureView(_ post: ArtModel, img: UIImage? = nil, imageView: FXImageView? = nil) {
self.post = post
if img != nil {
self.newsScene.setup(image: img!)
print("IMAGE:\(img)")
} else {
let ref = FIRStorage.storage().reference(forURL: post.imgUrl)
ref.data(withMaxSize: 2 * 1024 * 1024, completion: { (data, error) in
if error != nil {
print("JESS: Unable to download image from Firebase storage")
print("Unable to download image: \(error?.localizedDescription)")
} else {
print("JESS: Image downloaded from Firebase storage")
if let imgData = data {
if let img = UIImage(data: imgData) {
self.newsScene.setup(image: img)
print("IMAGE:\(img)")
ProfileVC.imageCache.setObject(img, forKey: post.imgUrl as NSString)
}
}
}
})
}
}
}
Here's the NewsScene:
import SceneKit
import CoreMotion
import FirebaseAuth
import FirebaseStorage
import FirebaseDatabase
class NewsScene: SCNScene {
var geometry = SCNBox()
var boxnode = SCNNode()
var art: ArtModel!
var artImage = UIImage()
var index = IndexPath()
var geo = SCNBox()
var cameranode = SCNNode()
convenience init(create: Bool) {
self.init()
setup(image: artImage)
}
func setup(image: UIImage) {
self.artImage = image
typealias BoxDims = (width: CGFloat, height: CGFloat,
length: CGFloat, chamferRadius: CGFloat)
let box1Dim = BoxDims(CGFloat(0.8), CGFloat(0.8), CGFloat(0.10), CGFloat(0.01))
let box2Dim = BoxDims(CGFloat(0.7), CGFloat(0.7), CGFloat(0.10), CGFloat(0.01))
let box3Dim = BoxDims(CGFloat(0.8), CGFloat(0.6), CGFloat(0.10), CGFloat(0.01))
let box4Dim = BoxDims(CGFloat(0.8), CGFloat(0.9), CGFloat(0.10), CGFloat(0.01))
let box5Dim = BoxDims(CGFloat(0.9), CGFloat(1.0), CGFloat(0.10), CGFloat(0.01))
let box6Dim = BoxDims(CGFloat(0.4), CGFloat(0.5), CGFloat(0.10), CGFloat(0.01))
let box7Dim = BoxDims(CGFloat(0.9), CGFloat(0.7), CGFloat(0.10), CGFloat(0.01))
let box8Dim = BoxDims(CGFloat(0.7), CGFloat(0.8), CGFloat(0.10), CGFloat(0.01))
let box9Dim = BoxDims(CGFloat(0.9), CGFloat(0.9), CGFloat(0.10), CGFloat(0.01))
let box10Dim = BoxDims(CGFloat(0.6), CGFloat(0.6), CGFloat(0.10), CGFloat(0.01))
let box11Dim = BoxDims(CGFloat(0.7), CGFloat(0.7), CGFloat(0.10), CGFloat(0.01))
let box12Dim = BoxDims(CGFloat(0.8), CGFloat(0.8), CGFloat(0.10), CGFloat(0.01))
let box13Dim = BoxDims(CGFloat(0.6), CGFloat(0.8), CGFloat(0.10), CGFloat(0.01))
let box14Dim = BoxDims(CGFloat(0.6), CGFloat(0.6), CGFloat(0.10), CGFloat(0.01))
let box15Dim = BoxDims(CGFloat(0.9), CGFloat(0.9), CGFloat(0.10), CGFloat(0.01))
let allBoxDims = [box1Dim, box2Dim, box3Dim, box4Dim, box5Dim, box6Dim, box7Dim, box8Dim, box9Dim, box10Dim, box11Dim, box12Dim, box13Dim, box14Dim, box15Dim]
let offset: Int = 50
var boxCounter: Int = 0
for xIndex: Int in 0...2 {
for yIndex: Int in 0...4 {
// create a geometry
let boxDim = allBoxDims[boxCounter]
geo = SCNBox(width: boxDim.width, height: boxDim.height, length: boxDim.length, chamferRadius: boxDim.chamferRadius)
let material = SCNMaterial()
material.diffuse.contents = image
geo.firstMaterial = material
boxCounter = boxCounter + 1
boxnode = SCNNode(geometry: geo)
boxnode.position.x = Float(xIndex - offset)
boxnode.position.y = Float(yIndex - offset)
self.rootNode.addChildNode(boxnode)
}
}
}
func deviceDidMove(motion: CMDeviceMotion?, error: NSError?) {
if let error = error {
print("DEVICEDIDMOVE: \(error.localizedDescription)")
}
if let motion = motion {
boxnode.orientation = motion.gaze(atOrientation: UIApplication.shared.statusBarOrientation)
}
}
}
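One way to handle the 15 images (a minimal sketch; boxNodes and setImage(_:at:) are hypothetical additions, not code from the project above): give each node its own material, remember the nodes in an array as setup() creates them, and assign each downloaded image by index using the same Firebase Storage call the view controller already uses.
// 1. In NewsScene, remember every node as setup() creates it:
var boxNodes = [SCNNode]()          // inside the loop: boxNodes.append(boxnode)

// 2. Update a single node's material when its image arrives:
func setImage(_ image: UIImage, at index: Int) {
    guard boxNodes.indices.contains(index) else { return }
    let material = SCNMaterial()
    material.diffuse.contents = image
    boxNodes[index].geometry?.firstMaterial = material
}

// 3. In the view controller, once the posts are loaded, fetch each image
//    and hand it to the matching node (downloads complete off the main thread):
for (i, post) in posts.prefix(15).enumerated() {
    FIRStorage.storage().reference(forURL: post.imgUrl)
        .data(withMaxSize: 2 * 1024 * 1024) { data, _ in
            guard let data = data, let img = UIImage(data: data) else { return }
            DispatchQueue.main.async { self.newsScene.setImage(img, at: i) }
        }
}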
How can I export the ARMeshGeometry generated by the new SceneReconstruction API on the latest iPad Pro to an .obj file?
Here's SceneReconstruction documentation.
Starting with Apple's Visualising Scene Semantics sample app, you can retrieve the ARMeshGeometry object from the first anchor in the frame.
The easiest approach to exporting the data is to first convert it to an MDLMesh:
extension ARMeshGeometry {
func toMDLMesh(device: MTLDevice) -> MDLMesh {
let allocator = MTKMeshBufferAllocator(device: device)
let data = Data(bytes: vertices.buffer.contents(), count: vertices.stride * vertices.count)
let vertexBuffer = allocator.newBuffer(with: data, type: .vertex)
let indexData = Data(bytes: faces.buffer.contents(), count: faces.bytesPerIndex * faces.count * faces.indexCountPerPrimitive)
let indexBuffer = allocator.newBuffer(with: indexData, type: .index)
let submesh = MDLSubmesh(indexBuffer: indexBuffer,
indexCount: faces.count * faces.indexCountPerPrimitive,
indexType: .uInt32,
geometryType: .triangles,
material: nil)
let vertexDescriptor = MDLVertexDescriptor()
vertexDescriptor.attributes[0] = MDLVertexAttribute(name: MDLVertexAttributePosition,
format: .float3,
offset: 0,
bufferIndex: 0)
vertexDescriptor.layouts[0] = MDLVertexBufferLayout(stride: vertices.stride)
return MDLMesh(vertexBuffer: vertexBuffer,
vertexCount: vertices.count,
descriptor: vertexDescriptor,
submeshes: [submesh])
}
}
Once you have the MDLMesh, exporting to an OBJ file is a breeze:
@IBAction func exportMesh(_ button: UIButton) {
let meshAnchors = arView.session.currentFrame?.anchors.compactMap { $0 as? ARMeshAnchor }
DispatchQueue.global().async {
let directory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let filename = directory.appendingPathComponent("MyFirstMesh.obj")
guard let device = MTLCreateSystemDefaultDevice() else {
print("metal device could not be created")
return
}
let asset = MDLAsset()
for anchor in meshAnchors! {
let mdlMesh = anchor.geometry.toMDLMesh(device: device)
asset.add(mdlMesh)
}
do {
try asset.export(to: filename)
} catch {
print("failed to write to file")
}
}
}
The answer from @swiftcoder works great. But in the case of several anchors you need to convert the vertex coordinates to the world coordinate system based on the anchor transform; otherwise all meshes will be placed at the zero position and you will get a mess.
The updated code looks like this:
extension ARMeshGeometry {
func toMDLMesh(device: MTLDevice, transform: simd_float4x4) -> MDLMesh {
let allocator = MTKMeshBufferAllocator(device: device)
let data = Data.init(bytes: transformedVertexBuffer(transform), count: vertices.stride * vertices.count)
let vertexBuffer = allocator.newBuffer(with: data, type: .vertex)
let indexData = Data.init(bytes: faces.buffer.contents(), count: faces.bytesPerIndex * faces.count * faces.indexCountPerPrimitive)
let indexBuffer = allocator.newBuffer(with: indexData, type: .index)
let submesh = MDLSubmesh(indexBuffer: indexBuffer,
indexCount: faces.count * faces.indexCountPerPrimitive,
indexType: .uInt32,
geometryType: .triangles,
material: nil)
let vertexDescriptor = MDLVertexDescriptor()
vertexDescriptor.attributes[0] = MDLVertexAttribute(name: MDLVertexAttributePosition,
format: .float3,
offset: 0,
bufferIndex: 0)
vertexDescriptor.layouts[0] = MDLVertexBufferLayout(stride: vertices.stride)
return MDLMesh(vertexBuffer: vertexBuffer,
vertexCount: vertices.count,
descriptor: vertexDescriptor,
submeshes: [submesh])
}
func transformedVertexBuffer(_ transform: simd_float4x4) -> [Float] {
var result = [Float]()
for index in 0..<vertices.count {
let vertexPointer = vertices.buffer.contents().advanced(by: vertices.offset + vertices.stride * index)
let vertex = vertexPointer.assumingMemoryBound(to: (Float, Float, Float).self).pointee
var vertexTransform = matrix_identity_float4x4
vertexTransform.columns.3 = SIMD4<Float>(vertex.0, vertex.1, vertex.2, 1)
let position = (transform * vertexTransform).position
result.append(position.x)
result.append(position.y)
result.append(position.z)
}
return result
}
}
extension simd_float4x4 {
var position: SIMD3<Float> {
return SIMD3<Float>(columns.3.x, columns.3.y, columns.3.z)
}
}
extension Array where Element == ARMeshAnchor {
func save(to fileURL: URL, device: MTLDevice) throws {
let asset = MDLAsset()
self.forEach {
let mesh = $0.geometry.toMDLMesh(device: device, transform: $0.transform)
asset.add(mesh)
}
try asset.export(to: fileURL)
}
}
I am not an expert in ModelIO, and maybe there is a simpler way to transform the vertex buffer :) But this code works for me.
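For completeness, a hedged example of driving the extension above from a button handler (assuming an ARView named arView):
// Sketch: gather the frame's mesh anchors and write them out with save(to:device:).
guard let frame = arView.session.currentFrame,
      let device = MTLCreateSystemDefaultDevice() else { return }
let meshAnchors = frame.anchors.compactMap { $0 as? ARMeshAnchor }
let fileURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    .appendingPathComponent("scene.obj")
try? meshAnchors.save(to: fileURL, device: device)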
Exporting LiDAR-reconstructed geometry
This code lets you save the LiDAR-scanned geometry as USD and send it to a Mac via AirDrop. Besides .usd you can also export the .usda, .usdc, .obj, .stl, .abc, and .ply file formats.
Additionally, you can use SceneKit's write(to:options:delegate:progressHandler:) method to save a .usdz version of the file.
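A minimal sketch of that SceneKit call (assuming you have already assembled the scanned geometry into an SCNScene named scene):
// Sketch: write the scene out as .usdz in the Documents directory.
let usdzURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    .appendingPathComponent("model.usdz")
let success = scene.write(to: usdzURL, options: nil, delegate: nil, progressHandler: nil)
print(success ? "Saved \(usdzURL.lastPathComponent)" : "USDZ export failed")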
import RealityKit
import ARKit
import MetalKit
import ModelIO
@IBOutlet var arView: ARView!
var saveButton: UIButton!
let rect = CGRect(x: 50, y: 50, width: 100, height: 50)
override func viewDidLoad() {
super.viewDidLoad()
let tui = UIControl.Event.touchUpInside
saveButton = UIButton(frame: rect)
saveButton.setTitle("Save", for: [])
saveButton.addTarget(self, action: #selector(saveButtonTapped), for: tui)
self.view.addSubview(saveButton)
}
@objc func saveButtonTapped(sender: UIButton) {
print("Saving is executing...")
guard let frame = arView.session.currentFrame
else { fatalError("Can't get ARFrame") }
guard let device = MTLCreateSystemDefaultDevice()
else { fatalError("Can't create MTLDevice") }
let allocator = MTKMeshBufferAllocator(device: device)
let asset = MDLAsset(bufferAllocator: allocator)
let meshAnchors = frame.anchors.compactMap { $0 as? ARMeshAnchor }
for ma in meshAnchors {
let geometry = ma.geometry
let vertices = geometry.vertices
let faces = geometry.faces
let vertexPointer = vertices.buffer.contents()
let facePointer = faces.buffer.contents()
for vtxIndex in 0 ..< vertices.count {
let vertex = geometry.vertex(at: UInt32(vtxIndex))
var vertexLocalTransform = matrix_identity_float4x4
vertexLocalTransform.columns.3 = SIMD4<Float>(x: vertex.0,
y: vertex.1,
z: vertex.2,
w: 1.0)
let vertexWorldTransform = (ma.transform * vertexLocalTransform).position
let vertexOffset = vertices.offset + vertices.stride * vtxIndex
let componentStride = vertices.stride / 3
vertexPointer.storeBytes(of: vertexWorldTransform.x,
toByteOffset: vertexOffset,
as: Float.self)
vertexPointer.storeBytes(of: vertexWorldTransform.y,
toByteOffset: vertexOffset + componentStride,
as: Float.self)
vertexPointer.storeBytes(of: vertexWorldTransform.z,
toByteOffset: vertexOffset + (2 * componentStride),
as: Float.self)
}
let byteCountVertices = vertices.count * vertices.stride
let byteCountFaces = faces.count * faces.indexCountPerPrimitive * faces.bytesPerIndex
let vertexBuffer = allocator.newBuffer(with: Data(bytesNoCopy: vertexPointer,
count: byteCountVertices,
deallocator: .none), type: .vertex)
let indexBuffer = allocator.newBuffer(with: Data(bytesNoCopy: facePointer,
count: byteCountFaces,
deallocator: .none), type: .index)
let indexCount = faces.count * faces.indexCountPerPrimitive
let material = MDLMaterial(name: "material",
scatteringFunction: MDLPhysicallyPlausibleScatteringFunction())
let submesh = MDLSubmesh(indexBuffer: indexBuffer,
indexCount: indexCount,
indexType: .uInt32,
geometryType: .triangles,
material: material)
let vertexFormat = MTKModelIOVertexFormatFromMetal(vertices.format)
let vertexDescriptor = MDLVertexDescriptor()
vertexDescriptor.attributes[0] = MDLVertexAttribute(name: MDLVertexAttributePosition,
format: vertexFormat,
offset: 0,
bufferIndex: 0)
vertexDescriptor.layouts[0] = MDLVertexBufferLayout(stride: ma.geometry.vertices.stride)
let mesh = MDLMesh(vertexBuffer: vertexBuffer,
vertexCount: ma.geometry.vertices.count,
descriptor: vertexDescriptor,
submeshes: [submesh])
asset.add(mesh)
}
let filePath = FileManager.default.urls(for: .documentDirectory,
in: .userDomainMask).first!
let usd: URL = filePath.appendingPathComponent("model.usd")
if MDLAsset.canExportFileExtension("usd") {
do {
try asset.export(to: usd)
let controller = UIActivityViewController(activityItems: [usd],
applicationActivities: nil)
controller.popoverPresentationController?.sourceView = sender
self.present(controller, animated: true, completion: nil)
} catch let error {
fatalError(error.localizedDescription)
}
} else {
fatalError("Can't export USD")
}
}
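Note that geometry.vertex(at:) above is not a built-in ARKit API; a small helper along these lines (mirroring the pointer arithmetic from the earlier answer) is assumed:
extension ARMeshGeometry {
    // Reads one (x, y, z) vertex out of the vertex buffer.
    func vertex(at index: UInt32) -> (Float, Float, Float) {
        assert(vertices.format == MTLVertexFormat.float3, "Expected three floats per vertex.")
        let vertexPointer = vertices.buffer.contents()
            .advanced(by: vertices.offset + (vertices.stride * Int(index)))
        return vertexPointer.assumingMemoryBound(to: (Float, Float, Float).self).pointee
    }
}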
Tap the Save button, and in the Activity View Controller choose More, then send the ready-to-use model to your Mac's Downloads folder via AirDrop.
P.S.
Here you can find extra info on capturing real-world texture.
I'm using LiDAR and am a beginner at 3D scanning. How do I export to these file formats with Swift code:
GLTF (Share in AR for Android devices)
GLB
STL (Un-textured file used in 3d printing)
Point Cloud (PCD PLY PTS XYZ LAS e57)
All Data (Includes captured images)
DAE (Compatible with Sketchfab)
FBX
Thank you
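As a starting point, ModelIO can tell you which of these extensions it can write natively; anything it reports as unsupported needs a third-party exporter or converter. A quick sketch:
import ModelIO

// Sketch: probe ModelIO's native export support for the formats listed above.
for ext in ["gltf", "glb", "stl", "ply", "obj", "dae", "fbx", "usd"] {
    print(ext, MDLAsset.canExportFileExtension(ext) ? "supported" : "not supported natively")
}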
I am drawing a route with Google Maps. I calculate the distance in km along the route I draw, and I also want to calculate how long the trip will take. How can I calculate how many minutes the route will take? I calculated the speed in the code below, but when I tried to compute the time from the speed I could not get any output. How can I calculate the duration?
func drawRoute(from source: CLLocationCoordinate2D, to destination: CLLocationCoordinate2D) {
self.mapView.clear()
let origin = "\(source.latitude),\(source.longitude)"
let destinationn = "\(destination.latitude),\(destination.longitude)"
guard let url = URL(string: "https://maps.googleapis.com/maps/api/directions/json?origin=\(origin)&destination=\(destinationn)&mode=driving&key=..") else {
let error = NSError(domain: "LocalDomain", code: 0, userInfo: [NSLocalizedDescriptionKey: "Failed to create object URL"])
print("Error: \(error)")
return
}
let config = URLSessionConfiguration.default
let session = URLSession(configuration: config)
let task = session.dataTask(with: url, completionHandler: {
(data, response, error) in
if error != nil {
print(error!.localizedDescription)
}
else {
do {
if let json : [String:Any] = try JSONSerialization.jsonObject(with: data!, options: .allowFragments) as? [String: Any]{
guard let routes = json["routes"] as? NSArray else {
DispatchQueue.main.async {
}
return
}
if (routes.count > 0) {
let overview_polyline = routes[0] as? NSDictionary
let dictPolyline = overview_polyline?["overview_polyline"] as? NSDictionary
let points = dictPolyline?.object(forKey: "points") as? String
DispatchQueue.main.async {
//
let legs = overview_polyline?["legs"] as! Array<Dictionary<String, AnyObject>>
let distance = legs[0]["distance"] as? NSDictionary
let distanceValue = distance?["value"] as? Int ?? 0
let distanceDoubleValue = distance?["value"] as? Double ?? 0.0
let duration = legs[0]["duration"] as? NSDictionary
let totalDurationInSeconds = duration?["value"] as? Int ?? 0
let durationDoubleValue = duration?["value"] as? Double ?? 0.0
if(distanceValue != 0) {
self.speed = distanceDoubleValue / durationDoubleValue
print("speed", self.speed)
}
let miles = Double(distanceValue) / 1609.344
print("\(miles)")
let km = Double(distanceValue) / 1000.0 // the Directions API's distance.value is in meters
self.kmLabel.text = ("\(Int(km))" + " " + "KM")
if distanceValue > Int(32186.9){
}else{
self.showPath(polyStr: points!)
let startLocationDictionary = legs[0]["start_location"] as! Dictionary<String, AnyObject>
let originCoordinate = CLLocationCoordinate2DMake(startLocationDictionary["lat"] as! Double, startLocationDictionary["lng"] as! Double)
let endLocationDictionary = legs[legs.count - 1]["end_location"] as! Dictionary<String, AnyObject>
let destinationCoordinate = CLLocationCoordinate2DMake(endLocationDictionary["lat"] as! Double, endLocationDictionary["lng"] as! Double)
let marker1 = GMSMarker()
marker1.position = CLLocationCoordinate2D(latitude:destinationCoordinate.latitude, longitude: destinationCoordinate.longitude)
marker1.icon = UIImage(named: "placeholder")
marker1.map = self.mapView
let marker2 = GMSMarker()
marker2.position = CLLocationCoordinate2D(latitude:originCoordinate.latitude, longitude: originCoordinate.longitude)
marker2.icon = UIImage(named: "location")
marker2.map = self.mapView
}
}
}
else {
print(json)
DispatchQueue.main.async {
// SVProgressHUD.dismiss()
}
}
}
}
catch {
print("error in JSONSerialization")
DispatchQueue.main.async {
// SVProgressHUD.dismiss()
}
}
}
})
task.resume()
}
func showPath(polyStr: String) {
// SVProgressHUD.dismiss()
let path = GMSPath(fromEncodedPath: polyStr)
let polyline = GMSPolyline(path: path)
polyline.strokeWidth = 5.0
polyline.strokeColor = UIColor.red
polyline.map = mapView
DispatchQueue.main.async {
let bounds = GMSCoordinateBounds(path: path!)
let update = GMSCameraUpdate.fit(bounds, with: UIEdgeInsets(top: 170, left: 30, bottom: 30, right: 30))
self.mapView.moveCamera(update)
}
}
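Note that the Directions response already carries the travel time: totalDurationInSeconds above is the leg's duration.value, in seconds, so there is no need to derive it from speed. A minimal sketch of formatting it (durationLabel is a hypothetical label, analogous to kmLabel):
// Sketch: turn the API's duration (in seconds) into an "Xh Ym" string.
func formattedDuration(_ seconds: Int) -> String {
    let hours = seconds / 3600
    let minutes = (seconds % 3600) / 60
    return hours > 0 ? "\(hours) h \(minutes) min" : "\(minutes) min"
}
// Inside the DispatchQueue.main.async block above:
// self.durationLabel.text = formattedDuration(totalDurationInSeconds)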
I am developing an application to record RGB-D sequences on the iPhone using the dual rear camera or the TrueDepth camera. I can capture and visualize the RGB and depth frames, and I have developed a version that compresses this data and saves it to the iPhone's internal files. However, my goal is to save both sequences (the RGB and depth-map sequences) to the gallery, and I am having problems using AVAssetWriter to create a depth-map video.
I am using an iPhone X, Xcode 10.2.1, and Swift 5.
import UIKit
import AVFoundation
import AssetsLibrary
var noMoreSpace = false
class ViewController: UIViewController{
@IBOutlet weak var previewView: UIImageView!
@IBOutlet weak var timeLabel: UILabel!
@IBOutlet weak var previewModeControl: UISegmentedControl!
let session = AVCaptureSession()
let dataOutputQueue = DispatchQueue(label: "video data queue")
let videoOutput = AVCaptureVideoDataOutput()
let movieOutput = AVCaptureMovieFileOutput()
let depthOutput = AVCaptureDepthDataOutput()
let depthCapture = DepthCapture()
var previewLayer = AVCaptureVideoPreviewLayer()
var inputDevice: AVCaptureDeviceInput!
let videoDeviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera, .builtInTrueDepthCamera], mediaType: .video, position: .unspecified)
var Timestamp: String {
let currentDate = NSDate()
let dateFormatter = DateFormatter()
dateFormatter.dateFormat = "ddMM_HHmmss"
return "\(dateFormatter.string(from: currentDate as Date))"
}
var isRecording = false
var time = 0
var timer = Timer()
enum PreviewMode: Int {
case original
case depth
}
var previewMode = PreviewMode.original
var depthMap: CIImage?
var scale: CGFloat = 0.0
//let sessionQueue = DispatchQueue(label: "session queue")
override func viewDidLoad() {
super.viewDidLoad()
timeLabel.isHidden = true //TODO: Disable the rest of the UI
previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
configureCaptureSession()
session.startRunning()
}
func configureCaptureSession() {
session.beginConfiguration()
let camera = AVCaptureDevice.default(.builtInTrueDepthCamera, for: .video, position: .unspecified)!
do {
let cameraInput = try AVCaptureDeviceInput(device: camera)
if session.canAddInput(cameraInput){
session.sessionPreset = .vga640x480
session.addInput(cameraInput)
self.inputDevice = cameraInput
}
if session.canAddOutput(videoOutput){
videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
session.addOutput(videoOutput)
let videoConnection = videoOutput.connection(with: .video)
videoConnection?.videoOrientation = .portrait
//previewLayer = AVCaptureVideoPreviewLayer(session: session)
//previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
//previewLayer.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
//previewView.layer.addSublayer(previewLayer)
//previewLayer.position = CGPoint(x: self.previewView.frame.width / 2, y: self.previewView.frame.height / 2)
//previewLayer.bounds = previewView.frame
}
//Add Depth output to the session
if session.canAddOutput(depthOutput){
session.addOutput(depthOutput)
depthOutput.setDelegate(self, callbackQueue: dataOutputQueue)
depthOutput.isFilteringEnabled = true
let depthConnection = depthOutput.connection(with: .depthData)
depthConnection?.videoOrientation = .portrait
}
/*if session.canAddOutput(movieOutput){
session.addOutput(movieOutput)
}*/
} catch {
print("Error")
}
let outputRect = CGRect(x: 0, y: 0, width: 1, height: 1)
let videoRect = videoOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
let depthRect = depthOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
// Calculate the scaling factor between videoRect and depthRect
scale = max(videoRect.width, videoRect.height) / max(depthRect.width, depthRect.height)
// Change the AVCaptureDevice configuration, so you need to lock it
do{
try camera.lockForConfiguration()
// Set the AVCaptureDevice‘s minimum frame duration (which is the inverse of the maximum frame rate) to be equal to the supported frame rate of the depth data
if let frameDuration = camera.activeDepthDataFormat?.videoSupportedFrameRateRanges.first?.minFrameDuration{
camera.activeVideoMinFrameDuration = frameDuration
}
// Unlock the configuration you locked
camera.unlockForConfiguration()
}catch{
fatalError(error.localizedDescription)
}
session.commitConfiguration()
}
@IBAction func startStopRecording(_ sender: Any) {
if isRecording{
stopRecording()
} else {
startRecording()
}
}
func startRecording(){
timeLabel.isHidden = false
timer = Timer.scheduledTimer(timeInterval: 1, target: self, selector: #selector(ViewController.timerAction), userInfo: nil, repeats: true)
let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
let flagTime = Timestamp
let auxStr = flagTime+"_output.mp4"
let fileUrl = paths[0].appendingPathComponent(auxStr)
depthCapture.prepareForRecording(timeFlag: flagTime)
movieOutput.startRecording(to: fileUrl, recordingDelegate: self)
print(fileUrl.absoluteString)
print("Recording started")
self.isRecording = true
}
func stopRecording(){
timeLabel.isHidden = true
timer.invalidate()
time = 0
timeLabel.text = "0"
movieOutput.stopRecording()
print("Stopped recording!")
self.isRecording = false
do {
try depthCapture.finishRecording(success: { (url: URL) -> Void in
print(url.absoluteString)
})
} catch {
print("Error while finishing depth capture.")
}
}
@objc func timerAction() {
time += 1
timeLabel.text = String(time)
}
@IBAction func previewModeChanged(_ sender: UISegmentedControl) {
previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
}
@IBAction func switchCamera(_ sender: Any) {
let currentDevice = self.inputDevice.device
let currentPosition = currentDevice.position
let preferredPosition: AVCaptureDevice.Position
let preferredDeviceType: AVCaptureDevice.DeviceType
let devices = self.videoDeviceDiscoverySession.devices
var newVideoDevice: AVCaptureDevice? = nil
switch currentPosition {
case .unspecified, .front:
preferredPosition = .back
preferredDeviceType = .builtInDualCamera
case .back:
preferredPosition = .front
preferredDeviceType = .builtInTrueDepthCamera
@unknown default:
preferredPosition = .back
preferredDeviceType = .builtInDualCamera
}
// First, seek a device with both the preferred position and device type. Otherwise, seek a device with only the preferred position. (TODO: understand these conditions better.)
if let device = devices.first(where: { $0.position == preferredPosition && $0.deviceType == preferredDeviceType }) {
newVideoDevice = device
} else if let device = devices.first(where: { $0.position == preferredPosition }) {
newVideoDevice = device
}
if let videoDevice = newVideoDevice {
do {
let cameraInput = try AVCaptureDeviceInput(device: videoDevice)
self.session.beginConfiguration()
self.session.removeInput(self.inputDevice)
if self.session.canAddInput(cameraInput) {
session.sessionPreset = .vga640x480
self.session.addInput(cameraInput)
self.inputDevice = cameraInput
}else {
self.session.addInput(self.inputDevice)
}
self.session.commitConfiguration()
} catch{
print("Error occurred while creating video device input: \(error)")
}
}
}
}
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate{
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let image = CIImage(cvPixelBuffer: pixelBuffer!)
let previewImage: CIImage
switch previewMode {
case .original:
previewImage = image
case .depth:
previewImage = depthMap ?? image
}
let displayImage = UIImage(ciImage: previewImage)
DispatchQueue.main.async {[weak self] in self?.previewView.image = displayImage}
}
}
extension ViewController: AVCaptureDepthDataOutputDelegate{
func depthDataOutput(_ output: AVCaptureDepthDataOutput, didOutput depthData: AVDepthData, timestamp: CMTime, connection: AVCaptureConnection) {
var convertedDepth: AVDepthData
// Ensure the depth data is in the format you need: 32-bit floating point
if depthData.depthDataType != kCVPixelFormatType_DepthFloat32 {
convertedDepth = depthData.converting(toDepthDataType: kCVPixelFormatType_DepthFloat32)
}else{
convertedDepth = depthData
}
// You save the depth data map from the AVDepthData object as a CVPixelBuffer
let pixelBuffer = convertedDepth.depthDataMap
//Using an extension, you then clamp the pixels in the pixel buffer to keep them between 0.0 and 1.0.
pixelBuffer.clamp()
// Convert the pixel buffer into a CIImage
let depthMap = CIImage(cvPixelBuffer: pixelBuffer)
// You store depthMap in a class variable for later use
DispatchQueue.main.async {
[weak self] in self?.depthMap = depthMap
}
}
}
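On the AVAssetWriter part of the question, one hedged approach (a sketch under stated assumptions, not tested against this project): render each depth frame into a BGRA pixel buffer and append it through an AVAssetWriterInputPixelBufferAdaptor, since the H.264 encoder will not accept 32-bit float depth buffers directly. depthVideoURL and firstFrameTime are hypothetical.
// Sketch: one-time setup before recording starts.
let writer = try AVAssetWriter(outputURL: depthVideoURL, fileType: .mp4)
let input = AVAssetWriterInput(mediaType: .video, outputSettings: [
    AVVideoCodecKey: AVVideoCodecType.h264,
    AVVideoWidthKey: 640,
    AVVideoHeightKey: 480
])
input.expectsMediaDataInRealTime = true
let adaptor = AVAssetWriterInputPixelBufferAdaptor(
    assetWriterInput: input,
    sourcePixelBufferAttributes: [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA])
writer.add(input)
writer.startWriting()
writer.startSession(atSourceTime: firstFrameTime) // the first depth frame's CMTime

// Sketch: per frame, inside depthDataOutput(_:didOutput:timestamp:connection:).
if input.isReadyForMoreMediaData, let pool = adaptor.pixelBufferPool {
    var bgraBuffer: CVPixelBuffer?
    CVPixelBufferPoolCreatePixelBuffer(nil, pool, &bgraBuffer)
    if let bgraBuffer = bgraBuffer {
        CIContext().render(depthMap, to: bgraBuffer) // depthMap: the CIImage built above
        adaptor.append(bgraBuffer, withPresentationTime: timestamp)
    }
}
// When recording stops: input.markAsFinished(); writer.finishWriting { /* save to the gallery */ }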
I'm working on an application with ARKit. My app has many 3D models, and their size is large. Can I serve these models from another server (an external site)? I'm new to Swift, and I can't find anything on loading a 3D model from a web server.
Is it enough to change the model path there? Thank you.
func loadModel() {
guard let virtualObjectScene = SCNScene(named: "\(modelName).\(fileExtension)", inDirectory: "Models.scnassets/\(modelName)") else {
return
}
let wrapperNode = SCNNode()
for child in virtualObjectScene.rootNode.childNodes {
let defaults = UserDefaults.standard
wrapperNode.addChildNode(child)
}
self.addChildNode(wrapperNode)
}
All code:
import UIKit
import SceneKit
import ARKit
class VirtualObject: SCNNode {
var modelName: String = ""
var fileExtension: String = ""
var thumbImage: UIImage!
var title: String = ""
var viewController: ViewController?
override init() {
super.init()
self.name = "Virtual object root node"
}
init(modelName: String, fileExtension: String, thumbImageFilename: String, title: String) {
super.init()
self.name = "Virtual object root node"
self.modelName = modelName
self.fileExtension = fileExtension
self.thumbImage = UIImage(named: thumbImageFilename)
self.title = title
}
required init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
func loadModel() {
guard let virtualObjectScene = SCNScene(named: "\(modelName).\(fileExtension)", inDirectory: "Models.scnassets/\(modelName)") else {
return
}
let wrapperNode = SCNNode()
for child in virtualObjectScene.rootNode.childNodes {
let defaults = UserDefaults.standard
wrapperNode.addChildNode(child)
}
self.addChildNode(wrapperNode)
}
func unloadModel() {
self.removeFromParentNode()
for child in self.childNodes {
child.removeFromParentNode()
}
}
func translateBasedOnScreenPos(_ pos: CGPoint, instantly: Bool, infinitePlane: Bool) {
guard let controller = viewController else {
return
}
let result = controller.worldPositionFromScreenPosition(pos, objectPos: self.position, infinitePlane: infinitePlane)
controller.moveVirtualObjectToPosition(result.position, instantly, !result.hitAPlane)
}
}
extension VirtualObject {
static func isNodePartOfVirtualObject(_ node: SCNNode) -> Bool {
if node.name == "Virtual object root node" {
return true
}
if node.parent != nil {
return isNodePartOfVirtualObject(node.parent!)
}
return false
}
static let availableObjects: [VirtualObject] = [
Anatomy()
]
}
You can load an .scn file from a web server with an IP address like this (I used a fake IP below):
let myURL = NSURL(string: "http://110.151.153.202:80/scnfiles/myfile.scn")
let scene = try! SCNScene(url: myURL! as URL, options: nil)
Edit:
Here's a simple Swift Playgrounds example that pulls a test cube .scn file from my GitHub repo. You just tap anywhere and the cube loads.
import ARKit
import SceneKit
import PlaygroundSupport
class ViewController: NSObject {
var sceneView: ARSCNView
init(sceneView: ARSCNView) {
self.sceneView = sceneView
super.init()
self.setupWorldTracking()
self.sceneView.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(ViewController.handleTap(_:))))
}
private func setupWorldTracking() {
if ARWorldTrackingConfiguration.isSupported {
let configuration = ARWorldTrackingConfiguration()
configuration.planeDetection = .horizontal
configuration.isLightEstimationEnabled = true
self.sceneView.session.run(configuration, options: [])
}
}
@objc func handleTap(_ gesture: UITapGestureRecognizer) {
let results = self.sceneView.hitTest(gesture.location(in: gesture.view), types: ARHitTestResult.ResultType.featurePoint)
guard let result: ARHitTestResult = results.first else {
return
}
// pulls cube.scn from github repo
let myURL = NSURL(string: "https://raw.githubusercontent.com/wave-electron/scnFile/master/cube.scn")
let scene = try! SCNScene(url: myURL! as URL, options: nil)
let node = scene.rootNode.childNode(withName: "SketchUp", recursively: true)
node?.scale = SCNVector3(0.01,0.01,0.01)
let position = SCNVector3Make(result.worldTransform.columns.3.x, result.worldTransform.columns.3.y, result.worldTransform.columns.3.z)
node?.position = position
self.sceneView.scene.rootNode.addChildNode(node!)
}
}
let sceneView = ARSCNView()
let viewController = ViewController(sceneView: sceneView)
sceneView.autoenablesDefaultLighting = true
PlaygroundPage.current.needsIndefiniteExecution = true
PlaygroundPage.current.liveView = viewController.sceneView
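One caveat to loading straight from the URL in both snippets above: SCNScene(url:options:) is synchronous and blocks until the whole file has come down, so for the large models in the question it is safer to download to disk first and then load the local copy. A rough sketch (the URL and file name are placeholders):
// Sketch: download the model file, then load the scene from the local copy.
let remoteURL = URL(string: "https://example.com/models/chair.scn")!
URLSession.shared.downloadTask(with: remoteURL) { tempURL, _, error in
    guard let tempURL = tempURL, error == nil else { return }
    let localURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
        .appendingPathComponent("chair.scn")
    try? FileManager.default.removeItem(at: localURL)
    try? FileManager.default.moveItem(at: tempURL, to: localURL)
    DispatchQueue.main.async {
        if let scene = try? SCNScene(url: localURL, options: nil) {
            // attach scene.rootNode's children to your view's scene here
        }
    }
}.resume()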
I want to show some images from a website in an iCarousel slideshow.
I get an empty slideshow from iCarousel.
I'm a beginner and have tried many code snippets in my app, but nothing happened.
Here is my complete code:
import UIKit
class ViewController: UIViewController , iCarouselDataSource, iCarouselDelegate {
@IBOutlet weak var imageView: UIImageView!
@IBOutlet weak var allimages: UIImageView!
internal var urll: NSURL!
@IBOutlet weak var carouselView: iCarousel!
var numbers = [Int]()
var urls = [String]()
func carousel(carousel: iCarousel, viewForItemAtIndex index: Int, reusingView view: UIView?) -> UIView {
let tempView = UIView(frame: CGRect(x: 0, y: 0, width: 300, height: 200))
let images = UIButton(frame: CGRect(x: 0, y: 0, width: 300, height: 200))
images.backgroundColor = UIColor.blueColor()
images.backgroundImageForState(.Normal)
images.backgroundRectForBounds(CGRect(x: 0, y: 0, width: 300, height: 200))
tempView.addSubview(images)
let imageView = UIImageView(frame: CGRect(x: 0, y: 0, width: 200, height: 200))
imageView.backgroundColor = UIColor.orangeColor()
for item in self.urls {
print (item)
let s = item
let url = NSURL(string: s)
let session = NSURLSession.sharedSession()
let task = session.downloadTaskWithURL(url!)
{
(url: NSURL?, res: NSURLResponse?, e: NSError?) in
let d = NSData(contentsOfURL: url!)
let image = UIImage(data: d!)
dispatch_async(dispatch_get_main_queue()) {
imageView.image = image
}
}
task.resume()
}
return images
}
func carousel(carousel: iCarousel, valueForOption option: iCarouselOption, withDefault value: CGFloat) -> CGFloat {
if option == iCarouselOption.Spacing {
return value * 1.1
}
return value
}
override func awakeFromNib() {
super.awakeFromNib()
numbers = [1,2,3,4]
}
func numberOfItemsInCarousel(carousel: iCarousel) -> Int {
return numbers.count
}
//==============================================================
override func viewDidLoad() {
super.viewDidLoad()
carouselView.type = .Rotary
carouselView.autoscroll = 0.4
// let defaults = NSUserDefaults.standardUserDefaults()
urll = NSURL(string: "http://xxxxxxxxx.com/api/?slider=uij6sdnb")
let session = NSURLSession.sharedSession()
let task = session.dataTaskWithURL(urll) {(NSData, response, error) -> Void in
do {
let records = try NSJSONSerialization.JSONObjectWithData(NSData!, options: NSJSONReadingOptions.MutableContainers) as! NSArray
for record in records {
let urlid = Int(record["slide_id"] as! String)
let urimage = record["slide_url"] as! String
print(urlid)
print(urimage)
self.urls = [urimage]
// print(self.urls.count)
}
}
catch {
print("Json Error")
}
}
task.resume()
}
}
Any help will be appreciated.
Here is my complete code.
But as you can see, it's for 4 images.
For more or fewer images, you can apply the changes easily.
I added the iCarousel from Jared Davidson's YouTube channel.
Enjoy.
//
// ViewController.swift
// parniapharmed
//
// Created by Alfredo Uzumaki on 2016 AP.
// Copyright © 2016 AP Alfredo Uzumaki. All rights reserved.
//
import UIKit
import SystemConfiguration // for checking internet connection
class ViewController: UIViewController { // if you have any problem, comment out this line and uncomment the line below!
// class ViewController: UIViewController , iCarouselDataSource, iCarouselDelegate {
@IBOutlet weak var allimages: UIImageView!
internal var urll: NSURL!
@IBOutlet weak var carouselView: iCarousel!
@IBOutlet weak var img1: UIImageView!
@IBOutlet weak var img2: UIImageView!
@IBOutlet weak var img3: UIImageView!
@IBOutlet weak var img4: UIImageView!
@IBOutlet weak var allimagetop: UIView!
internal var imageNinja:String = ""
internal var hasInternet:Bool! // for checking internet connection
var numbers = [Int]()
var urls = [String]()
internal var urimage = [String]()
internal var image1:String = ""
internal var image2:String = ""
internal var image3:String = ""
internal var image4:String = ""
internal var imageArray: [UIImage] = []
func carousel(carousel: iCarousel, viewForItemAtIndex index: Int, reusingView view: UIView?) -> UIView {
var tempView = UIView(frame: CGRect(x: 0, y: 0, width: 300, height: 200))
let images = UIButton(frame: CGRect(x: 0, y: 0, width: 300, height: 200))
images.backgroundColor = UIColor.blueColor()
images.backgroundImageForState(.Normal)
images.backgroundRectForBounds(CGRect(x: 0, y: 0, width: 300, height: 200))
tempView.addSubview(images)
let imageView = UIImageView(frame: CGRect(x: 0, y: 0, width: 400, height: 200))
imageView.image = UIImage(named: "loading")
if (imageNinja != "") && (imageArray.count == 4) { // checking if the images fully loaded; you can delete the second condition if it causes any error
imageView.backgroundColor = UIColor.orangeColor()
imageView.image = imageArray[index]
tempView.addSubview(imageView)
}
return imageView
}
func carousel(carousel: iCarousel, valueForOption option: iCarouselOption, withDefault value: CGFloat) -> CGFloat {
if option == iCarouselOption.Spacing {
return value * 1.1
}
return value
}
override func awakeFromNib() {
super.awakeFromNib()
numbers = [1,2,3,4] // I wrote it manually, but you can change it to the number of your images (e.g. imageArray.count).
}
func numberOfItemsInCarousel(carousel: iCarousel) -> Int {
return numbers.count
}
//===========================Begin of View Did Load===================================
override func viewDidLoad() {
super.viewDidLoad()
carouselView.type = .Rotary
carouselView.autoscroll = 0.1
hasInternet = connectedToNetwork() //checking internet again !
print(hasInternet) // if true then internet is ok
if hasInternet == true {
urll = NSURL(string: "http://yourwebsite.com/api/?slider=uij6sdnb") // <== put your php address here !!
let session = NSURLSession.sharedSession()
let task = session.dataTaskWithURL(urll) {(data, response, error) -> Void in
do {
let records = try NSJSONSerialization.JSONObjectWithData(data!, options: NSJSONReadingOptions.MutableContainers) as! NSArray
for record in records {
// slide_url is the column name of the database row from PHP... change it to yours
let urimage = record["slide_url"] as! String
self.urls.append(urimage)
}
print(self.urls[2])
self.image1 = self.urls[0]
self.image2 = self.urls[1]
self.image3 = self.urls[2]
self.image4 = self.urls[3]
}
catch {
print("Json Error")
}
while self.imageNinja == "" {
self.image1 = self.urls[0]
self.image2 = self.urls[1]
self.image3 = self.urls[2]
self.image4 = self.urls[3]
print("4th image downloaded")
print(self.image4)
//----------------------------------- download image -----------------------------------------
let url = NSURL(string: self.image1)
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)) {
let data = NSData(contentsOfURL: url!) // make sure the image at this URL exists; otherwise unwrap with an if-let check
dispatch_async(dispatch_get_main_queue(), {
let image = UIImage(data: data!)
self.imageArray.append(image!)
});
}
//----------------------------------- download image -----------------------------------------
//----------------------------------- download image -----------------------------------------
let url2 = NSURL(string: self.image2)
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)) {
let data = NSData(contentsOfURL: url2!) // make sure the image at this URL exists; otherwise unwrap with an if-let check
dispatch_async(dispatch_get_main_queue(), {
let image = UIImage(data: data!)
self.imageArray.append(image!)
});
}
//----------------------------------- download image -----------------------------------------
//----------------------------------- download image -----------------------------------------
let url3 = NSURL(string: self.image3)
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)) {
let data = NSData(contentsOfURL: url3!) // make sure the image at this URL exists; otherwise unwrap with an if-let check
dispatch_async(dispatch_get_main_queue(), {
let image = UIImage(data: data!)
self.imageArray.append(image!)
});
}
//----------------------------------- download image -----------------------------------------
//----------------------------------- download image -----------------------------------------
let url4 = NSURL(string: self.image4)
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)) {
let data = NSData(contentsOfURL: url4!) // make sure the image at this URL exists; otherwise unwrap with an if-let check
dispatch_async(dispatch_get_main_queue(), {
let image = UIImage(data: data!)
self.imageArray.append(image!)
print("imageArray count is:")
print(self.imageArray.count)
self.carouselView.reloadData() // this one does nothing! I put it in for luck!
});
}
//----------------------------------- download image -----------------------------------------
self.imageNinja = "hhh"
}
}
task.resume()
}
// ================================= end of view did load
}
// checking internet connection
func connectedToNetwork() -> Bool {
var zeroAddress = sockaddr_in()
zeroAddress.sin_len = UInt8(sizeofValue(zeroAddress))
zeroAddress.sin_family = sa_family_t(AF_INET)
guard let defaultRouteReachability = withUnsafePointer(&zeroAddress, {
SCNetworkReachabilityCreateWithAddress(nil, UnsafePointer($0))
}) else {
return false
}
var flags : SCNetworkReachabilityFlags = []
if SCNetworkReachabilityGetFlags(defaultRouteReachability, &flags) == false {
return false
}
let isReachable = flags.contains(.Reachable)
let needsConnection = flags.contains(.ConnectionRequired)
return (isReachable && !needsConnection)
}
}
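For reference, in modern Swift the four copy-pasted download blocks above collapse into one small helper (a sketch, not part of the original answer):
// Sketch (Swift 5): fetch one image and deliver it on the main queue.
func loadImage(from urlString: String, completion: @escaping (UIImage?) -> Void) {
    guard let url = URL(string: urlString) else { completion(nil); return }
    URLSession.shared.dataTask(with: url) { data, _, _ in
        let image = data.flatMap(UIImage.init(data:))
        DispatchQueue.main.async { completion(image) }
    }.resume()
}
// Usage: loadImage(from: urls[0]) { image in if let image = image { self.imageArray.append(image); self.carouselView.reloadData() } }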