iOS – How to insert a mesh (.showSceneUnderstanding) while using ARKit and RealityKit to enable LiDAR guidelines


I have used Apple's ARKit documentation to create a simple ARKit application that uses SceneKit and RealityKit.

I am currently trying to add guidelines, i.e. a mesh overlay, by inserting .showSceneUnderstanding.

Is there a particular method or option that I am missing to access this data and display the mesh depth?
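
For context, .showSceneUnderstanding appears to be declared on RealityKit's ARView.DebugOptions rather than on the SceneKit debug options, so a setup along the lines of the sketch below should compile (RealityKitViewController is just an illustrative name). My project, however, uses an ARSCNView.

import UIKit
import RealityKit
import ARKit

class RealityKitViewController: UIViewController {
  @IBOutlet var arView: ARView!

  override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)
    let config = ARWorldTrackingConfiguration()
    //scene reconstruction requires a LiDAR device; check before enabling
    if ARWorldTrackingConfiguration.supportsSceneReconstruction(.meshWithClassification) {
      config.sceneReconstruction = .meshWithClassification
    }
    //this call has no counterpart on ARSCNView, as far as I can tell
    arView.debugOptions.insert(.showSceneUnderstanding)
    arView.session.run(config)
  }
}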

I have attached my ARSCNView-based code below (★ indicates the line where I am currently getting an error).


import UIKit
import SceneKit
import ARKit
import RealityKit

class ViewController: UIViewController, ARSessionDelegate {
  var trackingStateOK: Bool = false
  let sphereNode = SCNNode(geometry: SCNSphere(radius: 0.01))
  var tappedPointNodeOrigin: SCNNode?
  var tappedPointNodeDest: SCNNode?
  var lineNode = SCNNode()
  var objectNode: SCNNode!
  var distanceLabel = UILabel()
  let coachingOverlayView = UIView()
  //separating data acquisition from display
  //var arProvider: ARProvider = ARProvider()
  @IBOutlet var sceneView: ARSCNView!
  //var sceneView:ARSCNView!
  override func viewDidLoad() {
    super.viewDidLoad()
    //sceneView = ARSCNView(frame: view.bounds)
    view.addSubview(sceneView)
    sceneView.scene.rootNode.addChildNode(lineNode)
    distanceLabel.text = ""
    distanceLabel.frame = CGRect(x: 0, y: view.bounds.maxY - 200, width: view.bounds.width, height: 200)
    view.addSubview(distanceLabel)
    distanceLabel.textColor = .red
    distanceLabel.textAlignment = .center
    distanceLabel.numberOfLines = 3
    distanceLabel.font = .systemFont(ofSize: 40, weight: .bold)
    view.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(tap(recognizer:))))
    //setupCoachingOverlay()
  }
  override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)
    let worldtracking = ARWorldTrackingConfiguration()
    //varun added 28.12.2023
    //adding the guidelines: scene reconstruction requires a LiDAR device,
    //so check for support before enabling it
    if ARWorldTrackingConfiguration.supportsSceneReconstruction(.meshWithClassification) {
      worldtracking.sceneReconstruction = .meshWithClassification
    }
    worldtracking.environmentTexturing = .automatic
    worldtracking.planeDetection = [.horizontal, .vertical]
    //varun added code: requesting scene depth info in the configuration
    worldtracking.frameSemantics = [.sceneDepth, .smoothedSceneDepth]
    //sceneView.session.run(worldtracking, options: [.removeExistingAnchors])
    sceneView.session.run(worldtracking)
    ★//sceneView.debugOptions.insert(.showSceneUnderstanding)★  //this is the line that does not compile
    sceneView.debugOptions = [.showPhysicsFields, .showCameras, .showFeaturePoints, .showSkeletons, .showWireframe, .showWorldOrigin]
    sceneView.session.delegate = self
  }
  //func session(_ session: ARSession, cameraDidChangeTrackingState camera: ARCamera) {
  func session(_ session: ARSession, didUpdate frame: ARFrame) {
    trackingStateOK = true
    //prefer the smoothed depth map, fall back to the raw one
    guard let sceneDepth = frame.smoothedSceneDepth ?? frame.sceneDepth else {
      print("failed to acquire scene depth")
      return
    }
    let pixelBuffer: CVPixelBuffer = sceneDepth.depthMap
    print(pixelBuffer)
  }

  @objc func tap(recognizer: UITapGestureRecognizer) {
    //measurement handling elided from this snippet
  }
}
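
At the moment all I can do is print the raw CVPixelBuffer. To show what I mean by accessing the depth data: the hypothetical helper below (depthValue(at:in:) is my own name, not an ARKit API) is how I currently read individual Float32 values out of sceneDepth.depthMap.

import CoreGraphics
import CoreVideo

//minimal sketch: ARDepthData.depthMap uses kCVPixelFormatType_DepthFloat32,
//so each pixel is one Float32 distance in metres
func depthValue(at point: CGPoint, in depthMap: CVPixelBuffer) -> Float? {
  CVPixelBufferLockBaseAddress(depthMap, .readOnly)
  defer { CVPixelBufferUnlockBaseAddress(depthMap, .readOnly) }
  let width = CVPixelBufferGetWidth(depthMap)
  let height = CVPixelBufferGetHeight(depthMap)
  let x = Int(point.x)
  let y = Int(point.y)
  guard x >= 0, x < width, y >= 0, y < height,
        let base = CVPixelBufferGetBaseAddress(depthMap) else { return nil }
  let bytesPerRow = CVPixelBufferGetBytesPerRow(depthMap)
  let row = base.advanced(by: y * bytesPerRow)
  return row.assumingMemoryBound(to: Float32.self)[x]
}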

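One direction I have been considering, in case ARSCNView simply has no built-in equivalent of the debug option, is building the wireframe myself from the ARMeshAnchors that scene reconstruction produces. This is only a sketch of that idea (nodeFor(meshAnchor:) is my own helper name); the returned node would be handed back from ARSCNViewDelegate's renderer(_:nodeFor:).

import ARKit
import SceneKit

//sketch: convert an ARMeshAnchor's geometry into SceneKit geometry
//so the reconstruction mesh can be drawn in an ARSCNView
func nodeFor(meshAnchor: ARMeshAnchor) -> SCNNode {
  let meshGeometry = meshAnchor.geometry
  //wrap the Metal vertex buffer in an SCNGeometrySource
  let vertices = meshGeometry.vertices
  let vertexSource = SCNGeometrySource(
    buffer: vertices.buffer,
    vertexFormat: vertices.format,
    semantic: .vertex,
    vertexCount: vertices.count,
    dataOffset: vertices.offset,
    dataStride: vertices.stride)
  //copy the face index buffer into an SCNGeometryElement
  let faces = meshGeometry.faces
  let facesData = Data(bytes: faces.buffer.contents(),
                       count: faces.buffer.length)
  let element = SCNGeometryElement(
    data: facesData,
    primitiveType: .triangles,
    primitiveCount: faces.count,
    bytesPerIndex: faces.bytesPerIndex)
  let geometry = SCNGeometry(sources: [vertexSource], elements: [element])
  let material = SCNMaterial()
  material.fillMode = .lines //wireframe, similar to the guideline look
  geometry.materials = [material]
  return SCNNode(geometry: geometry)
}
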
I would be grateful for any inputs or suggestions pointing me in the right direction.

Thanks in advance
