Taking pictures of my scene

Hello,

I use a scene with rendering and post-processing effects (brightness, contrast, hue saturation). I’d like to create a function that takes different photos of my scene at different positions and displays them directly on the screen.
I’ve done it, but I’ve got a major problem: I don’t have the effects displayed on screen, so the images aren’t representative of my scene.

I’ve tried creating a composer just for this etc., but every time I do this, my rendering simply bugs and my images end up empty.

Could someone help me integrate the effects of my scene so that they are perceptible in the photos?

Here is my code:

takeSceneSnapshots(displayImages = true) {
    // Captures the scene from several yaw angles of `this.mainMouthModel`
    // and displays the resulting images in an overlay container.
    // Acts as a toggle: a call with displayImages=true shows the views,
    // a subsequent call removes them.

    // ------------------------------------------------------------
    // 1) Read the canvas size and its z-index
    // ------------------------------------------------------------
    const domRect = this.context.domElement.getBoundingClientRect();
    const computedStyle = getComputedStyle(this.context.domElement);
    const baseZIndex = parseInt(computedStyle.zIndex, 10) || 0;

    // ------------------------------------------------------------
    // 2) New size (70% of the original) and position
    //    (centered, 15% margin on each side)
    // ------------------------------------------------------------
    const newWidth = domRect.width * 0.7;
    const newHeight = domRect.height * 0.7;
    const offsetLeft = domRect.left + 0.15 * domRect.width;
    const offsetTop = domRect.top + 0.15 * domRect.height;

    // ------------------------------------------------------------
    // 3) Lazily create the overlay container
    // ------------------------------------------------------------
    if (!this.imageContainer) {
      this.imageContainer = document.createElement('div');
      this.imageContainer.style.position = 'absolute';
      this.imageContainer.style.top = `${offsetTop}px`;
      this.imageContainer.style.left = `${offsetLeft}px`;
      this.imageContainer.style.width = `${newWidth}px`;
      this.imageContainer.style.height = `${newHeight}px`;
      this.imageContainer.style.display = 'flex';
      this.imageContainer.style.alignItems = 'center';
      this.imageContainer.style.justifyContent = 'center';
      this.imageContainer.style.gap = '10px';
      this.imageContainer.style.background = '#3F434A';
      // One layer above the renderer canvas so the snapshots are visible.
      this.imageContainer.style.zIndex = `${baseZIndex + 1}`;
      document.body.appendChild(this.imageContainer);
    }

    // ------------------------------------------------------------
    // Enable: display requested and multi-view mode not yet active
    // ------------------------------------------------------------
    if (displayImages && !this.isMultiplesViewsActivated) {
      this.isMultiplesViewsActivated = true;
      this.imageContainer.innerHTML = ''; // clear any previous snapshots

      const scene = this.context.scene;
      const renderer = this.context.renderer;
      const mainCamera: any = this.context.mainCamera;

      // ----------------------------------------------------------
      // 4) Save the camera and model transforms so they can be
      //    restored after the snapshots are taken.
      // ----------------------------------------------------------
      const originalCameraPosition = mainCamera.position.clone();
      const originalCameraQuaternion = mainCamera.quaternion.clone();

      const originalMouthPosition = this.mainMouthModel.position.clone();
      const originalMouthQuaternion = this.mainMouthModel.quaternion.clone();

      // BUGFIX: the previous code passed quaternion components (x/z) straight
      // into Euler rotation.set(), which expects radians, not quaternion
      // components. Convert the saved orientation to Euler angles first.
      const originalMouthEuler = new THREE.Euler().setFromQuaternion(originalMouthQuaternion);

      // Make sure the main camera matrices are up to date before rendering.
      mainCamera.updateProjectionMatrix();
      mainCamera.updateMatrixWorld(true);

      // Yaw angles (degrees) for the three views.
      const angles = [0, -90, 90];

      // ----------------------------------------------------------
      // Render one snapshot per angle
      // ----------------------------------------------------------
      angles.forEach((angle) => {
        // 1) Temporarily rotate the model around Y, keeping original X/Z.
        this.mainMouthModel.rotation.set(
          originalMouthEuler.x,
          THREE.MathUtils.degToRad(angle),
          originalMouthEuler.z
        );

        // To also move the model per view, set its position here.

        // 2) Render into an offscreen target.
        // NOTE(review): renderer.render() bypasses any EffectComposer
        // post-processing (brightness/contrast/hue-saturation), which is why
        // the snapshots do not match the on-screen look. If the app owns a
        // composer, render through it into this target instead — TODO confirm
        // how the composer is exposed by this.context.
        const renderTarget = new THREE.WebGLRenderTarget(1080, 1080);
        renderer.setRenderTarget(renderTarget);
        renderer.render(scene, mainCamera);
        renderer.setRenderTarget(null);

        // 3) Read the pixels back (RGBA bytes, rows bottom-up).
        const buffer = new Uint8Array(1080 * 1080 * 4);
        renderer.readRenderTargetPixels(renderTarget, 0, 0, 1080, 1080, buffer);

        // 4) Draw into a scratch canvas; WebGL readback is bottom-up,
        //    so flip vertically before presenting.
        const canvas = document.createElement('canvas');
        canvas.width = 1080;
        canvas.height = 1080;
        const ctx = canvas.getContext('2d')!;
        const imageData = ctx.createImageData(1080, 1080);
        imageData.data.set(buffer);
        ctx.putImageData(flipY(imageData), 0, 0);

        // 5) Turn the canvas into an <img> and add it to the overlay.
        const img = document.createElement('img');
        img.src = canvas.toDataURL();

        // Roughly one third of the strip per image, aspect ratio preserved.
        img.style.maxWidth = '30%';
        img.style.height = 'auto';
        img.style.border = '2px solid white';

        this.imageContainer.appendChild(img);

        // Free the GPU texture backing this snapshot.
        renderTarget.dispose();
      });

      // ----------------------------------------------------------
      // 5) Restore the original camera and model transforms
      // ----------------------------------------------------------
      mainCamera.position.copy(originalCameraPosition);
      mainCamera.quaternion.copy(originalCameraQuaternion);

      this.mainMouthModel.position.copy(originalMouthPosition);
      this.mainMouthModel.quaternion.copy(originalMouthQuaternion);

      // ----------------------------------------------------------
      // (Re)show the container
      // ----------------------------------------------------------
      this.imageContainer.style.display = 'flex';
    }
    // ------------------------------------------------------------
    // Disable: tear the overlay down entirely
    // ------------------------------------------------------------
    else if (this.isMultiplesViewsActivated) {
      this.isMultiplesViewsActivated = false;
      this.imageContainer.remove();
      this.imageContainer = null;
    }

    // ------------------------------------------------------------
    // INTERNAL HELPERS
    // ------------------------------------------------------------

    // Flip an ImageData vertically (WebGL readback rows are bottom-up).
    function flipY(imageData: ImageData) {
      const flipped = new Uint8ClampedArray(imageData.data.length);
      const rowSize = imageData.width * 4;
      for (let y = 0; y < imageData.height; y++) {
        const sourceRow = imageData.data.subarray(y * rowSize, (y + 1) * rowSize);
        const targetRowIndex = (imageData.height - y - 1) * rowSize;
        flipped.set(sourceRow, targetRowIndex);
      }
      return new ImageData(flipped, imageData.width, imageData.height);
    }
}

Hi Robin L.,

To address your issue with capturing scene snapshots that include post-processing effects, you’ll want to ensure that you’re using a proper rendering pipeline that correctly applies these effects when taking a screenshot.

Here’s a refined approach that you could implement in your takeSceneSnapshots function:

  1. Create a Render Target: Use a render target that allows you to apply post-processing effects when rendering the scene specifically for the snapshot.

  2. Use EffectComposer: After setting up the render target, utilize an EffectComposer to apply the desired post-processing effects (like brightness, contrast, hue saturation) during rendering.

  3. Use the Correct Render Call: Ensure that you render the scene to the texture created in the previous step, then take the snapshot from that texture.

Here’s a sample implementation:

import { WebGLRenderTarget, EffectComposer, RenderPass } from 'three/examples/jsm/postprocessing/EffectComposer.js';
import { ShaderPass } from 'three/examples/jsm/postprocessing/ShaderPass.js';
import { BrightnessContrastShader } from 'three/examples/jsm/shaders/BrightnessContrastShader.js';

takeSceneSnapshots(displayImages = true) {
    // Renders the scene with post-processing applied into an offscreen
    // target, then captures and (optionally) displays the result.

    // Render target sized to the viewport; receives the post-processed frame.
    const renderTarget = new WebGLRenderTarget(window.innerWidth, window.innerHeight);

    // Composer drives the post-processing chain for this snapshot.
    const composer = new EffectComposer(this.context.renderer, renderTarget);
    composer.addPass(new RenderPass(this.context.scene, this.context.camera));

    // Add shader pass for effects (adjust parameters as needed).
    const brightnessContrastPass = new ShaderPass(BrightnessContrastShader);
    // BUGFIX: mutate the existing uniform's .value — replacing the whole
    // uniform object breaks the reference held by the compiled material.
    brightnessContrastPass.uniforms['brightness'].value = 0.1; // Example value
    brightnessContrastPass.uniforms['contrast'].value = 1.2; // Example value
    composer.addPass(brightnessContrastPass);

    // Render to texture; after this call the target holds the final,
    // post-processed image.
    composer.render();

    // Take the screenshot from the render target's texture.
    const dataUrl = this.takeScreenshot(renderTarget.texture);

    if (displayImages) {
        this.displayImage(dataUrl);
    }

    // Free GPU resources — both are recreated on every call.
    composer.dispose?.();
    renderTarget.dispose();
}

takeScreenshot(texture) {
    // Stub: convert the given texture to a data URL.
    // (This example assumes a utility function exists elsewhere —
    // e.g. drawing the texture to a canvas and calling toDataURL.)
    // ...
}

displayImage(dataUrl) {
    // Stub: display the captured image, e.g. by assigning dataUrl
    // to the src of an <img> element added to the page.
    // ...
}

In this code, I assume that you have utilities for displaying the image and converting the texture to a data URL. You will need to adjust the post-processing settings and shader parameters according to your needs.

Make sure to call composer.render() before taking a screenshot, which ensures that all your effects are applied to the render target.

For further reference on the components and shaders available, you can check the Needle Engine API.

If you have specific settings or issues with your post-processing effects, please share those details for further assistance.

1 replies left - Get a PRO license for more AI replies.
thread_nZNwW44AAtWGozIi8Qf2y3Wp
Reply to this message to continue the conversation

It doesn’t help me a lot :confused:

Hi Robin L.,

I understand that the previous response may not have fully addressed your issue. To assist you more effectively, could you please provide additional details? Specifically:

  1. What specific post-processing effects are you trying to achieve?
  2. Are you using a particular version of Needle Engine or any specific library for post-processing?
  3. Any error messages or issues observed when trying to take snapshots?

The more information you share, the better I can help you resolve the issue.

0 replies left - Get a PRO license for more AI replies.
thread_nZNwW44AAtWGozIi8Qf2y3Wp
Reply to this message to continue the conversation

Needle Engine has a screenshot function that takes arguments like which camera to use. Have you tried that?

For example the following takes a 1024x960 screenshot and downloads the file immediately.

// Needle Engine helper: captures a 1024x960 frame and immediately downloads
// it as "test.png". Per the poster, this path also accounts for
// post-processing and XR rendering — verify against the Needle Engine docs.
screenshot2({
   width: 1024,
   height: 960,
   download_filename: "test.png"
});

Hello, I found a way, but I will try yours too!

takeSceneSnapshots(displayImages = true) {
    // Captures the scene from several yaw angles of `this.mainMouthModel` by
    // rendering to the visible canvas and reading it back with toDataURL(),
    // so whatever the renderer puts on screen (including effects applied in
    // the default pipeline) ends up in the snapshot.
    // Acts as a toggle: a call with displayImages=true shows the views,
    // a subsequent call removes them.

    // ------------------------------------------------------------
    // 1) Read the canvas size and its z-index
    // ------------------------------------------------------------
    const domRect = this.context.domElement.getBoundingClientRect();
    const computedStyle = getComputedStyle(this.context.domElement);
    const baseZIndex = parseInt(computedStyle.zIndex, 10) || 0;

    // ------------------------------------------------------------
    // 2) New size (70% of the original) and position
    //    (centered, 15% margin on each side)
    // ------------------------------------------------------------
    const newWidth = domRect.width * 0.7;
    const newHeight = domRect.height * 0.7;
    const offsetLeft = domRect.left + 0.15 * domRect.width;
    const offsetTop = domRect.top + 0.15 * domRect.height;

    // ------------------------------------------------------------
    // 3) Lazily create the overlay container
    // ------------------------------------------------------------
    if (!this.imageContainer) {
      this.imageContainer = document.createElement('div');
      this.imageContainer.style.position = 'absolute';
      this.imageContainer.style.top = `${offsetTop}px`;
      this.imageContainer.style.left = `${offsetLeft}px`;
      this.imageContainer.style.width = `${newWidth}px`;
      this.imageContainer.style.height = `${newHeight}px`;
      this.imageContainer.style.display = 'flex';
      this.imageContainer.style.alignItems = 'center';
      this.imageContainer.style.justifyContent = 'center';
      this.imageContainer.style.gap = '10px';
      this.imageContainer.style.background = '#3F434A';
      // One layer above the renderer canvas so the snapshots are visible.
      this.imageContainer.style.zIndex = `${baseZIndex + 1}`;
      document.body.appendChild(this.imageContainer);
    }

    // ------------------------------------------------------------
    // Enable: display requested and multi-view mode not yet active
    // ------------------------------------------------------------
    if (displayImages && !this.isMultiplesViewsActivated) {
      this.isMultiplesViewsActivated = true;
      this.imageContainer.innerHTML = ''; // clear any previous snapshots

      const mainCamera: any = this.context.mainCamera;

      // ----------------------------------------------------------
      // 4) Save the camera and model transforms so they can be
      //    restored after the snapshots are taken.
      // ----------------------------------------------------------
      const originalCameraPosition = mainCamera.position.clone();
      const originalCameraQuaternion = mainCamera.quaternion.clone();

      const originalMouthPosition = this.mainMouthModel.position.clone();
      const originalMouthQuaternion = this.mainMouthModel.quaternion.clone();

      // BUGFIX: the previous code passed quaternion components (x/z) straight
      // into Euler rotation.set(), which expects radians, not quaternion
      // components. Convert the saved orientation to Euler angles first.
      const originalMouthEuler = new THREE.Euler().setFromQuaternion(originalMouthQuaternion);

      // Make sure the main camera matrices are up to date before rendering.
      mainCamera.updateProjectionMatrix();
      mainCamera.updateMatrixWorld(true);

      // Yaw angles (degrees) for the three views.
      const angles = [0, -90, 90];

      // ----------------------------------------------------------
      // Render one snapshot per angle
      // ----------------------------------------------------------
      angles.forEach((angle) => {
        // 1) Temporarily rotate the model around Y, keeping original X/Z.
        this.mainMouthModel.rotation.set(
          originalMouthEuler.x,
          THREE.MathUtils.degToRad(angle),
          originalMouthEuler.z
        );

        // 2) Clear, then render the scene to the visible canvas. Calling
        //    toDataURL() immediately after render() is what makes the capture
        //    reliable without preserveDrawingBuffer.
        this.context.renderer.clear(true, true, true);
        this.context.renderer.render(this.context.scene, this.context.mainCamera);

        // 3) Grab the freshly rendered frame as a PNG data URL.
        const imageData = this.context.renderer.domElement.toDataURL("image/png");

        // 4) Create an <img> for the captured frame.
        const img = document.createElement('img');
        img.src = imageData;

        // Roughly one third of the strip per image, aspect ratio preserved.
        img.style.maxWidth = '30%';
        img.style.height = 'auto';
        img.style.border = '2px solid white';

        // BUGFIX: the image was appended twice; once is enough.
        this.imageContainer.appendChild(img);
      });

      // ----------------------------------------------------------
      // 5) Restore the original camera and model transforms
      // ----------------------------------------------------------
      mainCamera.position.copy(originalCameraPosition);
      mainCamera.quaternion.copy(originalCameraQuaternion);

      this.mainMouthModel.position.copy(originalMouthPosition);
      this.mainMouthModel.quaternion.copy(originalMouthQuaternion);

      // ----------------------------------------------------------
      // (Re)show the container
      // ----------------------------------------------------------
      this.imageContainer.style.display = 'flex';
    }
    // ------------------------------------------------------------
    // Disable: tear the overlay down entirely
    // ------------------------------------------------------------
    else if (this.isMultiplesViewsActivated) {
      this.isMultiplesViewsActivated = false;
      this.imageContainer.remove();
      this.imageContainer = null;
    }

    // NOTE(review): the unused flipY helper from the render-target version was
    // removed — toDataURL() already produces a correctly oriented image.
}

Hi, i think you can save all that code by just using the method I shared. You can provide it with a camera too and it can return the data url (as in your code) that you can then assign to your image.

The reason for this is simply that it will take care of e.g. postprocessing, xr rendering (e.g. work in AR on Android) etc.

Ultimately just my suggestion tho and if you have a working solution now that’s great! :slight_smile:

This topic was automatically closed 13 days after the last reply. New replies are no longer allowed.