{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Ensure the scenepic library will auto reload\n",
    "%load_ext autoreload"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports\n",
    "import json\n",
    "import math\n",
    "import os\n",
    "\n",
    "import numpy as np\n",
    "import scenepic as sp\n",
    "%autoreload\n",
    "\n",
    "# Seed random number generator for consistency\n",
    "np.random.seed(0)\n",
    "\n",
    "ASSET_DIR = os.path.join(\"..\", \"ci\", \"assets\")\n",
    "\n",
    "def asset_path(filename):\n",
    "    return os.path.join(ASSET_DIR, filename)\n"
   ]
  },
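  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check: this assumes the notebook is run from its usual location\n",
    "# in the repository, so that the relative \"../ci/assets\" path defined above\n",
    "# resolves to the tutorial assets.\n",
    "assert os.path.isdir(ASSET_DIR), \"ASSET_DIR not found; adjust the relative path above\"\n"
   ]
  },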
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# ScenePic Python Tutorials\n",
    "\n",
    "These tutorials provide practical examples that highlight most of the functionality supported by ScenePic. While by no means exhaustive, they should give you a solid start towards building useful and insightful 3D visualizations of your own. If there is something you feel is missing from these tutorials, or if there is something you would like to contribute, please contact the maintainers via GitHub Issues."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 1 - Scene and Canvas basics\n",
    "\n",
    "# Create a Scene, the top level container in ScenePic\n",
    "scene = sp.Scene()\n",
    "\n",
    "# A Scene can contain many Canvases\n",
    "# For correct operation, you should create these using scene.create_canvas_3d() (rather than constructing them directly)\n",
    "canvas_1 = scene.create_canvas_3d(width = 300, height = 300)\n",
    "canvas_2 = scene.create_canvas_3d(width = 100, height = 300)\n",
    "\n",
    "# ScenePic has told Jupyter how to display scene objects\n",
    "scene"
   ]
  },
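  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Beyond Jupyter display, a Scene can be written out as a standalone HTML page.\n",
    "# A minimal sketch (the output filename here is an arbitrary choice):\n",
    "scene.save_as_html(\"tutorial_1.html\", title=\"Tutorial 1 - Scene and Canvas basics\")\n"
   ]
  },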
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 2 - Meshes and Frames\n",
    "\n",
    "# Create a scene\n",
    "scene = sp.Scene()\n",
    "\n",
    "# A Mesh is a vertex/triangle/line buffer with convenience methods\n",
    "# Meshes \"belong to\" the Scene, so should be created using create_mesh()\n",
    "# Meshes can be re-used across multiple frames/canvases\n",
    "my_first_mesh = scene.create_mesh(shared_color = sp.Color(1.0, 0.0, 1.0)) # If shared_color is not provided, you can use per-vertex coloring\n",
    "my_first_mesh.add_cube(transform = sp.Transforms.Scale(0.1)) # Adds a unit cube centered at the origin\n",
    "my_first_mesh.add_cube(transform = np.dot(sp.Transforms.Translate([-1.0, 1.0, -1.0]), sp.Transforms.Scale(0.5)))\n",
    "my_first_mesh.add_sphere(transform = sp.Transforms.Translate([1.0, 1.0, 1.0]))\n",
    "\n",
    "# A Canvas is a 3D rendering panel\n",
    "canvas = scene.create_canvas_3d(width = 300, height = 300)\n",
    "\n",
    "# Create an animation with multiple Frames\n",
    "# A Frame references a set of Meshes\n",
    "# Frames are created from the Canvas not the Scene\n",
    "for i in range(10):\n",
    "    frame = canvas.create_frame()\n",
    "    frame.add_mesh(my_first_mesh, transform = sp.Transforms.Translate([i / 10.0, 0.0, 0.0])) # An arbitrary rigid transform can optionally be specified.\n",
    "    mesh2 = scene.create_mesh(shared_color = sp.Color(1.0, 0.0, 0.0), camera_space = True)\n",
    "    mesh2.add_cube(transform = np.dot(sp.Transforms.Translate([0.0, 0.0, -5.0]), sp.Transforms.Scale(0.5)))\n",
    "    frame.add_mesh(mesh2)\n",
    "    label = scene.create_label(text = \"Hi\", color = sp.Colors.White, size_in_pixels = 80, offset_distance = 0.6, camera_space = True)\n",
    "    frame.add_label(label = label, position = [0.0, 0.0, -5.0])\n",
    "\n",
    "# Display the Scene in Jupyter\n",
    "scene"
   ]
  },
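  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal sketch expanding on the \"arbitrary rigid transform\" comment above:\n",
    "# per-frame transforms can be composed from simpler parts with np.dot, which\n",
    "# applies right to left. The cube, angle, and offset values are arbitrary.\n",
    "scene = sp.Scene()\n",
    "mesh = scene.create_mesh(shared_color = sp.Color(0.0, 1.0, 0.0))\n",
    "mesh.add_cube()\n",
    "canvas = scene.create_canvas_3d(width = 300, height = 300)\n",
    "n_frames = 30\n",
    "for i in range(n_frames):\n",
    "    angle = 2 * math.pi * i / n_frames\n",
    "    # scale the cube, rotate it about the z axis, then translate it along x\n",
    "    pose = np.dot(sp.Transforms.Translate([1.0, 0.0, 0.0]),\n",
    "                  np.dot(sp.Transforms.rotation_about_z(angle), sp.Transforms.Scale(0.5)))\n",
    "    canvas.create_frame().add_mesh(mesh, transform = pose)\n",
    "scene"
   ]
  },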
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 3 - Point clouds 1\n",
    "\n",
    "# Create a scene\n",
    "scene = sp.Scene()\n",
    "\n",
    "# Create a mesh that we'll turn into a point cloud using enable_instancing()\n",
    "mesh = scene.create_mesh(shared_color = sp.Color(0,1,0))\n",
    "mesh.add_cube() # Unit diameter cube that will act as the primitive\n",
    "mesh.apply_transform(sp.Transforms.Scale(0.01)) # Scale the primitive\n",
    "mesh.enable_instancing(positions = 2 * np.random.rand(10000, 3) - 1) # Replicates the mesh across many instances with the provided translations. You can optionally also provide per-instance colors and quaternion rotations.\n",
    "\n",
    "# Create Canvas and Frame, and add Mesh to Frame\n",
    "canvas = scene.create_canvas_3d(width = 300, height = 300, shading=sp.Shading(bg_color=sp.Colors.White))\n",
    "frame = canvas.create_frame()\n",
    "frame.add_mesh(mesh)\n",
    "\n",
    "scene"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 4 - Point clouds 2\n",
    "# Note that the point cloud primitive can be arbitrarily complex.\n",
    "# The primitive geometry will only be stored once for efficiency.\n",
    "\n",
    "# Some parameters\n",
    "disc_thickness = 0.2\n",
    "normal_length = 1.5\n",
    "point_size = 0.1\n",
    "\n",
    "# A helper Mesh which we won't actually use for rendering - just to find the points and normals on a sphere to be used in the point cloud below\n",
    "# NB this is created using the sp.Mesh() constructor directly so it doesn't get added automatically to the Scene\n",
    "sphere_mesh = sp.Mesh()\n",
    "sphere_mesh.add_sphere(transform = sp.Transforms.Scale(2.0), color = sp.Color(1.0, 0.0, 0.0))\n",
    "N = sphere_mesh.count_vertices()\n",
    "points = sphere_mesh.vertex_buffer['pos']\n",
    "normals = sphere_mesh.vertex_buffer['norm']\n",
    "\n",
    "# Convert the normals into quaternion rotations\n",
    "rotations = np.zeros((N, 4))\n",
    "for i in range(0, N):\n",
    "    rotations[i, :] = sp.Transforms.QuaternionToRotateXAxisToAlignWithAxis(normals[i, :])\n",
    "\n",
    "# Generate some random colors\n",
    "colors = np.random.rand(N,3)\n",
    "\n",
    "# Create a scene\n",
    "scene = sp.Scene()\n",
    "\n",
    "# Create a mesh that we'll turn into a point cloud using enable_instancing()\n",
    "mesh = scene.create_mesh(shared_color = sp.Color(0,1,0), double_sided = True) # shared_color will be overridden in a moment\n",
    "\n",
    "# Add the primitive to the Mesh - a disc and a thickline showing the normal\n",
    "mesh.add_disc(segment_count = 20, transform = sp.Transforms.Scale([disc_thickness, 1.0, 1.0]))\n",
    "mesh.add_thickline(start_point = np.array([disc_thickness * 0.5, 0.0, 0.0]), end_point = np.array([normal_length, 0.0, 0.0]), start_thickness = 0.2, end_thickness = 0.1)\n",
    "mesh.apply_transform(sp.Transforms.Scale(point_size))\n",
    "\n",
    "# Now turn the mesh into a point cloud\n",
    "mesh.enable_instancing(positions = points, rotations = rotations, colors = colors) # Both rotations and colors are optional\n",
    "\n",
    "# Create Canvas and Frame, and add Mesh to Frame\n",
    "canvas = scene.create_canvas_3d(width = 300, height = 300)\n",
    "frame = canvas.create_frame()\n",
    "frame.add_mesh(mesh)\n",
    "\n",
    "scene"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 5 - Misc Meshes\n",
    "\n",
    "# Scene is the top level container in ScenePic\n",
    "scene = sp.Scene()\n",
    "\n",
    "# Ok - let's start by creating some Mesh objects\n",
    "\n",
    "# Mesh 1 - contains a cylinder and two wireframe spheres\n",
    "# Mesh objects can contain arbitrary triangle mesh and line geometry\n",
    "# Meshes can belong to \"layers\" which can be controlled by the user interactively\n",
    "mesh1 = scene.create_mesh(layer_id = \"Sphere+\") # No shared_color provided, so per-vertex coloring enabled\n",
    "mesh1.add_cylinder(color = sp.Color(1.0, 0.0, 0.0), transform = sp.Transforms.Translate([-2.0, 0.0, -2.0]))\n",
    "mesh1.add_uv_sphere(color = sp.Color(0.0, 0.0, 1.0), transform = np.dot(sp.Transforms.Translate([-1.0, 1.0, 0.0]), sp.Transforms.Scale(1.8)), fill_triangles = False, add_wireframe = True)\n",
    "mesh1.add_icosphere(color = sp.Color(0.0, 1.0, 1.0), transform = np.dot(sp.Transforms.Translate([2.0, 1.0, 0.0]), sp.Transforms.Scale(1.8)), fill_triangles = False, add_wireframe = True, steps = 2)\n",
    "\n",
    "# Mesh 2 - coordinate axes\n",
    "mesh2 = scene.create_mesh(layer_id = \"Coords\")\n",
    "mesh2.add_coordinate_axes(transform = sp.Transforms.Translate([0.0, 0.0, 0.0]))\n",
    "\n",
    "# Mesh 3 - example of Loop Subdivision on a cube\n",
    "cube_verts = np.array([[-0.5, -0.5, -0.5], [+0.5, -0.5, -0.5], [-0.5, +0.5, -0.5], [+0.5, +0.5, -0.5], [-0.5, -0.5, +0.5], [+0.5, -0.5, +0.5], [-0.5, +0.5, +0.5], [+0.5, +0.5, +0.5]])\n",
    "cube_tris = np.array([[0, 2, 3], [0, 3, 1], [1, 3, 7], [1, 7, 5], [4, 5, 7], [4, 7, 6], [4, 6, 2], [4, 2, 0], [2, 6, 7], [2, 7, 3], [4, 0, 1], [4, 1, 5]])\n",
    "cube_verts_a, cube_tris_a = sp.LoopSubdivStencil(cube_tris, 2, False).apply(cube_verts) # Two steps of subdivision, no projection to limit surface. Stencils could be reused for efficiency for other meshes with the same triangle topology.\n",
    "cube_verts_b, cube_tris_b = sp.LoopSubdivStencil(cube_tris, 2, True).apply(cube_verts) # Two steps of subdivision, with projection to limit surface.\n",
    "mesh3 = scene.create_mesh(shared_color = sp.Color(1.0, 0.8, 0.8))\n",
    "mesh3.add_mesh_without_normals(cube_verts, cube_tris, transform = sp.Transforms.Translate([-1.0, 0.0, 0.0])) # Add non-subdivided cube\n",
    "mesh3.add_mesh_without_normals(cube_verts_a, cube_tris_a)\n",
    "mesh3.add_mesh_without_normals(cube_verts_b, cube_tris_b, transform = sp.Transforms.Translate([+1.0, 0.0, 0.0]))\n",
    "\n",
    "# Mesh 4 - line example\n",
    "mesh4 = scene.create_mesh()\n",
    "Nsegs = 7000\n",
    "positions = np.cumsum(np.random.rand(Nsegs, 3) * 0.2, axis = 0)\n",
    "colored_points = np.concatenate((positions, np.random.rand(Nsegs, 3)), axis = 1)\n",
    "mesh4.add_lines(colored_points[0:-1, :], colored_points[1:, :])\n",
    "mesh4.add_camera_frustum(color = sp.Color(1.0,1.0,0.0))\n",
    "\n",
    "# Let's create two Canvases this time\n",
    "canvas1 = scene.create_canvas_3d(width = 300, height = 300)\n",
    "canvas2 = scene.create_canvas_3d(width = 300, height = 300)\n",
    "\n",
    "# We can link their keyboard/mouse/etc. input events to keep the views in sync\n",
    "scene.link_canvas_events(canvas1, canvas2)\n",
    "\n",
    "# And we can specify that certain named layers should have user-controlled visibility and opacity\n",
    "# Meshes without a layer_id, or whose layer has no specified settings, will always be visible and opaque\n",
    "canvas1.set_layer_settings({\"Coords\" : { \"opacity\" : 0 }, \"Sphere+\" : { \"opacity\" : 1 }})\n",
    "\n",
    "# A Frame contains an array of meshes\n",
    "frame11 = canvas1.create_frame(meshes = [mesh1, mesh2]) # Note that Frames are created from the Canvas not the Scene\n",
    "frame21 = canvas2.create_frame(meshes = [mesh2, mesh3])\n",
    "frame22 = canvas2.create_frame(meshes = [mesh4, mesh1])\n",
    "\n",
    "# ScenePic has told Jupyter how to display scene objects\n",
    "scene"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 6 - Images and Textures\n",
    "\n",
    "# Scene is the top level container in ScenePic\n",
    "scene = sp.Scene()\n",
    "\n",
    "# Create and populate an Image object\n",
    "image1 = scene.create_image(image_id = \"PolarBear\")\n",
    "image1.load(asset_path(\"PolarBear.png\")) # This will preserve the image data in compressed PNG format\n",
    "\n",
    "# Create a texture map\n",
    "texture = scene.create_image(image_id = \"texture\")\n",
    "texture.load(asset_path(\"uv.png\")) # we can use this image to skin meshes\n",
    "\n",
    "# Example of a mesh that is defined in camera space not world space\n",
    "# This will not move as the virtual camera is moved with the mouse\n",
    "cam_space_mesh = scene.create_mesh(shared_color = sp.Color(1.0, 0.0, 0.0), camera_space = True)\n",
    "cam_space_mesh.add_sphere(transform = np.dot(sp.Transforms.Translate([10, -10, -20.0]), sp.Transforms.Scale(1.0)))\n",
    "\n",
    "# Some textured primitives\n",
    "sphere = scene.create_mesh(texture_id=texture.image_id, nn_texture = False)\n",
    "sphere.add_icosphere(steps=4)\n",
    "\n",
    "cube = scene.create_mesh(texture_id=texture.image_id)\n",
    "transform = sp.Transforms.translate([-1, 0, 0]) @ sp.Transforms.scale(0.5)\n",
    "cube.add_cube(transform=transform)\n",
    "\n",
    "# Show images in 3D canvas\n",
    "canvas = scene.create_canvas_3d(shading=sp.Shading(bg_color=sp.Colors.White))\n",
    "mesh1 = scene.create_mesh(texture_id = \"PolarBear\")\n",
    "mesh1.add_image() # Adds image in canonical position\n",
    "\n",
    "# Add an animation that rigidly transforms each image\n",
    "n_frames = 20\n",
    "for i in range(n_frames):\n",
    "    angle = 2 * math.pi * i / n_frames\n",
    "    c, s = math.cos(angle), math.sin(angle)\n",
    "\n",
    "    # Create a focus point that allows you to \"lock\" the camera's translation and optionally orientation by pressing the \"l\" key\n",
    "    axis = np.array([1.0, 0.0, 1.0])\n",
    "    axis /= np.linalg.norm(axis)\n",
    "    focus_point = sp.FocusPoint([c,s,0], orientation_axis_angle = axis * angle)\n",
    "\n",
    "    mesh = scene.create_mesh()\n",
    "    mesh.add_coordinate_axes(transform = np.dot(sp.Transforms.Translate(focus_point.position), sp.Transforms.RotationMatrixFromAxisAngle(axis, angle)))\n",
    "\n",
    "    im_size = 15\n",
    "    im_data = np.random.rand(im_size, im_size, 4)\n",
    "    im_data[:,:,3] = 0.5 + 0.5 * im_data[:,:,3]\n",
    "\n",
    "    imageB = scene.create_image()\n",
    "    imageB.from_numpy(im_data) # Converts data to PNG format\n",
    "    meshB = scene.create_mesh(texture_id = imageB.image_id, is_billboard = True, use_texture_alpha=True)\n",
    "    meshB.add_image(transform = np.dot(sp.Transforms.Scale(2.0), sp.Transforms.Translate([0,0,-1])))\n",
    "\n",
    "    frame = canvas.create_frame(focus_point = focus_point)\n",
    "    frame.add_mesh(mesh1, transform = sp.Transforms.Translate([c,s,0]))\n",
    "    frame.add_mesh(meshB, transform = np.dot(sp.Transforms.Scale(i * 1.0 / n_frames), sp.Transforms.Translate([-c,-s,0])))\n",
    "    frame.add_mesh(cam_space_mesh)\n",
    "    frame.add_mesh(sphere, transform=sp.Transforms.rotation_about_y(np.pi * 2 * i / n_frames))\n",
    "    frame.add_mesh(cube, transform=sp.Transforms.rotation_about_y(-np.pi * 2 * i / n_frames))\n",
    "    frame.add_mesh(mesh)\n",
    "\n",
    "# Show Scene\n",
    "scene"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 7 - 2D canvases\n",
    "# Scene is the top level container in ScenePic\n",
    "scene = sp.Scene()\n",
    "\n",
    "# Load an image\n",
    "image1 = scene.create_image(image_id = \"PolarBear\")\n",
    "image1.load(asset_path(\"PolarBear.png\")) # This will preserve the image data in compressed PNG format\n",
    "\n",
    "# Create and populate an Image object\n",
    "image2 = scene.create_image(image_id = \"Random\")\n",
    "image2.from_numpy(np.random.rand(20, 30, 3) * 128 / 255.0) # Converts data to PNG format\n",
    "\n",
    "# Create a 2D canvas demonstrating different image positioning options\n",
    "canvas1 = scene.create_canvas_2d(width = 400, height = 300, background_color = sp.Colors.White)\n",
    "canvas1.create_frame().add_image(image1, \"fit\")\n",
    "canvas1.create_frame().add_image(image1, \"fill\")\n",
    "canvas1.create_frame().add_image(image1, \"stretch\")\n",
    "canvas1.create_frame().add_image(image1, \"manual\", x = 50, y = 50, scale = 0.3)\n",
    "\n",
    "# You can composite images and primitives too\n",
    "canvas2 = scene.create_canvas_2d(width = 300, height = 300)\n",
    "f = canvas2.create_frame()\n",
    "f.add_image(image2, \"fit\")\n",
    "f.add_image(image1, \"manual\", x = 30, y = 30, scale = 0.2)\n",
    "f.add_circle(200, 200, 40, fill_color = sp.Colors.Black, line_width = 10, line_color = sp.Colors.Blue)\n",
    "f.add_rectangle(200, 100, 50, 25, fill_color = sp.Colors.Green, line_width = 0)\n",
    "f.add_text(\"Hello World\", 30, 100, sp.Colors.White, 100, \"segoe ui light\")\n",
    "\n",
    "scene.framerate = 2\n",
    "\n",
    "scene"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 8 - a mix of transparent and opaque objects, with labels\n",
    "np.random.seed(55)\n",
    "\n",
    "scene = sp.Scene()\n",
    "canvas = scene.create_canvas_3d(width = 700, height = 700)\n",
    "frame = canvas.create_frame()\n",
    "\n",
    "# Create a set of randomly placed primitives, each on its own layer with its own opacity\n",
    "layer_settings = { \"Labels\" : { \"opacity\" : 1.0 }}\n",
    "N = 20\n",
    "for i in range(N):\n",
    "    # Sample object\n",
    "    geotype = np.random.randint(2)\n",
    "    color = np.random.rand(3)\n",
    "    size = 0.3 * np.random.rand() + 0.2\n",
    "    position = 3.0 * np.random.rand(3) - 1.5\n",
    "    opacity = 1.0 if np.random.randint(2) == 0 else np.random.uniform(0.45, 0.55)\n",
    "\n",
    "    # Generate geometry\n",
    "    layer_id = \"Layer\" + str(i)\n",
    "    mesh = scene.create_mesh(shared_color = color, layer_id = layer_id)\n",
    "    layer_settings[layer_id] = { \"opacity\" : opacity }\n",
    "    if geotype == 0:\n",
    "        mesh.add_cube()\n",
    "    elif geotype == 1:\n",
    "        mesh.add_sphere()\n",
    "    mesh.apply_transform(sp.Transforms.Scale(size)) # Scale the primitive\n",
    "    mesh.apply_transform(sp.Transforms.Translate(position))\n",
    "    frame.add_mesh(mesh)\n",
    "\n",
    "    # Add label\n",
    "    text = \"{0:0.2f} {1:0.2f} {2:0.2f} {3:0.2f}\".format(color[0], color[1], color[2], opacity)\n",
    "    horizontal_align = [\"left\", \"center\", \"right\"][np.random.randint(3)]\n",
    "    vertical_align = [\"top\", \"middle\", \"bottom\"][np.random.randint(3)]\n",
    "    if geotype == 0:\n",
    "        if horizontal_align != \"center\" and vertical_align != \"middle\":\n",
    "            offset_distance = size * 0.7\n",
    "        else:\n",
    "            offset_distance = size * 0.9\n",
    "    else:\n",
    "        if horizontal_align != \"center\" and vertical_align != \"middle\":\n",
    "            offset_distance = size * 0.5 * 0.8\n",
    "        else:\n",
    "            offset_distance = size * 0.6\n",
    "    label = scene.create_label(text = text, color = color, layer_id = \"Labels\", font_family = \"consolas\", size_in_pixels = 80 * size, offset_distance = offset_distance, vertical_align = vertical_align, horizontal_align = horizontal_align)\n",
    "    frame.add_label(label = label, position = position)\n",
    "\n",
    "canvas.set_layer_settings(layer_settings)\n",
    "\n",
    "scene"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 9 - mesh animation\n",
    "\n",
    "# let's create our mesh to get started\n",
    "scene = sp.Scene()\n",
    "canvas = scene.create_canvas_3d(width=700, height=700)\n",
    "\n",
    "# Load a mesh to animate\n",
    "jelly_mesh = sp.load_obj(asset_path(\"jelly.obj\"))\n",
    "texture = scene.create_image(\"texture\")\n",
    "texture.load(asset_path(\"jelly.png\"))\n",
    "\n",
    "# create a base mesh for the animation. The animation\n",
    "# will only change the vertex positions, so this mesh\n",
    "# is used to set everything else, e.g. textures.\n",
    "base_mesh = scene.create_mesh(\"jelly_base\")\n",
    "base_mesh.texture_id = texture.image_id\n",
    "base_mesh.use_texture_alpha = True\n",
    "base_mesh.add_mesh(jelly_mesh)\n",
    "\n",
    "def random_linspace(min_val, max_val, num_samples):\n",
    "    vals = np.linspace(min_val, max_val, num_samples)\n",
    "    np.random.shuffle(vals)\n",
    "    return vals\n",
    "\n",
    "# this base mesh will be instanced, so we can animate each\n",
    "# instance individually using rigid transforms, in this case\n",
    "# just translation.\n",
    "marbles = scene.create_mesh(\"marbles_base\")\n",
    "num_marbles = 10\n",
    "marbles.add_sphere(sp.Colors.White, transform=sp.Transforms.Scale(0.2))\n",
    "marble_positions = np.zeros((num_marbles, 3), np.float32)\n",
    "marble_positions[:, 0] = random_linspace(-0.6, 0.6, num_marbles)\n",
    "marble_positions[:, 2] = random_linspace(-1, 0.7, num_marbles)\n",
    "marble_offsets = np.random.uniform(0, 2*np.pi, size=num_marbles).astype(np.float32)\n",
    "marble_colors_start = np.random.uniform(0, 1, size=(num_marbles, 3)).astype(np.float32)\n",
    "marble_colors_end = np.random.uniform(0, 1, size=(num_marbles, 3)).astype(np.float32)\n",
    "marbles.enable_instancing(marble_positions, colors=marble_colors_start)\n",
    "\n",
    "for i in range(60):\n",
    "    # animate the wave mesh by updating the vertex positions\n",
    "    positions = jelly_mesh.positions.copy()\n",
    "    delta_x = (positions[:, 0] + 0.0838 * i) * 10\n",
    "    delta_z = (positions[:, 2] + 0.0419 * i) * 10\n",
    "    positions[:, 1] = positions[:, 1] + 0.1 * (np.cos(delta_x) + np.sin(delta_z))\n",
    "\n",
    "    # we create a mesh update with the new positions. We can use this mesh update\n",
    "    # just like a new mesh, because it essentially is one: ScenePic will create\n",
    "    # a new mesh from the old one using these new positions.\n",
    "    jelly_update = scene.update_mesh_positions(\"jelly_base\", positions)\n",
    "    frame = canvas.create_frame(meshes=[jelly_update])\n",
    "\n",
    "    # this is a simpler form of animation in which we will change the position\n",
    "    # and colors of the marbles\n",
    "    marble_y = np.sin(0.105 * i + marble_offsets)\n",
    "    positions = np.stack([marble_positions[:, 0], marble_y, marble_positions[:, 2]], -1)\n",
    "    alpha = ((np.sin(marble_y) + 1) * 0.5).reshape(-1, 1)\n",
    "    beta = 1 - alpha\n",
    "    colors = alpha * marble_colors_start + beta * marble_colors_end\n",
    "    marbles_update = scene.update_instanced_mesh(\"marbles_base\", positions, colors=colors)\n",
    "    frame.add_mesh(marbles_update)\n",
    "\n",
    "scene.quantize_updates()\n",
    "scene"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 10 - Instanced Animation\n",
    "\n",
    "# In this tutorial we will explore how we can use mesh updates on\n",
    "# instanced meshes as well. We will begin by creating a simple primitive\n",
    "# and use instancing to create a cloud of stylized butterflies. We will\n",
    "# then use mesh updates on the instances to make the butterflies fly.\n",
    "\n",
    "scene = sp.Scene()\n",
    "\n",
    "butterflies = scene.create_mesh(\"butterflies\", double_sided=True)\n",
    "# the primitive will be a single wing, and we'll use instancing to create\n",
    "# all the butterflies\n",
    "butterflies.add_quad(sp.Colors.Blue, [0, 0, 0], [0.1, 0, 0.04], [0.08, 0, -0.06], [0.015, 0, -0.03])\n",
    "\n",
    "rotate_back = sp.Transforms.quaternion_from_axis_angle([1, 0, 0], -np.pi / 6)\n",
    "\n",
    "num_butterflies = 100\n",
    "num_anim_frames = 20\n",
    "\n",
    "# this will make them flap their wings independently\n",
    "start_frames = np.random.randint(0, num_anim_frames, num_butterflies)\n",
    "rot_angles = np.random.uniform(-1, 1, num_butterflies)\n",
    "\n",
    "rotations = np.zeros((num_butterflies * 2, 4), np.float32)\n",
    "positions = np.random.uniform(-1, 1, (num_butterflies * 2, 3))\n",
    "colors = np.random.random((num_butterflies * 2, 3))\n",
    "\n",
    "for b, angle in enumerate(rot_angles):\n",
    "    rot = sp.Transforms.quaternion_from_axis_angle([0, 1, 0], angle)\n",
    "    rotations[2 * b] = rotations[2 * b + 1] = rot\n",
    "\n",
    "    # we will use the second position per butterfly as a destination\n",
    "    dx = np.sin(angle) * 0.1\n",
    "    dy = positions[2 * b + 1, 1] - positions[2 * b, 1]\n",
    "    dy = np.sign(angle) * min(abs(angle), 0.1)\n",
    "    dz = np.cos(angle) * 0.1\n",
    "    positions[2 * b + 1] = positions[2 * b] + [dx, dy, dz]\n",
    "\n",
    "butterflies.enable_instancing(positions, rotations, colors)\n",
    "\n",
    "canvas = scene.create_canvas_3d(\"main\", 700, 700)\n",
    "canvas.shading = sp.Shading(sp.Colors.White)\n",
    "\n",
    "start = -np.pi / 6\n",
    "end = np.pi / 2\n",
    "delta = (end - start) / (num_anim_frames // 2 - 1)\n",
    "\n",
    "# let's construct the animation frame by frame\n",
    "animation = []\n",
    "for i in range(num_anim_frames):\n",
    "    frame_positions = np.zeros_like(positions)\n",
    "    frame_rotations = np.zeros_like(rotations)\n",
    "    frame_colors = np.zeros_like(colors)\n",
    "\n",
    "    for b, start_frame in enumerate(start_frames):\n",
    "        frame = (i + start_frame) % num_anim_frames\n",
    "        if frame < num_anim_frames // 2:\n",
    "            angle = start + delta * frame\n",
    "        else:\n",
    "            angle = end + delta * (frame - num_anim_frames // 2)\n",
    "\n",
    "        right = sp.Transforms.quaternion_from_axis_angle([0, 0, 1], angle)\n",
    "        right = sp.Transforms.quaternion_multiply(rotate_back, right)\n",
    "        right = sp.Transforms.quaternion_multiply(rotations[2 * b], right)\n",
    "        left = sp.Transforms.quaternion_from_axis_angle([0, 0, 1], np.pi - angle)\n",
    "        left = sp.Transforms.quaternion_multiply(rotate_back, left)\n",
    "        left = sp.Transforms.quaternion_multiply(rotations[2 * b + 1], left)\n",
    "        frame_rotations[2 * b] = right\n",
    "        frame_rotations[2 * b + 1] = left\n",
    "\n",
    "        progress = np.sin((frame * 2 * np.pi) / num_anim_frames)\n",
    "        progress = (progress + 1) * 0.5\n",
    "\n",
    "        # we move the butterfly along its path\n",
    "        pos = (1 - progress) * positions[2 * b] + progress * positions[2 * b + 1]\n",
    "        pos[1] -= np.sin(angle) * 0.02\n",
    "        frame_positions[2 * b : 2 * b + 2, :] = pos\n",
    "\n",
    "        # finally we alter the color\n",
    "        color = (1 - progress) * colors[2 * b] + progress * colors[2 * b + 1]\n",
    "        frame_colors[2 * b : 2 * b + 2, :] = color\n",
    "\n",
    "    # now we create the update. Here we update position, rotation,\n",
    "    # and color, but you can also update them separately (see the\n",
    "    # update_instanced_mesh documentation for how to leave a buffer unchanged).\n",
    "    update = scene.update_instanced_mesh(\"butterflies\", frame_positions, frame_rotations, frame_colors)\n",
    "    animation.append(update)\n",
    "\n",
    "# now we create the encapsulating animation which will move the camera\n",
    "# around the butterflies. The inner animation will loop as the camera moves.\n",
    "num_frames = 300\n",
    "cameras = sp.Camera.orbit(num_frames, 3, 2)\n",
    "for i, camera in enumerate(cameras):\n",
    "    frame = canvas.create_frame()\n",
    "    frame.add_mesh(animation[i % num_anim_frames])\n",
    "    frame.camera = camera\n",
    "\n",
    "scene"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 11 - camera movement\n",
    "\n",
    "# in this tutorial we will show how to create per-frame camera movement.\n",
    "# while the user can always choose to override this behavior, having a\n",
    "# camera track specified can be helpful for demonstrating particular\n",
    "# items in 3D. We will also show off the flexible Camera class.\n",
    "\n",
    "scene = sp.Scene()\n",
    "spin_canvas = scene.create_canvas_3d(\"spin\")\n",
    "spiral_canvas = scene.create_canvas_3d(\"spiral\")\n",
    "\n",
    "# let's create some items in the scene so we have a frame of reference\n",
    "polar_bear = scene.create_image(image_id=\"polar_bear\")\n",
    "polar_bear.load(asset_path(\"PolarBear.png\"))\n",
    "uv_texture = scene.create_image(image_id = \"texture\")\n",
    "uv_texture.load(asset_path(\"uv.png\"))\n",
    "\n",
    "cube = scene.create_mesh(\"cube\", texture_id=polar_bear.image_id)\n",
    "cube.add_cube()\n",
    "sphere = scene.create_mesh(\"sphere\", texture_id=uv_texture.image_id)\n",
    "sphere.add_icosphere(steps=4, transform=sp.Transforms.translate([0, 1, 0]))\n",
    "\n",
    "num_frames = 60\n",
    "for i in range(num_frames):\n",
    "    angle = i*np.pi*2/num_frames\n",
    "\n",
    "    # for the first camera we will spin in place on the Z axis\n",
    "    rotation = sp.Transforms.rotation_about_z(angle)\n",
    "    spin_camera = sp.Camera(center=[0, 0, 4], rotation=rotation, fov_y_degrees=30.0)\n",
    "\n",
    "    # for the second camera, we will spin the camera in a spiral around the scene\n",
    "    # we can do this using the look-at initialization, which provides a straightforward\n",
    "    # \"look at\" interface for camera placement.\n",
    "    camera_center = [4*np.cos(angle), i*4/num_frames - 2, 4*np.sin(angle)]\n",
    "    spiral_camera = sp.Camera(camera_center, look_at=[0, 0.5, 0])\n",
    "\n",
    "    # we can add frustums directly using the ScenePic camera objects\n",
    "    frustums = scene.create_mesh()\n",
    "    frustums.add_camera_frustum(spin_camera, sp.Colors.Red)\n",
    "    frustums.add_camera_frustum(spiral_camera, sp.Colors.Green)\n",
    "\n",
    "    spin_frame = spin_canvas.create_frame()\n",
    "    spin_frame.camera = spin_camera # each frame can have its own camera object\n",
    "    spin_frame.add_meshes([cube, sphere, frustums])\n",
    "\n",
    "    spiral_frame = spiral_canvas.create_frame()\n",
    "    spiral_frame.camera = spiral_camera\n",
    "    spiral_frame.add_meshes([cube, sphere, frustums])\n",
    "\n",
    "scene.link_canvas_events(spin_canvas, spiral_canvas)\n",
    "scene"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 12 - audio tracks\n",
    "\n",
    "# in this tutorial we'll show how to attach audio tracks to canvases. ScenePic\n",
    "# supports any audio file format supported by the browser.\n",
    "\n",
    "def _set_audio(scene, canvas, path):\n",
    "    audio = scene.create_audio()\n",
    "    audio.load(path)\n",
    "    canvas.media_id = audio.audio_id\n",
    "\n",
    "scene = sp.Scene()\n",
    "\n",
    "names = [\"red\", \"green\", \"blue\"]\n",
    "colors = [sp.Colors.Red, sp.Colors.Green, sp.Colors.Blue]\n",
    "frequencies = [0, 1, 0.5]\n",
    "\n",
    "graph = scene.create_graph(\"graph\", width=900, height=150)\n",
    "for name, color, frequency in zip(names, colors, frequencies):\n",
    "    mesh = scene.create_mesh()\n",
    "    mesh.add_cube(color)\n",
    "    canvas = scene.create_canvas_3d(name, width=300, height=300)\n",
    "    _set_audio(scene, canvas, asset_path(name + \".ogg\"))\n",
    "    values = []\n",
    "\n",
    "    for j in range(60):\n",
    "        frame = canvas.create_frame()\n",
    "        scale = math.sin(j * 2 * math.pi * frequency / 30)\n",
    "        frame.add_mesh(mesh, sp.Transforms.scale((scale + 1) / 2 + 0.5))\n",
    "        values.append(scale)\n",
    "\n",
    "    graph.add_sparkline(name, values, color)\n",
    "    graph.media_id = canvas.media_id\n",
    "\n",
    "names.append(\"graph\")\n",
    "scene.grid(\"600px\", \"1fr auto\", \"1fr 1fr 1fr\")\n",
    "scene.place(\"graph\", \"2\", \"1 / span 3\")\n",
    "scene.link_canvas_events(*names)\n",
    "scene"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 13 - video\n",
    "\n",
    "# It is also possible to attach videos to ScenePic scenes. Once attached, you can draw the\n",
    "# frames of those videos to canvases in the same way as images, and can draw the same\n",
    "# video to multiple frames. Once a media file (video or audio) has been attached to a\n",
    "# canvas, that file will be used to drive playback. In practical terms, this means that\n",
    "# ScenePic will display frames such that they line up with the timestamps of the video,\n",
    "# on the assumption that ScenePic frames are displayed at the framerate of the video.\n",
    "\n",
    "\n",
    "def _angle_to_pos(angle, radius):\n",
    "    return np.cos(angle) * radius + 200, np.sin(angle) * radius + 200\n",
    "\n",
    "\n",
    "scene = sp.Scene()\n",
    "\n",
    "video = scene.create_video()\n",
    "video.load(asset_path(\"circles.mp4\"))\n",
    "\n",
    "tracking = scene.create_canvas_2d(\"tracking\", background_color=sp.Colors.White)\n",
    "tracking.media_id = video.video_id\n",
    "\n",
    "multi = scene.create_canvas_2d(\"multi\", background_color=sp.Colors.White)\n",
    "multi.media_id = video.video_id\n",
    "\n",
    "angles = np.linspace(0, 2 * np.pi, 360, endpoint=False)\n",
    "for angle in angles:\n",
    "    # if a 2D canvas has an associated video\n",
    "    # then a frame of that video can be added\n",
    "    # via the add_video method.\n",
    "    frame = tracking.create_frame()\n",
    "    frame.add_video(layer_id=\"video\")\n",
    "\n",
    "    red_pos = _angle_to_pos(angle, 160)\n",
    "    frame.add_rectangle(red_pos[0] - 11, red_pos[1] - 11, 22, 22, [255, 0, 0], 2, layer_id=\"rect\")\n",
    "    frame.add_circle(red_pos[0], red_pos[1], 10, fill_color=[255, 0, 0], layer_id=\"dot\")\n",
    "\n",
    "    green_pos = _angle_to_pos(-2*angle, 80)\n",
    "    frame.add_rectangle(green_pos[0] - 11, green_pos[1] - 11, 22, 22, [0, 255, 0], 2, layer_id=\"rect\")\n",
    "    frame.add_circle(green_pos[0], green_pos[1], 10, fill_color=[0, 255, 0], layer_id=\"dot\")\n",
    "\n",
    "    blue_pos = _angle_to_pos(4*angle, 40)\n",
    "    frame.add_rectangle(blue_pos[0] - 11, blue_pos[1] - 11, 22, 22, [0, 0, 255], 2, layer_id=\"rect\")\n",
    "    frame.add_circle(blue_pos[0], blue_pos[1], 10, fill_color=[0, 0, 255], layer_id=\"dot\")\n",
    "\n",
    "    frame = multi.create_frame()\n",
    "    frame.add_video(\"manual\", red_pos[0] - 40, red_pos[1] - 40, 0.2, layer_id=\"red\")\n",
    "    frame.add_video(\"manual\", green_pos[0] - 25, green_pos[1] - 25, 0.125, layer_id=\"green\")\n",
    "    frame.add_video(\"manual\", 160, 160, 0.2, layer_id=\"blue\")\n",
    "\n",
    "tracking.set_layer_settings({\n",
    "    \"rect\": {\"render_order\": 0},\n",
    "    \"video\": {\"render_order\": 1},\n",
    "    \"dot\": {\"render_order\": 2}\n",
    "})\n",
    "\n",
    "scene.link_canvas_events(\"tracking\", \"multi\")\n",
    "scene"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 14 - Multiview Visualization\n",
    "\n",
    "# One common and useful scenario for ScenePic is to visualize the result of multiview 3D reconstruction.\n",
    "# In this tutorial we'll show how to load some geometry, associated camera calibration\n",
    "# information, and images to create a visualization depicting the results.\n",
    "\n",
    "def _load_camera(camera_info):\n",
    "    # this function loads an \"OpenCV\"-style camera representation\n",
    "    # and converts it to a GL style for use in ScenePic\n",
    "    location = np.array(camera_info[\"location\"], np.float32)\n",
    "    euler_angles = np.array(camera_info[\"rotation\"], np.float32)\n",
    "    rotation = sp.Transforms.euler_angles_to_matrix(euler_angles, \"XYZ\")\n",
    "    translation = sp.Transforms.translate(location)\n",
    "    extrinsics = translation @ rotation\n",
    "    world_to_camera = sp.Transforms.gl_world_to_camera(extrinsics)\n",
    "    aspect_ratio = camera_info[\"width\"] / camera_info[\"height\"]\n",
    "    projection = sp.Transforms.gl_projection(camera_info[\"fov\"], aspect_ratio, 0.01, 100)\n",
    "\n",
    "    return sp.Camera(world_to_camera, projection)\n",
    "\n",
    "\n",
    "def _load_cameras():\n",
    "    with open(asset_path(\"cameras.json\")) as file:\n",
    "        cameras = json.load(file)\n",
    "    return [_load_camera(cameras[key])\n",
    "            for key in cameras]\n",
    "\n",
    "\n",
    "scene = sp.Scene()\n",
    "\n",
    "# load the fitted cameras\n",
    "cameras = _load_cameras()\n",
    "\n",
    "# this textured cube will stand in for a reconstructed mesh\n",
    "texture = scene.create_image(\"texture\")\n",
    "texture.load(asset_path(\"PolarBear.png\"))\n",
    "cube = scene.create_mesh(\"cube\")\n",
    "cube.texture_id = texture.image_id\n",
    "cube.add_cube(transform=sp.Transforms.scale(2))\n",
    "\n",
    "# construct all of the frustums\n",
    "# and camera images\n",
    "frustums = scene.create_mesh(\"frustums\", layer_id=\"frustums\")\n",
    "colors = [sp.Colors.Red, sp.Colors.Green, sp.Colors.Blue]\n",
    "paths = [asset_path(name) for name in [\"render0.png\", \"render1.png\", \"render2.png\"]]\n",
    "camera_images = []\n",
    "images = []\n",
    "\n",
    "for i, (color, path, camera) in enumerate(zip(colors, paths, cameras)):\n",
    "    image = scene.create_image(path)\n",
    "    image.load(path)\n",
    "    frustums.add_camera_frustum(camera, color)\n",
    "\n",
    "    image_mesh = scene.create_mesh(\"image{}\".format(i),\n",
    "                                   layer_id=\"images\",\n",
    "                                   shared_color=sp.Colors.Gray,\n",
    "                                   double_sided=True,\n",
    "                                   texture_id=image.image_id)\n",
    "    image_mesh.add_camera_image(camera)\n",
    "\n",
    "    images.append(image)\n",
    "    camera_images.append(image_mesh)\n",
    "\n",
    "# create one canvas for each camera to show the scene from\n",
    "# that camera's viewpoint\n",
    "width = 640\n",
    "for i, camera in enumerate(cameras):\n",
    "    height = width / camera.aspect_ratio\n",
    "    canvas = scene.create_canvas_3d(\"hand{}\".format(i), width, height, camera=camera)\n",
    "    frame = canvas.create_frame()\n",
    "    frame.add_mesh(cube)\n",
    "    frame.add_mesh(frustums)\n",
    "    frame.camera = camera\n",
    "    for cam_mesh in camera_images:\n",
    "        frame.add_mesh(cam_mesh)\n",
    "\n",
    "scene\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tutorial 15 - Frame Layer Settings\n",
    "\n",
    "# It is possible to use the per-frame layer settings to automatically\n",
    "# change various layer properties, for example to fade meshes in and\n",
    "# out of view. The user can still override this manually using the\n",
    "# controls, of course, but this feature can help guide the user through\n",
    "# more complex animations.\n",
    "\n",
    "scene = sp.Scene()\n",
    "\n",
    "# In this tutorial we will fade out one mesh (the cube) and fade\n",
    "# another in (the sphere).\n",
    "\n",
    "cube = scene.create_mesh(layer_id=\"cube\")\n",
    "cube.add_cube(sp.Colors.Green)\n",
    "\n",
    "sphere = scene.create_mesh(layer_id=\"sphere\")\n",
    "sphere.add_sphere(sp.Colors.Red)\n",
    "\n",
    "canvas = scene.create_canvas_3d()\n",
    "for i in range(60):\n",
    "    sphere_opacity = i / 59\n",
    "    cube_opacity = 1 - sphere_opacity\n",
    "    frame = canvas.create_frame()\n",
    "    frame.add_mesh(cube)\n",
    "    frame.add_mesh(sphere)\n",
    "    # the interface here is the same as how layer settings\n",
    "    # usually work at the canvas level.\n",
    "    frame.set_layer_settings({\n",
    "        \"cube\": {\"opacity\": cube_opacity},\n",
    "        \"sphere\": {\"opacity\": sphere_opacity}\n",
    "    })\n",
    "\n",
    "scene"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  },
  "vscode": {
   "interpreter": {
    "hash": "4e4cd08459ad7e57ab24f65628a4d7a7e3c34a2f94f0316bf5d977822d8a3af0"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}