anomaly-detection-material-parameters-calibration

Sionna parameter calibration (research project)
git clone https://git.ea.contact/anomaly-detection-material-parameters-calibration

renderer.py (15725B)


#
# SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""
Ray tracing-based renderer.
"""

import drjit as dr
import matplotlib
import mitsuba as mi
import numpy as np
import tensorflow as tf

from .camera import Camera
from .utils import paths_to_segments, scene_scale, mitsuba_rectangle_to_world


def render(scene, camera, paths, show_paths, show_devices, num_samples,
           resolution, fov,
           coverage_map=None, cm_tx=None, cm_db_scale=True,
           cm_vmin=None, cm_vmax=None, cm_metric="path_gain"):
    r"""
    Renders two images with path tracing:

    1. The base scene with its meshes
    2. The paths, radio devices and coverage map

    and then composites them together.

    We adopt this approach because, as of the time of writing, Mitsuba
    does not support adding or removing objects from a scene after
    it has been loaded.

    Input
    ------
    scene : :class:`~sionna.rt.Scene`
        The scene to render

    camera : str | :class:`~sionna.rt.Camera`
        The name or instance of a :class:`~sionna.rt.Camera`

    paths : :class:`~sionna.rt.Paths` | `None`
        Simulated paths generated by
        :meth:`~sionna.rt.Scene.compute_paths()` or `None`.
        If `None`, only the scene is rendered.

    show_paths : bool
        If `paths` is not `None`, shows the paths.

    show_devices : bool
        Shows the radio devices.

    num_samples : int
        Number of rays traced per pixel.

    resolution : [2], int
        Size of the rendered figure.

    fov : float
        Field of view, in degrees.

    coverage_map : :class:`~sionna.rt.CoverageMap` | `None`
        An optional coverage map to overlay on the scene for visualization.
        Defaults to `None`.

    cm_tx : int | str | None
        When `coverage_map` is specified, controls which transmitter to
        display the coverage map for. Either the transmitter's name or its
        index can be given. If `None`, the maximum metric over all
        transmitters is shown.
        Defaults to `None`.

    cm_db_scale : bool
        Use a logarithmic scale for coverage map visualization, i.e., the
        coverage values are mapped with:
        :math:`y = 10 \cdot \log_{10}(x)`.
        Defaults to `True`.

    cm_vmin, cm_vmax : float | None
        For coverage map visualization, defines the range of values that
        the colormap covers.
        If set to `None`, the complete range is covered.
        Defaults to `None`.

    cm_metric : str, one of ["path_gain", "rss", "sinr"]
        Metric of the coverage map to be displayed.
        Defaults to "path_gain".

    Output
    -------
    : :class:`~mitsuba.Bitmap`
        Rendered image
    """
    s1 = scene.mi_scene
    sensor = make_render_sensor(scene, camera, resolution=resolution, fov=fov)

    integrator = mi.load_dict({
        'type': 'path',
        'max_depth': 8,
        'hide_emitters': True,
    })
    img1 = mi.render(s1, sensor=sensor, integrator=integrator, spp=num_samples)
    img1 = img1.numpy()

    needs_compositing = (
        (show_paths and paths is not None)
        or (coverage_map is not None)
        or show_devices
    )
    if needs_compositing:
        if coverage_map is not None:
            coverage_map = _coverage_map_to_textured_rectangle(
                coverage_map, tx=cm_tx, db_scale=cm_db_scale,
                vmin=cm_vmin, vmax=cm_vmax, cm_metric=cm_metric,
                viewpoint=sensor.world_transform().translation())

        s2 = results_to_mitsuba_scene(scene, paths=paths,
                                      show_paths=show_paths,
                                      show_devices=show_devices,
                                      coverage_map=coverage_map)
        depth_integrator = mi.load_dict({
            'type': 'depth'
        })

        depth1 = mi.render(s1, sensor=sensor, integrator=depth_integrator,
                           spp=num_samples)
        depth1 = unmultiply_alpha(depth1.numpy())

        img2 = mi.render(s2, sensor=sensor, integrator=integrator,
                         spp=num_samples)
        img2 = img2.numpy()
        depth2 = mi.render(s2, sensor=sensor, integrator=depth_integrator,
                           spp=num_samples)
        depth2 = unmultiply_alpha(depth2.numpy())

        # Alpha compositing using the renderings (stored as pre-multiplied
        # alpha)
        alpha1 = img1[:, :, 3]
        alpha2 = img2[:, :, 3]
        composite = img2 + img1 * (1 - alpha2[:, :, None])

        # Use the composite only in unoccluded regions (based on depth info)
        # TODO: can probably do a nicer transition based on depth values
        img3 = np.where(
            (alpha1[:, :, None] > 0) & (depth1 < depth2),
            img1,
            composite
        )
        img3[:, :, 3] = np.maximum(img1[:, :, 3], composite[:, :, 3])

    else:
        # No need for any compositing, we just have to show the scene as-is
        img3 = img1

    return mi.Bitmap(img3)

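# Illustrative usage of ``render`` above (a sketch, not part of this module;
# the camera name and parameter values are assumptions):
#
#     scene = ...                                # a loaded Scene instance
#     bitmap = render(scene, camera="my-cam", paths=None,
#                     show_paths=False, show_devices=True,
#                     num_samples=128, resolution=[655, 500], fov=45)
#     bitmap.write("rendered_scene.exr")

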
def make_render_sensor(scene, camera, resolution, fov):
    r"""
    Instantiates a Mitsuba sensor (camera) from the provided ``camera`` object.

    Input
    ------
    scene : :class:`~sionna.rt.Scene`
        The scene

    camera : str | :class:`~sionna.rt.Camera` | :class:`~mitsuba.Sensor`
        A camera

    resolution : [2], int
        Size of the rendered figure.

    fov : float
        Field of view, in degrees.

    Output
    -------
    : :class:`~mitsuba.Sensor`
        A Mitsuba sensor (camera)
    """
    props = {
        'type': 'perspective',
    }

    if isinstance(camera, str):

        if camera == 'preview':
            # Use the viewpoint from the preview.
            w = scene.preview_widget
            if w is None:
                raise RuntimeError("Could not find an open preview widget, "
                                   "please call `scene.preview()` first.")

            cam = w.camera
            props['to_world'] = mi.ScalarTransform4f.look_at(
                origin=cam.position,
                target=w.orbit.target,
                up=(0, 0, 1),
            )
            props['near_clip'] = cam.near
            props['far_clip'] = cam.far
            del w, cam

        else:
            cam_name = camera
            camera = scene.get(cam_name)
            if not isinstance(camera, Camera):
                raise ValueError(f"The scene has no camera named '{cam_name}'")

    if isinstance(camera, Camera):
        world_transform = camera.world_transform.matrix.numpy()
        props['to_world'] = mi.ScalarTransform4f(world_transform)
        props['near_clip'] = 0.1
        props['far_clip'] = 10000

    elif isinstance(camera, mi.Sensor):
        sensor_params = mi.traverse(camera)
        world_transform = camera.world_transform().matrix.numpy()
        props['to_world'] = mi.ScalarTransform4f(world_transform)
        props['near_clip'] = sensor_params['near_clip']
        props['far_clip'] = sensor_params['far_clip']

    elif isinstance(camera, str):
        # Nothing to do: the 'preview' case was already handled above.
        # This branch only prevents the unsupported-type error below from
        # being raised.
        pass

    else:
        raise ValueError(f'Unsupported camera type: {type(camera)}')

    if fov is not None:
        props['fov'] = fov
        props['fov_axis'] = 'x'
    props['film'] = {
        'type': 'hdrfilm',
        'width': resolution[0],
        'height': resolution[1],
        'pixel_format': 'rgba',
        'rfilter': {'type': 'box'},
    }
    return mi.load_dict(props)

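# For reference, a sketch of the kind of dictionary ``make_render_sensor``
# above ends up passing to ``mi.load_dict`` (all values are illustrative):
#
#     props = {
#         'type': 'perspective',
#         'to_world': mi.ScalarTransform4f.look_at(origin=[0, -100, 50],
#                                                  target=[0, 0, 0],
#                                                  up=[0, 0, 1]),
#         'near_clip': 0.1,
#         'far_clip': 10000,
#         'fov': 45,
#         'fov_axis': 'x',
#         'film': {'type': 'hdrfilm', 'width': 655, 'height': 500,
#                  'pixel_format': 'rgba', 'rfilter': {'type': 'box'}},
#     }
#     sensor = mi.load_dict(props)

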
def results_to_mitsuba_scene(scene, paths, show_paths, show_devices,
                             coverage_map=None):
    """
    Builds a Mitsuba scene containing only the paths, radio devices,
    and coverage map overlay (without the scene geometry).

    Input
    -----
    scene : :class:`~sionna.rt.Scene`
        The scene

    paths : :class:`~sionna.rt.Paths` | `None`
        Simulated paths generated by
        :meth:`~sionna.rt.Scene.compute_paths()` or `None`.
        If `None`, only the radio devices and coverage map are shown.

    show_paths : bool
        If `paths` is not `None`, shows the paths.

    show_devices : bool
        Shows the radio devices.

    coverage_map : dict | `None`
        Dictionary describing a Mitsuba rectangle shape, with a
        textured emitter showing the coverage map to display.
        See `_coverage_map_to_textured_rectangle`.
        Defaults to `None`.

    Output
    -------
    : :class:`~mitsuba.Scene`
        A Mitsuba scene
    """
    objects = {
        'type': 'scene',
    }
    sc, tx_positions, rx_positions, ris_positions, _ = scene_scale(scene)
    ris_orientations = [ris.orientation for ris in scene.ris.values()]
    ris_sizes = [ris.size for ris in scene.ris.values()]
    transmitter_colors = [transmitter.color.numpy() for
                          transmitter in scene.transmitters.values()]
    receiver_colors = [receiver.color.numpy() for
                       receiver in scene.receivers.values()]
    ris_colors = [ris.color.numpy() for
                  ris in scene.ris.values()]

    # --- Radio devices, shown as spheres
    if show_devices:
        radius = max(0.0025 * sc, 1)
        for source, colors in ((tx_positions, transmitter_colors),
                               (rx_positions, receiver_colors)):
            for index, (k, p) in enumerate(source.items()):
                key = 'rd-' + k
                assert key not in objects
                objects[key] = {
                    'type': 'sphere',
                    'center': p,
                    'radius': radius,
                    'light': {
                        'type': 'area',
                        'radiance': {'type': 'rgb', 'value': colors[index]},
                    },
                }

    # --- RIS, shown as rectangles
    if show_devices:
        for k, o, s, c in zip(ris_positions, ris_orientations, ris_sizes,
                              ris_colors):
            p = tf.constant(ris_positions[k])
            key = 'ris-' + k
            assert key not in objects
            to_world = mitsuba_rectangle_to_world(p, o, s, ris=True)
            objects[key] = {
                'type': 'rectangle',
                'to_world': to_world,
                'light': {
                    'type': 'area',
                    'radiance': {'type': 'rgb', 'value': c},
                },
            }

    # --- Paths, shown as cylinders (the closest we have to lines)
    if (paths is not None) and show_paths:
        path_color = [0.75, 0.75, 0.75]
        starts, ends = paths_to_segments(paths)

        for i, (s, e) in enumerate(zip(starts, ends)):
            # TODO: avoid the shearing warning
            objects[f'path-{i:06d}'] = {
                'type': 'cylinder',
                'p0': s,
                'p1': e,
                'radius': 0.25,
                'light': {
                    'type': 'area',
                    'radiance': {'type': 'rgb', 'value': path_color},
                },
            }

    if coverage_map is not None:
        objects['coverage-map'] = coverage_map

    # Temporarily raise the log level to silence a warning about cylinder
    # shearing.
    # TODO: shouldn't need this, maybe the warning is too sensitive
    logger = mi.Thread.thread().logger()
    level = logger.log_level()
    logger.set_log_level(mi.LogLevel.Error)
    new_scene = mi.load_dict(objects)
    logger.set_log_level(level)

    return new_scene

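# Illustrative example of a radio-device entry assembled by
# ``results_to_mitsuba_scene`` above (name, position, radius and color are
# made up):
#
#     objects['rd-tx_0'] = {
#         'type': 'sphere',
#         'center': [8.5, -21.0, 27.0],
#         'radius': 1.0,
#         'light': {
#             'type': 'area',
#             'radiance': {'type': 'rgb', 'value': [1.0, 0.0, 0.0]},
#         },
#     }

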
def _coverage_map_to_textured_rectangle(coverage_map, tx=0, db_scale=True,
                                        vmin=None, vmax=None,
                                        cm_metric="path_gain",
                                        viewpoint=None):
    """
    Builds a Mitsuba shape dictionary describing a rectangle with a textured
    emitter that visualizes the given coverage map.
    """
    to_world = coverage_map.to_world()
    cm = getattr(coverage_map, cm_metric).numpy()
    if tx is None:
        cm = np.max(cm, axis=0)
    else:
        cm = cm[tx]
    # Ensure that dBm is correctly computed for RSS
    if cm_metric == "rss" and db_scale:
        cm *= 1000
    # Resample values from cell centers to cell corners
    coverage_map = resample_to_corners(cm.squeeze())

    texture, opacity = _coverage_map_texture(
        coverage_map, db_scale=db_scale, vmin=vmin, vmax=vmax)
    bsdf = {
        'type': 'mask',
        'opacity': {
            'type': 'bitmap',
            'bitmap': mi.Bitmap(opacity.astype(np.float32)),
        },
        'nested': {
            'type': 'diffuse',
            'reflectance': 0.,
        },
    }

    emitter = {
        'type': 'area',
        'radiance': {
            'type': 'bitmap',
            'bitmap': mi.Bitmap(texture.astype(np.float32)),
        },
    }

    flip_normal = False
    if viewpoint is not None:
        # Area emitters are single-sided, so we need to flip the rectangle's
        # normals if the camera is on the wrong side.
        p0 = to_world.transform_affine([-1, -1, 0])
        p1 = to_world.transform_affine([-1, 0, 0])
        p2 = to_world.transform_affine([0, -1, 0])
        plane_center = to_world.transform_affine([0, 0, 0])
        normal = dr.cross(p1 - p0, p2 - p0)
        flip_normal = dr.dot(plane_center - viewpoint.numpy(), normal) < 0

    return {
        'type': 'rectangle',
        'flip_normals': flip_normal,
        'to_world': to_world,
        'bsdf': bsdf,
        'emitter': emitter,
    }

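# Note on the RSS handling in ``_coverage_map_to_textured_rectangle`` above:
# multiplying by 1000 converts W to mW so that the subsequent 10 * log10
# mapping (see ``coverage_map_color_mapping`` below) yields dBm.
# Illustrative example: an RSS of 1e-9 W becomes 1e-6 mW and is displayed as
# 10 * log10(1e-6) = -60 dBm.

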
def coverage_map_color_mapping(coverage_map, db_scale=True,
                               vmin=None, vmax=None):
    """
    Prepares a Matplotlib colormap and a normalizer based on the requested
    value scale to be displayed.
    Also applies the dB scaling to a copy of the coverage map, if requested.
    """
    valid = np.logical_and(coverage_map > 0., np.isfinite(coverage_map))
    coverage_map = coverage_map.copy()
    if db_scale:
        coverage_map[valid] = 10. * np.log10(coverage_map[valid])

    if vmin is None:
        vmin = coverage_map[valid].min()
    if vmax is None:
        vmax = coverage_map[valid].max()
    normalizer = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
    color_map = matplotlib.colormaps.get_cmap('viridis')
    return coverage_map, normalizer, color_map

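# Worked example for the mapping above (illustrative numbers): a path gain of
# 1e-7 is mapped to 10 * log10(1e-7) = -70 dB.  With vmin=-100 and vmax=-50,
# the normalizer then rescales -70 dB to (-70 - (-100)) / (-50 - (-100)) = 0.6
# before the 'viridis' colormap is applied.

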
def resample_to_corners(values):
    """
    Takes a 2D NumPy array of values defined at cell centers and converts it
    into an array (with one more row and one more column) with values defined
    at cell corners.
    """
    assert values.ndim == 2
    padded = np.pad(values, pad_width=((1, 1), (1, 1)), mode='edge')
    return 0.25 * (
          padded[ :-1,  :-1]
        + padded[1:  ,  :-1]
        + padded[ :-1, 1:  ]
        + padded[1:  , 1:  ]
    )

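# Worked example for ``resample_to_corners`` above (values chosen for
# illustration):
#
#     >>> resample_to_corners(np.array([[1., 2.],
#     ...                               [3., 4.]]))
#     array([[1. , 1.5, 2. ],
#            [2. , 2.5, 3. ],
#            [3. , 3.5, 4. ]])
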

def _coverage_map_texture(coverage_map, db_scale=True, vmin=None, vmax=None):
    """
    Computes the RGB radiance texture and the opacity mask used to display
    the coverage map.
    """
    # Leave zero-valued regions as transparent
    valid = coverage_map > 0.
    opacity = valid.astype(np.float32)

    # Color mapping of real values
    coverage_map, normalizer, color_map = coverage_map_color_mapping(
        coverage_map, db_scale=db_scale, vmin=vmin, vmax=vmax)
    texture = color_map(normalizer(coverage_map))[:, :, :3]
    # Colors from the color map are gamma-compressed, go back to linear
    texture = np.power(texture, 2.2)

    # Pre-multiply alpha to avoid fringes
    texture *= opacity[:, :, None]

    return texture, opacity


def unmultiply_alpha(arr):
    """
    De-multiplies the alpha channel, i.e. divides the color channels by the
    alpha value wherever it is non-zero.

    Input
    -----
    arr : [w,h,4]
        An RGBA image with pre-multiplied alpha

    Output
    -------
    arr : [w,h,4]
        Image with the alpha channel de-multiplied
    """
    arr = arr.copy()
    alpha = arr[:, :, 3]
    weight = 1. / np.where(alpha > 0, alpha, 1.)
    arr[:, :, :3] *= weight[:, :, None]
    return arr
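
# Worked example for ``unmultiply_alpha`` above: a pre-multiplied RGBA pixel
# [0.2, 0.1, 0.0, 0.5] has its color channels divided by alpha, giving
# [0.4, 0.2, 0.0, 0.5]; pixels with alpha == 0 are left untouched.
#
#     >>> px = np.array([[[0.2, 0.1, 0.0, 0.5]]], dtype=np.float32)
#     >>> unmultiply_alpha(px)[0, 0]
#     array([0.4, 0.2, 0. , 0.5], dtype=float32)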