anomaly-detection-material-parameters-calibration

Sionna parameter calibration (research project)
git clone https://git.ea.contact/anomaly-detection-material-parameters-calibration

utils.py (10653B)


#
# SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Utility functions for the filter module"""

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sionna.utils.tensors import expand_to_rank
from tensorflow.experimental.numpy import swapaxes


def convolve(inp, ker, padding='full', axis=-1):
    # pylint: disable=line-too-long
    r"""
    Filters an input ``inp`` of length `N` by convolving it with a kernel ``ker`` of length `K`.

    The length of the kernel ``ker`` must not be greater than that of the input sequence ``inp``.

    The `dtype` of the output is `tf.float` only if both ``inp`` and ``ker`` are `tf.float`. It is `tf.complex` otherwise.
    ``inp`` and ``ker`` must have the same precision.

    Three padding modes are available:

    *   "full" (default): Returns the convolution at each point of overlap between ``ker`` and ``inp``.
        The length of the output is `N + K - 1`. Zero-padding of the input ``inp`` is performed to
        compute the convolution at the border points.
    *   "same": Returns an output of the same length as the input ``inp``. The convolution is computed such
        that the coefficients of the input ``inp`` are centered on the coefficient of the kernel ``ker`` with index
        ``(K-1)/2`` for kernels of odd length, and ``K/2 - 1`` for kernels of even length.
        Zero-padding of the input signal is performed to compute the convolution at the border points.
    *   "valid": Returns the convolution only at points where ``inp`` and ``ker`` completely overlap.
        The length of the output is `N - K + 1`.

    Input
    ------
    inp : [...,N], tf.complex or tf.float
        Input to filter.

    ker : [K], tf.complex or tf.float
        Kernel of the convolution.

    padding : string
        Padding mode. Must be one of "full", "valid", or "same". Case insensitive.
        Defaults to "full".

    axis : int
        Axis along which to perform the convolution.
        Defaults to `-1`.

    Output
    -------
    out : [...,M], tf.complex or tf.float
        Convolution output.
        It is `tf.float` only if both ``inp`` and ``ker`` are `tf.float`. It is `tf.complex` otherwise.
        The length `M` of the output depends on the ``padding``.
    """

    # We don't want to be sensitive to case
    padding = padding.lower()
    assert padding in ('valid', 'same', 'full'), "Invalid padding method"

    # Ensure we process along the axis requested by the user
    inp = swapaxes(inp, axis, -1)

    # Reshape the input to a 2D tensor
    batch_shape = tf.shape(inp)[:-1]
    inp_len = tf.shape(inp)[-1]
    inp_dtype = inp.dtype
    ker_dtype = ker.dtype
    inp = tf.reshape(inp, [-1, inp_len])

    # Using the TensorFlow convolution implementation, we need to manually
    # flip the kernel
    ker = tf.reverse(ker, axis=(0,))
    # TensorFlow convolution expects convolution kernels with input and
    # output dims
    ker = expand_to_rank(ker, 3, 1)
    # TensorFlow convolution expects a channel dim for the convolution
    inp = tf.expand_dims(inp, axis=-1)

    # Pad the kernel or input if required, depending on the convolution type.
    # Also, set the padding mode for the TF convolution
    if padding == 'valid':
        # No padding required in this case
        tf_conv_mode = 'VALID'
    elif padding == 'same':
        ker = tf.pad(ker, [[0,1],[0,0],[0,0]])
        tf_conv_mode = 'SAME'
    else: # 'full'
        ker_len = ker.shape[0]
        if (ker_len % 2) == 0:
            extra_padding_left = ker_len // 2
            extra_padding_right = extra_padding_left-1
        else:
            extra_padding_left = (ker_len-1) // 2
            extra_padding_right = extra_padding_left
        inp = tf.pad(inp, [[0,0],
                           [extra_padding_left,extra_padding_right],
                           [0,0]])
        tf_conv_mode = 'SAME'

    # Extract the real and imaginary components of the input and kernel
    inp_real = tf.math.real(inp)
    ker_real = tf.math.real(ker)
    inp_imag = tf.math.imag(inp)
    ker_imag = tf.math.imag(ker)

    # Compute the convolution from four real-valued convolutions following
    # (a + jb) * (c + jd) = (a*c - b*d) + j(a*d + b*c).
    # The output is complex-valued if the input or the kernel is.
    # Defaults to False, and set to True if required later
    complex_output = False
    out_1 = tf.nn.convolution(inp_real, ker_real, padding=tf_conv_mode)
    if inp_dtype.is_complex:
        out_4 = tf.nn.convolution(inp_imag, ker_real, padding=tf_conv_mode)
        complex_output = True
    else:
        out_4 = tf.zeros_like(out_1)
    if ker_dtype.is_complex:
        out_3 = tf.nn.convolution(inp_real, ker_imag, padding=tf_conv_mode)
        complex_output = True
    else:
        out_3 = tf.zeros_like(out_1)
    if inp_dtype.is_complex and ker_dtype.is_complex:
        out_2 = tf.nn.convolution(inp_imag, ker_imag, padding=tf_conv_mode)
    else:
        out_2 = tf.zeros_like(out_1)
    if complex_output:
        out = tf.complex(out_1 - out_2,
                         out_3 + out_4)
    else:
        out = out_1

    # Reshape the output to the expected shape
    out = tf.squeeze(out, axis=-1)
    out_len = tf.shape(out)[-1]
    out = tf.reshape(out, tf.concat([batch_shape, [out_len]], axis=-1))
    out = swapaxes(out, axis, -1)

    return out


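# Illustrative usage sketch (added for exposition, not part of the upstream
# module; the helper name `_demo_convolve` and the test values are made up).
# It shows the output lengths of the three padding modes and checks "full"
# mode against NumPy's reference implementation.
def _demo_convolve():
    inp = tf.constant([1., 2., 3., 4., 5.])      # N = 5
    ker = tf.constant([0.25, 0.5, 0.25])         # K = 3
    out_full = convolve(inp, ker, padding='full')    # length N + K - 1 = 7
    out_same = convolve(inp, ker, padding='same')    # length N = 5
    out_valid = convolve(inp, ker, padding='valid')  # length N - K + 1 = 3
    # "full" mode should agree with np.convolve for real-valued sequences
    ref = np.convolve(inp.numpy(), ker.numpy(), mode='full')
    assert np.allclose(out_full.numpy(), ref, atol=1e-6)
    return out_full, out_same, out_valid

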
def fft(tensor, axis=-1):
    r"""Computes the normalized DFT along a specified axis.

    This operation computes the normalized one-dimensional discrete Fourier
    transform (DFT) along the ``axis`` dimension of a ``tensor``.
    For a vector :math:`\mathbf{x}\in\mathbb{C}^N`, the DFT
    :math:`\mathbf{X}\in\mathbb{C}^N` is computed as

    .. math::
        X_m = \frac{1}{\sqrt{N}}\sum_{n=0}^{N-1} x_n \exp \left\{
            -j2\pi\frac{mn}{N}\right\},\quad m=0,\dots,N-1.

    Input
    -----
    tensor : tf.complex
        Tensor of arbitrary shape.

    axis : int
        Indicates the dimension along which the DFT is taken.

    Output
    ------
    : tf.complex
        Tensor of the same dtype and shape as ``tensor``.
    """
    fft_size = tf.cast(tf.shape(tensor)[axis], tensor.dtype)
    scale = 1/tf.sqrt(fft_size)

    # tf.signal.fft operates on the last axis; swap axes only if needed
    if axis not in [-1, tensor.shape.rank - 1]:
        output = tf.signal.fft(swapaxes(tensor, axis, -1))
        output = swapaxes(output, axis, -1)
    else:
        output = tf.signal.fft(tensor)

    return scale * output


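# Illustrative sketch (added for exposition, not part of the upstream module;
# `_demo_fft` is a made-up name). The 1/sqrt(N) factor makes `fft` unitary,
# so the signal energy is preserved (Parseval's theorem).
def _demo_fft():
    x = tf.complex(tf.random.normal([4, 16]), tf.random.normal([4, 16]))
    x_f = fft(x)
    energy_time = tf.reduce_sum(tf.abs(x)**2, axis=-1)
    energy_freq = tf.reduce_sum(tf.abs(x_f)**2, axis=-1)
    assert np.allclose(energy_time.numpy(), energy_freq.numpy(), atol=1e-3)

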
def ifft(tensor, axis=-1):
    r"""Computes the normalized IDFT along a specified axis.

    This operation computes the normalized one-dimensional discrete inverse
    Fourier transform (IDFT) along the ``axis`` dimension of a ``tensor``.
    For a vector :math:`\mathbf{X}\in\mathbb{C}^N`, the IDFT
    :math:`\mathbf{x}\in\mathbb{C}^N` is computed as

    .. math::
        x_n = \frac{1}{\sqrt{N}}\sum_{m=0}^{N-1} X_m \exp \left\{
            j2\pi\frac{mn}{N}\right\},\quad n=0,\dots,N-1.

    Input
    -----
    tensor : tf.complex
        Tensor of arbitrary shape.

    axis : int
        Indicates the dimension along which the IDFT is taken.

    Output
    ------
    : tf.complex
        Tensor of the same dtype and shape as ``tensor``.
    """
    fft_size = tf.cast(tf.shape(tensor)[axis], tensor.dtype)
    scale = tf.sqrt(fft_size)

    # tf.signal.ifft operates on the last axis; swap axes only if needed
    if axis not in [-1, tensor.shape.rank - 1]:
        output = tf.signal.ifft(swapaxes(tensor, axis, -1))
        output = swapaxes(output, axis, -1)
    else:
        output = tf.signal.ifft(tensor)

    return scale * output


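# Illustrative sketch (added for exposition, not part of the upstream module;
# `_demo_ifft` is a made-up name). Because `fft` scales by 1/sqrt(N) and
# `ifft` by sqrt(N) (on top of the 1/N inside tf.signal.ifft), the round
# trip is the identity.
def _demo_ifft():
    x = tf.complex(tf.random.normal([2, 8]), tf.random.normal([2, 8]))
    x_hat = ifft(fft(x))
    assert np.allclose(x.numpy(), x_hat.numpy(), atol=1e-5)

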
def empirical_psd(x, show=True, oversampling=1.0, ylim=(-30,3)):
    r"""Computes the empirical power spectral density.

    Computes the empirical power spectral density (PSD) of tensor ``x``
    along the last dimension by averaging over all other dimensions.
    Note that this function simply returns the averaged absolute squared
    discrete Fourier spectrum of ``x``.

    Input
    -----
    x : [...,N], tf.complex
        The signal of which to compute the PSD.

    show : bool
        Indicates if a plot of the PSD should be generated.
        Defaults to `True`.

    oversampling : float
        The oversampling factor. Defaults to 1.

    ylim : tuple of floats
        The limits of the y axis. Defaults to (-30, 3).
        Only relevant if ``show`` is True.

    Output
    ------
    freqs : [N], float
        The normalized frequencies at which the PSD was evaluated.

    psd : [N], float
        The PSD.
    """
    psd = tf.pow(tf.abs(fft(x)), 2)
    psd = tf.reduce_mean(psd, tf.range(0, tf.rank(psd)-1))
    psd = tf.signal.fftshift(psd)
    f_min = -0.5*oversampling
    f_max = -f_min
    freqs = tf.linspace(f_min, f_max, tf.shape(psd)[0])
    if show:
        plt.figure()
        plt.plot(freqs, 10*np.log10(psd))
        plt.title("Power Spectral Density")
        plt.xlabel("Normalized Frequency")
        plt.xlim([freqs[0], freqs[-1]])
        plt.ylabel(r"$\mathbb{E}\left[|X(f)|^2\right]$ (dB)")
        plt.ylim(ylim)
        plt.grid(True, which="both")

    return (freqs, psd)


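# Illustrative sketch (added for exposition, not part of the upstream module;
# `_demo_empirical_psd` is a made-up name). Unit-variance complex white noise
# has a flat PSD of roughly 0 dB at every normalized frequency.
def _demo_empirical_psd():
    x = tf.complex(tf.random.normal([1000, 64], stddev=np.sqrt(0.5)),
                   tf.random.normal([1000, 64], stddev=np.sqrt(0.5)))
    freqs, psd = empirical_psd(x, show=False)
    assert np.allclose(psd.numpy(), 1.0, atol=0.2)  # flat at ~0 dB

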
def empirical_aclr(x, oversampling=1.0, f_min=-0.5, f_max=0.5):
    r"""Computes the empirical ACLR.

    Computes the empirical adjacent channel leakage ratio (ACLR)
    of tensor ``x`` based on its empirical power spectral density (PSD),
    which is computed along the last dimension by averaging over
    all other dimensions.

    It is assumed that the in-band ranges from [``f_min``, ``f_max``] in
    normalized frequency. The ACLR is then defined as

    .. math::

        \text{ACLR} = \frac{P_\text{out}}{P_\text{in}}

    where :math:`P_\text{in}` and :math:`P_\text{out}` are the in-band
    and out-of-band power, respectively.

    Input
    -----
    x : [...,N], tf.complex
        The signal for which to compute the ACLR.

    oversampling : float
        The oversampling factor. Defaults to 1.

    f_min : float
        The lower border of the in-band in normalized frequency.
        Defaults to -0.5.

    f_max : float
        The upper border of the in-band in normalized frequency.
        Defaults to 0.5.

    Output
    ------
    aclr : float
        The ACLR in linear scale.
    """
    freqs, psd = empirical_psd(x, oversampling=oversampling, show=False)
    ind_out = tf.where(tf.logical_or(tf.less(freqs, f_min),
                                     tf.greater(freqs, f_max)))
    ind_in = tf.where(tf.logical_and(tf.greater(freqs, f_min),
                                     tf.less(freqs, f_max)))
    p_out = tf.reduce_sum(tf.gather(psd, ind_out))
    p_in = tf.reduce_sum(tf.gather(psd, ind_in))
    aclr = p_out/p_in
    return aclr
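

# Illustrative sketch (added for exposition, not part of the upstream module;
# `_demo_empirical_aclr` is a made-up name). White noise at an oversampling
# factor of 2 spreads its power evenly over [-1, 1] in normalized frequency,
# so with the default in-band [-0.5, 0.5] the out-of-band power equals the
# in-band power and the ACLR is close to 1 (0 dB).
def _demo_empirical_aclr():
    x = tf.complex(tf.random.normal([1000, 128]),
                   tf.random.normal([1000, 128]))
    aclr = empirical_aclr(x, oversampling=2.0)
    assert np.isclose(aclr.numpy(), 1.0, atol=0.1)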