anomaly-detection-material-parameters-calibration

Sionna parameter calibration (research project)
git clone https://git.ea.contact/anomaly-detection-material-parameters-calibration
Log | Files | Refs | README

utils.py (44587B)


      1 #
      2 # SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
      3 # SPDX-License-Identifier: Apache-2.0
      4 #
      5 """Utility functions and layers for the FEC package."""
      6 
      7 import tensorflow as tf
      8 from tensorflow.keras.layers import Layer
      9 import numpy as np
     10 import matplotlib.pyplot as plt
     11 import warnings
     12 from importlib_resources import files, as_file
     13 from sionna import config
     14 from sionna.fec.ldpc import codes
     15 from sionna.utils import log2
     16 from sionna.nr.utils import generate_prng_seq as generate_prng_seq_utils
     17 
     18 
     19 class GaussianPriorSource(Layer):
     20     r"""GaussianPriorSource(specified_by_mi=False, dtype=tf.float32, **kwargs)
     21 
     22     Generates `fake` LLRs as if the all-zero codeword was transmitted
     23     over an Bi-AWGN channel with noise variance ``no`` or mutual information
     24     (if ``specified_by_mi`` is True). If selected, the mutual information
     25     denotes the mutual information associated with a binary random variable
     26     observed at the output of a corresponding AWGN channel (cf. Gaussian
     27     approximation).
     28 
     29     .. image:: ../figures/GaussianPriorSource.png
     30 
     31     The generated LLRs are drawn from a Gaussian distribution with
     32 
     33     .. math::
     34         \sigma_{\text{llr}}^2 = \frac{4}{\sigma_\text{ch}^2}
     35 
     36     and
     37 
     38     .. math::
     39         \mu_{\text{llr}} = \frac{\sigma_\text{llr}^2}{2}
     40 
     41     where :math:`\sigma_\text{ch}^2` is the channel noise variance as defined by
     42     ``no``.
     43 
     44     If ``specified_by_mi`` is True, this class uses the of the so-called
     45     `J-function` (relates mutual information to Gaussian distributed LLRs) as
     46     proposed in [Brannstrom]_.
     47 
     48     Parameters
     49     ----------
     50         specified_by_mi : bool
     51             Defaults to False. If True, the second input parameter ``no`` is
     52             interpreted as mutual information instead of noise variance.
     53 
     54         dtype : tf.DType
     55             Defaults to `tf.float32`. Defines the datatype for internal
     56             calculations and the output. Must be one of the following
     57             `(tf.float16, tf.bfloat16, tf.float32, tf.float64)`.
     58 
     59     Input
     60     -----
     61         (output_shape, no):
     62             Tuple:
     63 
     64         output_shape : tf.int
     65             Integer tensor or Python array defining the shape of the desired
     66             output tensor.
     67 
     68         no : tf.float32
     69             Scalar defining the noise variance or mutual information (if
     70             ``specified_by_mi`` is True) of the corresponding (fake) AWGN
     71             channel.
     72 
     73     Output
     74     ------
     75         : ``dtype``, defaults to `tf.float32`
     76             1+D Tensor with shape as defined by ``output_shape``.
     77 
     78     Raises
     79     ------
     80         InvalidArgumentError
     81             If mutual information is not in (0,1).
     82 
     83         AssertionError
     84             If ``inputs`` is not a list with 2 elements.
     85 
     86     """
     87 
     88     def __init__(self, specified_by_mi=False, dtype=tf.float32, **kwargs):
     89 
     90         if dtype not in (tf.float16, tf.float32, tf.float64, tf.bfloat16,
     91                         tf.complex64, tf.complex128):
     92             raise ValueError("Only float dtypes are supported.")
     93 
     94         # use real_dtype to support tf.complex
     95         super().__init__(dtype=dtype.real_dtype, **kwargs)
     96 
     97         assert isinstance(specified_by_mi, bool),"specified_by_mi must be bool."
     98         self._specified_by_mi = specified_by_mi
     99 
    100     def call(self, inputs):
    101         """Generate Gaussian distributed fake LLRs as if the all-zero codeword
    102         was transmitted over an Bi-AWGN channel.
    103 
    104         Args:
    105             inputs (list): ``[output_shape, no]``, where
    106             ``output_shape`` (tf.int32): 1D list or tensor describing the
    107                 desired shape of the output.
    108             ``no`` (tf.float32): Scalar defining the noise variance or mutual
    109                 information (if ``specified_by_mi`` is True) of the
    110                 corresponding (fake) AWGN channel.
    111 
    112         Returns:
    113             1+D Tensor (``dtype``): Shape as defined by ``output_shape``.
    114         """
    115 
    116         assert isinstance(inputs, (list, tuple)), \
    117                                 "inputs must be a list or tuple."
    118         assert len(inputs)==2, "inputs must be a list with 2 elements."
    119         output_shape, noise_var = inputs
    120 
    121         if self._specified_by_mi:
    122             # interpret noise_var as mutual information
    123             mi_a = tf.cast(noise_var, tf.float32)
    124             tf.debugging.assert_greater_equal(mi_a, 0.,
    125                                         "Mutual information must be positive.")
    126             tf.debugging.assert_less_equal(mi_a, 1.,
    127                                 "Mutual information must be less or equal 1.")
    128             #clip Ia to range (0,1)
    129             mi_a = tf.maximum(mi_a, 1e-7)
    130             mi_a = tf.minimum(mi_a, 1.)
    131             mu_llr = j_fun_inv_tf(mi_a)
    132             sigma_llr = tf.math.sqrt(2*mu_llr)
    133         else:
    134             noise_var = tf.cast(noise_var, tf.float32)
    135 
    136             # noise_var must be positive
    137             noise_var = tf.maximum(noise_var, 1e-7)
    138             sigma_llr = tf.math.sqrt(4 / noise_var)
    139             mu_llr = sigma_llr**2  / 2
    140 
    141         mu_llr = tf.cast(mu_llr, super().dtype)
    142         sigma_llr = tf.cast(sigma_llr, super().dtype)
    143 
    144         # generate LLRs with Gaussian approximation (BPSK, all-zero cw)
    145         # Use negative mean as we generate logits with definition p(b=1)/p(b=0)
    146         llr = config.tf_rng.normal(output_shape,
    147                                    mean=-1.*mu_llr,
    148                                    stddev=sigma_llr,
    149                                    dtype=super().dtype)
    150         return llr
    151 
    152 def llr2mi(llr, s=None, reduce_dims=True):
    153     # pylint: disable=line-too-long
    154     r"""Implements an approximation of the mutual information based on LLRs.
    155 
    156     The function approximates the mutual information for given ``llr`` as
    157     derived in [Hagenauer]_ assuming an `all-zero codeword` transmission
    158 
    159     .. math::
    160 
    161         I \approx 1 - \sum \operatorname{log_2} \left( 1 + \operatorname{e}^{-\text{llr}} \right).
    162 
    163     This approximation assumes that the following `symmetry condition` is fulfilled
    164 
    165     .. math::
    166 
    167         p(\text{llr}|x=0) = p(\text{llr}|x=1) \cdot \operatorname{exp}(\text{llr}).
    168 
    169     For `non-all-zero` codeword transmissions, this methods requires knowledge
    170     about the signs of the original bit sequence ``s`` and flips the signs
    171     correspondingly (as if the all-zero codeword was transmitted).
    172 
    173     Please note that we define LLRs as :math:`\frac{p(x=1)}{p(x=0)}`, i.e.,
    174     the sign of the LLRs differ to the solution in [Hagenauer]_.
    175 
    176     Input
    177     -----
    178         llr : tf.float32
    179             Tensor of arbitrary shape containing LLR-values.
    180 
    181         s : None or tf.float32
    182             Tensor of same shape as llr containing the signs of the
    183             transmitted sequence (assuming BPSK), i.e., +/-1 values.
    184 
    185         reduce_dims : bool
    186             Defaults to True. If True, all dimensions are
    187             reduced and the return is a scalar. Otherwise, `reduce_mean` is
    188             only taken over the last dimension.
    189 
    190     Output
    191     ------
    192         mi : tf.float32
    193             A scalar tensor (if ``reduce_dims`` is True) or a tensor of same
    194             shape as ``llr`` apart from the last dimensions that is removed.
    195             It contains the approximated value of the mutual information.
    196 
    197     Raises
    198     ------
    199         TypeError
    200             If dtype of ``llr`` is not a real-valued float.
    201 
    202     """
    203 
    204     if s is None:
    205         s = tf.ones_like(llr)
    206 
    207     if llr.dtype not in (tf.float16, tf.bfloat16, tf.float32, tf.float64):
    208         raise TypeError("Dtype of llr must be a real-valued float.")
    209 
    210     # ensure that both tensors are compatible
    211     s = tf.cast(s, llr.dtype)
    212 
    213     # scramble sign as if all-zero cw was transmitted
    214     llr_zero = tf.multiply(s, llr)
    215     llr_zero = tf.clip_by_value(llr_zero, -20., 20.) # clip for stability
    216     x = log2(1. + tf.exp(1.* llr_zero))
    217     if reduce_dims:
    218         x = 1. - tf.reduce_mean(x)
    219     else:
    220         x = 1. - tf.reduce_mean(x, axis=-1)
    221     return x
    222 
    223 def j_fun(mu):
    224        # pylint: disable=line-too-long
    225     r"""Calculates the `J-function` in NumPy.
    226 
    227     The so-called `J-function` relates mutual information to the mean of
    228     Gaussian distributed LLRs (cf. Gaussian approximation). We use the
    229     approximation as proposed in [Brannstrom]_ which can be written as
    230 
    231     .. math::
    232 
    233         J(\mu) \approx \left( 1- 2^{H_\text{1}(2\mu)^{H_\text{2}}}\right)^{H_\text{2}}
    234 
    235     with :math:`\mu` denoting the mean value of the LLR distribution and
    236     :math:`H_\text{1}=0.3073`, :math:`H_\text{2}=0.8935` and
    237     :math:`H_\text{3}=1.1064`.
    238 
    239     Input
    240     -----
    241         mu : float
    242             float or `ndarray` of float.
    243 
    244     Output
    245     ------
    246         : float
    247             `ndarray` of same shape as the input.
    248     """
    249     assert np.all(mu<1000), "mu too large."
    250     # we support exact 0 for EXIT (clipping is used in any way)
    251     assert np.all(mu>-0.0001), "mu must be positive."
    252 
    253     h1 = 0.3073
    254     h2 = 0.8935
    255     h3 = 1.1064
    256     mu = np.maximum(mu, 1e-10) # input must be positive for numerical stability
    257     mi = (1-2**(-h1*(2*mu)**h2))**h3
    258     return mi
    259 
    260 def j_fun_inv(mi):
    261      # pylint: disable=line-too-long
    262     r"""Calculates the inverse `J-function` in NumPy.
    263 
    264     The so-called `J-function` relates mutual information to the mean of
    265     Gaussian distributed LLRs (cf. Gaussian approximation). We use the
    266     approximation as proposed in [Brannstrom]_ which can be written as
    267 
    268     .. math::
    269 
    270         J(\mu) \approx \left( 1- 2^{H_\text{1}(2\mu)^{H_\text{2}}}\right)^{H_\text{2}}
    271 
    272     with :math:`\mu` denoting the mean value of the LLR distribution and
    273     :math:`H_\text{1}=0.3073`, :math:`H_\text{2}=0.8935` and
    274     :math:`H_\text{3}=1.1064`.
    275 
    276     Input
    277     -----
    278         mi : float
    279             float or `ndarray` of float.
    280 
    281     Output
    282     -------
    283         : float
    284             `ndarray` of same shape as the input.
    285 
    286     Raises
    287     ------
    288         AssertionError
    289             If ``mi`` < 0.001 or ``mi`` > 0.999.
    290     """
    291 
    292     assert np.all(mi<0.999), "mi must be smaller 1."
    293     assert np.all(mi>0.001), "mi must be greater 0."
    294 
    295     h1 = 0.3073
    296     h2 = 0.8935
    297     h3 = 1.1064
    298     mi = np.maximum(mi,1e-10)
    299     # add small value to avoid log(0)
    300     mu = 0.5*((-1/h1)*np.log2((1-mi**(1/h3)) + 1e-12))**(1/(h2))
    301     return np.minimum(mu, 20) # clipp the output to mu_max =20
    302 
    303 def j_fun_tf(mu, verify_inputs=True):
    304      # pylint: disable=line-too-long
    305     r"""Calculates the `J-function` in Tensorflow.
    306 
    307     The so-called `J-function` relates mutual information to the mean of
    308     Gaussian distributed LLRs (cf. Gaussian approximation). We use the
    309     approximation as proposed in [Brannstrom]_ which can be written as
    310 
    311     .. math::
    312 
    313         J(\mu) \approx \left( 1- 2^{H_\text{1}(2\mu)^{H_\text{2}}}\right)^{H_\text{2}}
    314 
    315     with :math:`\mu` denoting the mean value of the LLR distribution and
    316     :math:`H_\text{1}=0.3073`, :math:`H_\text{2}=0.8935` and
    317     :math:`H_\text{3}=1.1064`.
    318 
    319     Input
    320     -----
    321         mu : tf.float32
    322             Tensor of arbitrary shape.
    323 
    324         verify_inputs : bool
    325             A boolean defaults to True. If True, ``mu`` is clipped internally
    326             to be numerical stable.
    327 
    328     Output
    329     ------
    330         : tf.float32
    331             Tensor of same shape and dtype as ``mu``.
    332 
    333     Raises
    334     ------
    335         InvalidArgumentError
    336             If ``mu`` is negative.
    337     """
    338     assert isinstance(verify_inputs, bool), "verify_inputs must be bool."
    339     if verify_inputs:
    340         # input must be positive for numerical stability
    341         mu = tf.maximum(mu, 1e-10)
    342     else:
    343         tf.debugging.assert_greater_equal(mu, 0., "mu must be positive.")
    344 
    345     h1 = 0.3073
    346     h2 = 0.8935
    347     h3 = 1.1064
    348     mi = (1-2**(-h1*(2*mu)**h2))**h3
    349     return mi
    350 
    351 def j_fun_inv_tf(mi, verify_inputs=True):
    352     # pylint: disable=line-too-long
    353     r"""Calculates the inverse `J-function` in Tensorflow.
    354 
    355     The so-called `J-function` relates mutual information to the mean of
    356     Gaussian distributed LLRs (cf. Gaussian approximation). We use the
    357     approximation as proposed in [Brannstrom]_ which can be written as
    358 
    359     .. math::
    360 
    361         J(\mu) \approx \left( 1- 2^{H_\text{1}(2\mu)^{H_\text{2}}}\right)^{H_\text{2}}
    362 
    363     with :math:`\mu` denoting the mean value of the LLR distribution and
    364     :math:`H_\text{1}=0.3073`, :math:`H_\text{2}=0.8935` and
    365     :math:`H_\text{3}=1.1064`.
    366 
    367     Input
    368     -----
    369         mi : tf.float32
    370             Tensor of arbitrary shape.
    371 
    372         verify_inputs : bool
    373             A boolean defaults to True. If True, ``mi`` is clipped internally
    374             to be numerical stable.
    375 
    376     Output
    377     ------
    378         : tf.float32
    379             Tensor of same shape and dtype as the ``mi``.
    380 
    381     Raises
    382     ------
    383         InvalidArgumentError
    384             If ``mi`` is not in `(0,1)`.
    385     """
    386 
    387     assert isinstance(verify_inputs, bool), "verify_inputs must be bool."
    388     if verify_inputs:
    389         # input must be positive for numerical stability
    390         mi = tf.maximum(mi, 1e-10) # ensure that I>0
    391         mi = tf.minimum(mi, 1.) # ensure that I=<1
    392     else:
    393         tf.debugging.assert_greater_equal(mi, 0., "mi must be positive.")
    394         tf.debugging.assert_less_equal(mi, 1., "mi must be less or equal 1.")
    395 
    396     h1 = 0.3073
    397     h2 = 0.8935
    398     h3 = 1.1064
    399     mu = 0.5*((-1/h1) * log2((1-mi**(1/h3))))**(1/(h2))
    400     return tf.minimum(mu, 20) # clipp the output to mu_max =20
    401 
    402 def plot_trajectory(plot, mi_v, mi_c, ebno=None):
    403     """Utility function to plot the trajectory of an EXIT-chart.
    404 
    405     Input
    406     -----
    407         plot : matplotlib.figure
    408             A handle to a matplotlib figure.
    409 
    410         mi_v : float
    411             An ndarray of floats containing the variable node mutual
    412             information.
    413 
    414         mi_c : float
    415             An ndarray of floats containing the check node mutual
    416             information.
    417 
    418         ebno : float
    419             A float denoting the EbNo in dB for the legend entry.
    420     """
    421 
    422     assert (len(mi_v)==len(mi_c)), "mi_v and mi_c must have same length."
    423 
    424     # number of decoding iterations to plot
    425     iters = np.shape(mi_v)[0] - 1
    426 
    427     x = np.zeros([2*iters])
    428     y = np.zeros([2*iters])
    429 
    430     #  iterate between VN and CN MI value
    431     y[1] = mi_v[0]
    432     for i in range(1, iters):
    433         x[2*i] = mi_c[i-1]
    434         y[2*i] = mi_v[i-1]
    435         x[2*i+1] = mi_c[i-1]
    436         y[2*i+1] = mi_v[i]
    437 
    438     if ebno is not None:
    439         label_str = f"Actual trajectory @ {ebno} dB"
    440     else:
    441         label_str = "Actual trajectory"
    442 
    443     #plot trajectory
    444     plot.plot(x,
    445              y,
    446              "-",
    447              linewidth=3,
    448              color="g",
    449              label=label_str)
    450     plot.legend(fontsize=18) # and show the legend
    451 
    452 def plot_exit_chart(mi_a=None, mi_ev=None, mi_ec=None, title="EXIT-Chart"):
    453     """Utility function to plot EXIT-Charts [tenBrinkEXIT]_.
    454 
    455     If all inputs are `None` an empty EXIT chart is generated. Otherwise,
    456     the mutual information curves are plotted.
    457 
    458     Input
    459     -----
    460         mi_a : float
    461             An ndarray of floats containing the a priori mutual
    462             information.
    463 
    464         mi_v : float
    465             An ndarray of floats containing the variable node mutual
    466             information.
    467 
    468         mi_c : float
    469             An ndarray of floats containing the check node mutual
    470             information.
    471 
    472         title : str
    473             A string defining the title of the EXIT chart.
    474     Output
    475     ------
    476         plt: matplotlib.figure
    477             A matplotlib figure handle
    478 
    479     Raises
    480     ------
    481         AssertionError
    482             If ``title`` is not `str`.
    483     """
    484 
    485     assert isinstance(title, str), "title must be str."
    486 
    487     if not (mi_ev is None and mi_ec is None):
    488         if mi_a is None:
    489             raise ValueError("mi_a cannot be None if mi_e is provided.")
    490 
    491     if mi_ev is not None:
    492         assert (len(mi_a)==len(mi_ev)), "mi_a and mi_ev must have same length."
    493     if mi_ec is not None:
    494         assert (len(mi_a)==len(mi_ec)), "mi_a and mi_ec must have same length."
    495 
    496     plt.figure(figsize=(10,10))
    497     plt.title(title, fontsize=25)
    498     plt.xlabel("$I_{a}^v$, $I_{e}^c$", fontsize=25)
    499     plt.ylabel("$I_{e}^v$, $I_{a}^c$", fontsize=25)
    500     plt.grid(visible=True, which='major')
    501 
    502 
    503     # for MI, the x,y limits are always (0,1)
    504     plt.xlim(0, 1)
    505     plt.ylim(0, 1)
    506     plt.xticks(fontsize=18)
    507     plt.yticks(fontsize=18)
    508 
    509     # and plot EXIT curves
    510     if mi_ec is not None:
    511         plt.plot(mi_ec, mi_a, "r", linewidth=3, label="Check node")
    512         plt.legend()
    513     if mi_ev is not None:
    514         plt.plot(mi_a, mi_ev, "b", linewidth=3, label="Variable node")
    515         plt.legend()
    516     return plt
    517 
    518 def get_exit_analytic(pcm, ebno_db):
    519     """Calculate the analytic EXIT-curves for a given parity-check matrix.
    520 
    521     This function extracts the degree profile from ``pcm`` and calculates the
    522     variable (VN) and check node (CN) decoder EXIT curves. Please note that
    523     this is an asymptotic tool which needs a certain codeword length for
    524     accurate predictions.
    525 
    526     Transmission over an AWGN channel with BPSK modulation and SNR ``ebno_db``
    527     is assumed. The detailed equations can be found in [tenBrink]_ and
    528     [tenBrinkEXIT]_.
    529 
    530     Input
    531     -----
    532         pcm : ndarray
    533             The parity-check matrix.
    534 
    535         ebno_db : float
    536             The channel SNR in dB.
    537 
    538     Output
    539     ------
    540         mi_a : ndarray of floats
    541             NumPy array containing the `a priori` mutual information.
    542 
    543         mi_ev : ndarray of floats
    544             NumPy array containing the extrinsic mutual information of the
    545             variable node decoder for the corresponding ``mi_a``.
    546 
    547         mi_ec : ndarray of floats
    548             NumPy array containing the extrinsic mutual information of the check
    549             node decoder for the corresponding ``mi_a``.
    550 
    551     Note
    552     ----
    553         This function assumes random parity-check matrices without any imposed
    554         structure. Thus, explicit code construction algorithms may lead
    555         to inaccurate EXIT predictions. Further, this function is based
    556         on asymptotic properties of the code, i.e., only works well for large
    557         parity-check matrices. For details see [tenBrink]_.
    558     """
    559 
    560     # calc coderate
    561     n = pcm.shape[1]
    562     k = n - pcm.shape[0]
    563     coderate = k/n
    564 
    565     # calc mean and noise_var of Gaussian distributed LLRs for given channel SNR
    566     ebno = 10**(ebno_db/10)
    567     snr = ebno*coderate
    568     noise_var = 1/(2*snr)
    569 
    570     # For BiAWGN channels the LLRs follow a Gaussian distr. as given below [1]
    571     sigma_llr = np.sqrt(4 / noise_var)
    572     mu_llr = sigma_llr**2  / 2
    573 
    574     # calculate max node degree
    575     # "+1" as the array indices later directly denote the node degrees and we
    576     # have to account the array start at position 0 (i.e., we need one more
    577     # element)
    578     c_max = int(np.max(np.sum(pcm, axis=1)) + 1 )
    579     v_max = int(np.max(np.sum(pcm, axis=0)) + 1 )
    580 
    581     # calculate degree profile (node perspective)
    582     c = np.histogram(np.sum(pcm, axis=1),
    583                      bins=c_max,
    584                      range=(0, c_max),
    585                      density=False)[0]
    586 
    587     v = np.histogram(np.sum(pcm, axis=0),
    588                      bins=v_max,
    589                      range=(0, v_max),
    590                      density=False)[0]
    591 
    592     # calculate degrees from edge perspective
    593     r = np.zeros([c_max])
    594     for i in range(1,c_max):
    595         r[i] = (i-1)*c[i]
    596     r = r / np.sum(r)
    597     l = np.zeros([v_max])
    598     for i in range(1,v_max):
    599         l[i] = (i-1)*v[i]
    600     l = l / np.sum(l)
    601 
    602     mi_a = np.arange(0.002, 0.998, 0.001) # quantize Ia with 0.01 resolution
    603 
    604     # Exit function of check node update
    605     mi_ec = np.zeros_like(mi_a)
    606     for i in range(1, c_max):
    607         mi_ec += r[i] * j_fun((i-1.) * j_fun_inv(1 - mi_a))
    608     mi_ec = 1 - mi_ec
    609 
    610     # Exit function of variable node update
    611     mi_ev = np.zeros_like(mi_a)
    612     for i in range(1, v_max):
    613         mi_ev += l[i] * j_fun(mu_llr + (i-1.) * j_fun_inv(mi_a))
    614 
    615     return mi_a, mi_ev, mi_ec
    616 
    617 def load_parity_check_examples(pcm_id, verbose=False):
    618     # pylint: disable=line-too-long
    619     """Utility function to load example codes stored in sub-folder LDPC/codes.
    620 
    621     The following codes are available
    622 
    623     - 0 : `(7,4)`-Hamming code of length `k=4` information bits and codeword    length `n=7`.
    624 
    625     - 1 : `(63,45)`-BCH code of length `k=45` information bits and codeword    length `n=63`.
    626 
    627     - 2 : (127,106)-BCH code of length `k=106` information bits and codeword    length `n=127`.
    628 
    629     - 3 : Random LDPC code with regular variable node degree 3 and check node degree 6 of length `k=50` information bits and codeword length         `n=100`.
    630 
    631     - 4 : 802.11n LDPC code of length of length `k=324` information bits and    codeword length `n=648`.
    632 
    633     Input
    634     -----
    635         pcm_id : int
    636             An integer defining which matrix id to load.
    637 
    638         verbose : bool
    639             Defaults to False. If True, the code parameters are
    640             printed.
    641 
    642     Output
    643     ------
    644         pcm: ndarray of `zeros` and `ones`
    645             An ndarray containing the parity check matrix.
    646 
    647         k : int
    648             An integer defining the number of information bits.
    649 
    650         n : int
    651             An integer defining the number of codeword bits.
    652 
    653         coderate : float
    654             A float defining the coderate (assuming full rank of
    655             parity-check matrix).
    656     """
    657 
    658     source = files(codes).joinpath("example_codes.npy")
    659     with as_file(source) as code:
    660         pcms = np.load(code, allow_pickle=True)
    661 
    662     pcm = np.array(pcms[pcm_id]) # load parity-check matrix
    663     n = int(pcm.shape[1]) # number of codeword bits (codeword length)
    664     k = int(n - pcm.shape[0]) # number of information bits k per codeword
    665     coderate = k / n
    666 
    667     if verbose:
    668         print(f"\nn: {n}, k: {k}, coderate: {coderate:.3f}")
    669     return pcm, k, n, coderate
    670 
    671 def bin2int(arr):
    672     """Convert binary array to integer.
    673 
    674     For example ``arr`` = `[1, 0, 1]` is converted to `5`.
    675 
    676     Input
    677     -----
    678         arr: int or float
    679             An iterable that yields 0's and 1's.
    680 
    681     Output
    682     -----
    683         : int
    684             Integer representation of ``arr``.
    685 
    686     """
    687     if len(arr) == 0: return None
    688     return int(''.join([str(x) for x in arr]), 2)
    689 
    690 def bin2int_tf(arr):
    691     """
    692     Converts binary tensor to int tensor. Binary representation in ``arr``
    693     is across the last dimension from most significant to least significant.
    694 
    695     For example ``arr`` = `[0, 1, 1]` is converted to `3`.
    696 
    697     Input
    698     -----
    699         arr: int or float
    700             Tensor of  0's and 1's.
    701 
    702     Output
    703     -----
    704         : int
    705             Tensor containing the integer representation of ``arr``.
    706     """
    707     len_ = tf.shape(arr)[-1]
    708     shifts = tf.range(len_-1,-1,-1)
    709 
    710     # (2**len_-1)*arr[0] +... 2*arr[len_-2] + 1*arr[len_-1]
    711     op = tf.reduce_sum(tf.bitwise.left_shift(arr, shifts), axis=-1)
    712 
    713     return op
    714 
    715 def int2bin(num, len_):
    716     """
    717     Convert ``num`` of int type to list of length ``len_`` with 0's and 1's.
    718     ``num`` and ``len_`` have to non-negative.
    719 
    720     For e.g., ``num`` = `5`; `int2bin(num`, ``len_`` =4) = `[0, 1, 0, 1]`.
    721 
    722     For e.g., ``num`` = `12`; `int2bin(num`, ``len_`` =3) = `[1, 0, 0]`.
    723 
    724     Input
    725     -----
    726         num: int
    727             An integer to be converted into binary representation.
    728 
    729         len_: int
    730             An integer defining the length of the desired output.
    731 
    732     Output
    733     -----
    734         : list of int
    735             Binary representation of ``num`` of length ``len_``.
    736     """
    737     assert num >= 0,  "Input integer should be non-negative"
    738     assert len_ >= 0,  "width should be non-negative"
    739 
    740     bin_ = format(num, f'0{len_}b')
    741     binary_vals = [int(x) for x in bin_[-len_:]] if len_ else []
    742     return binary_vals
    743 
    744 def int2bin_tf(ints, len_):
    745     """
    746     Converts (int) tensor to (int) tensor with 0's and 1's. `len_` should be
    747     to non-negative. Additional dimension of size `len_` is inserted at end.
    748 
    749     Input
    750     -----
    751         ints: int
    752             Tensor of arbitrary shape `[...,k]` containing integer to be
    753             converted into binary representation.
    754 
    755         len_: int
    756             An integer defining the length of the desired output.
    757 
    758     Output
    759     -----
    760         : int
    761             Tensor of same shape as ``ints`` except dimension of length
    762             ``len_`` is added at the end `[...,k, len_]`. Contains the binary
    763             representation of ``ints`` of length ``len_``.
    764     """
    765     assert len_ >= 0
    766 
    767     shifts = tf.range(len_-1, -1, delta=-1)
    768     bits = tf.math.floormod(
    769         tf.bitwise.right_shift(tf.expand_dims(ints, -1), shifts), 2)
    770     return bits
    771 
    772 def alist2mat(alist, verbose=True):
    773     # pylint: disable=line-too-long
    774     r"""Convert `alist` [MacKay]_ code definition to `full` parity-check matrix.
    775 
    776     Many code examples can be found in [UniKL]_.
    777 
    778     About `alist` (see [MacKay]_ for details):
    779 
    780         - `1.` Row defines parity-check matrix dimension `m x n`
    781         - `2.` Row defines int with `max_CN_degree`, `max_VN_degree`
    782         - `3.` Row defines VN degree of all `n` column
    783         - `4.` Row defines CN degree of all `m` rows
    784         - Next `n` rows contain non-zero entries of each column (can be zero padded at the end)
    785         - Next `m` rows contain non-zero entries of each row.
    786 
    787     Input
    788     -----
    789     alist: list
    790         Nested list in `alist`-format [MacKay]_.
    791 
    792     verbose: bool
    793         Defaults to True. If True, the code parameters are printed.
    794 
    795     Output
    796     ------
    797     (pcm, k, n, coderate):
    798         Tuple:
    799 
    800     pcm: ndarray
    801         NumPy array of shape `[n-k, n]` containing the parity-check matrix.
    802 
    803     k: int
    804         Number of information bits.
    805 
    806     n: int
    807         Number of codewords bits.
    808 
    809     coderate: float
    810         Coderate of the code.
    811 
    812     Note
    813     ----
    814         Use :class:`~sionna.fec.utils.load_alist` to import alist from a
    815         textfile.
    816 
    817         For example, the following code snippet will import an alist from a file called ``filename``:
    818 
    819         .. code-block:: python
    820 
    821             al = load_alist(path = filename)
    822             pcm, k, n, coderate = alist2mat(al)
    823     """
    824 
    825     assert len(alist)>4, "Invalid alist format."
    826 
    827     n = alist[0][0]
    828     m = alist[0][1]
    829     v_max = alist[1][0]
    830     c_max = alist[1][1]
    831     k = n - m
    832     coderate = k / n
    833 
    834     vn_profile = alist[2]
    835     cn_profile = alist[3]
    836 
    837     # plausibility checks
    838     assert np.sum(vn_profile)==np.sum(cn_profile), "Invalid alist format."
    839     assert np.max(vn_profile)==v_max, "Invalid alist format."
    840     assert np.max(cn_profile)==c_max, "Invalid alist format."
    841 
    842     if len(alist)==len(vn_profile)+4:
    843         print("Note: .alist does not contain (redundant) CN perspective.")
    844         print("Recovering parity-check matrix from VN only.")
    845         print("Please verify the correctness of the results manually.")
    846         vn_only = True
    847     else:
    848         assert len(alist)==len(vn_profile) + len(cn_profile) + 4, \
    849                                                 "Invalid alist format."
    850         vn_only = False
    851 
    852     pcm = np.zeros((m,n))
    853     num_edges = 0 # count number of edges
    854 
    855     for idx_v in range(n):
    856         for idx_i in range(vn_profile[idx_v]):
    857             # first 4 rows of alist contain meta information
    858             idx_c = alist[4+idx_v][idx_i]-1 # "-1" as this is python
    859             pcm[idx_c, idx_v] = 1
    860             num_edges += 1 # count number of edges (=each non-zero entry)
    861 
    862     # validate results from CN perspective
    863     if not vn_only:
    864         for idx_c in range(m):
    865             for idx_i in range(cn_profile[idx_c]):
    866                 # first 4 rows of alist contain meta information
    867                 # follwing n rows contained VN perspective
    868                 idx_v = alist[4+n+idx_c][idx_i]-1 # "-1" as this is python
    869                 assert pcm[idx_c, idx_v]==1 # entry must already exist
    870 
    871     if verbose:
    872         print("Number of variable nodes (columns): ", n)
    873         print("Number of check nodes (rows): ", m)
    874         print("Number of information bits per cw: ", k)
    875         print("Number edges: ", num_edges)
    876         print("Max. VN degree: ", v_max)
    877         print("Max. CN degree: ", c_max)
    878         print("VN degree: ", vn_profile)
    879         print("CN degree: ", cn_profile)
    880 
    881     return pcm, k, n, coderate
    882 
    883 def load_alist(path):
    884     """Read `alist`-file [MacKay]_ and return nested list describing the
    885     parity-check matrix of a code.
    886 
    887     Many code examples can be found in [UniKL]_.
    888 
    889     Input
    890     -----
    891     path:str
    892         Path to file to be loaded.
    893 
    894     Output
    895     ------
    896     alist: list
    897         A nested list containing the imported alist data.
    898     """
    899 
    900     alist = []
    901     with open(path, "r") as reader: # pylint: disable=unspecified-encoding
    902         # read list line by line (different length)
    903         for line in reader:
    904             l = []
    905             # append all entries
    906             for word in line.split():
    907                 l.append(int(word))
    908             if l: # ignore empty lines
    909                 alist.append(l)
    910 
    911     return alist
    912 
    913 def make_systematic(mat, is_pcm=False):
    914     r"""Bring binary matrix in its systematic form.
    915 
    916     Input
    917     -----
    918     mat : ndarray
    919         Binary matrix to be transformed to systematic form of shape `[k, n]`.
    920 
    921     is_pcm: bool
    922         Defaults to False. If true, ``mat`` is interpreted as parity-check
    923         matrix and, thus, the last k columns will be the identity part.
    924 
    925     Output
    926     ------
    927     mat_sys: ndarray
    928         Binary matrix in systematic form, i.e., the first `k` columns equal the
    929         identity matrix (or last `k` if ``is_pcm`` is True).
    930 
    931     column_swaps: list of int tuples
    932         A list of integer tuples that describes the swapped columns (in the
    933         order of execution).
    934 
    935     Note
    936     ----
    937     This algorithm (potentially) swaps columns of the input matrix. Thus, the
    938     resulting systematic matrix (potentially) relates to a permuted version of
    939     the code, this is defined by the returned list ``column_swap``.
    940     Note that, the inverse permutation must be applied in the inverse list
    941     order (in case specific columns are swapped multiple times).
    942 
    943     If a parity-check matrix is passed as input (i.e., ``is_pcm`` is True), the
    944     identity part will be re-arranged to the last columns."""
    945 
    946     m = mat.shape[0]
    947     n = mat.shape[1]
    948 
    949     assert m<=n, "Invalid matrix dimensions."
    950 
    951     # check for all-zero columns (=unchecked nodes)
    952     if is_pcm:
    953         c_node_deg = np.sum(mat, axis=0)
    954         if np.any(c_node_deg==0):
    955             warnings.warn("All-zero column in parity-check matrix detected. " \
    956                 "It seems as if the code contains unprotected nodes.")
    957 
    958     mat = np.copy(mat)
    959     column_swaps = [] # store all column swaps
    960 
    961     # convert to bool for faster arithmetics
    962     mat = mat.astype(bool)
    963 
    964     # bring in upper triangular form
    965     for idx_c in range(m):
    966         success = mat[idx_c, idx_c]
    967         if not success: # skip if leading "1" already occurred
    968             # step 1: find next leading "1"
    969             for idx_r in range(idx_c+1,m):
    970                 # skip if entry is "0"
    971                 if mat[idx_r, idx_c]:
    972                     mat[[idx_c, idx_r]] = mat[[idx_r, idx_c]] # swap rows
    973                     success = True
    974                     break
    975 
    976         # Could not find "1"-entry for column idx_c
    977         # => swap with columns from non-sys part
    978         # The task is to find a column with index idx_cc that has a "1" at
    979         # row idx_c
    980         if not success:
    981             for idx_cc in range(idx_c+1, n):
    982                 if mat[idx_c, idx_cc]:
    983                     # swap columns
    984                     mat[:,[idx_c, idx_cc]] = mat[:,[idx_cc, idx_c]]
    985                     column_swaps.append([idx_c, idx_cc])
    986                     success = True
    987                     break
    988 
    989         if not success:
    990             raise ValueError("Could not succeed; mat is not full rank?")
    991 
    992         # we can now assume a leading "1" at row idx_c
    993         for idx_r in range(idx_c+1, m):
    994             if mat[idx_r, idx_c]:
    995                 mat[idx_r,:] ^= mat[idx_c,:] # bin. add of row idx_c to idx_r
    996 
    997     # remove upper triangle part in inverse order
    998     for idx_c in range(m-1, -1, -1):
    999         for idx_r in range(idx_c-1, -1, -1):
   1000             if mat[idx_r, idx_c]:
   1001                 mat[idx_r,:] ^= mat[idx_c,:] # bin. add of row idx_c to idx_r
   1002 
   1003     # verify results
   1004     assert np.array_equal(mat[:,:m], np.eye(m)), \
   1005                             "Internal error, could not find systematic matrix."
   1006 
   1007     # bring identity part to end of matrix if parity-check matrix is provided
   1008     if is_pcm:
   1009         # individual column swaps instead of copying entire block
   1010         # this simplifies the tracking of column swaps.
   1011         for i in range(n-1, (n-1)-m, -1):
   1012             j = i - (n-m)
   1013             mat[:,[i, j]] = mat[:,[j, i]]
   1014             column_swaps.append([i, j])
   1015 
   1016     # return integer array
   1017     mat = mat.astype(int)
   1018     return mat, column_swaps
   1019 
   1020 def gm2pcm(gm, verify_results=True):
   1021     r"""Generate the parity-check matrix for a given generator matrix.
   1022 
   1023     This function brings ``gm`` :math:`\mathbf{G}` in its systematic form and
   1024     uses the following relation to find the parity-check matrix
   1025     :math:`\mathbf{H}` in GF(2)
   1026 
   1027     .. math::
   1028 
   1029         \mathbf{G} = [\mathbf{I} |  \mathbf{M}]
   1030         \Leftrightarrow \mathbf{H} = [\mathbf{M} ^t | \mathbf{I}]. \tag{1}
   1031 
   1032     This follows from the fact that for an all-zero syndrome, it must hold that
   1033 
   1034     .. math::
   1035 
   1036         \mathbf{H} \mathbf{c}^t = \mathbf{H} * (\mathbf{u} * \mathbf{G})^t =
   1037         \mathbf{H} * \mathbf{G} ^t * \mathbf{u}^t =: \mathbf{0}
   1038 
   1039     where :math:`\mathbf{c}` denotes an arbitrary codeword and
   1040     :math:`\mathbf{u}` the corresponding information bits.
   1041 
   1042     This leads to
   1043 
   1044     .. math::
   1045 
   1046      \mathbf{G} * \mathbf{H} ^t =: \mathbf{0}. \tag{2}
   1047 
   1048     It can be seen that (1) fulfills (2), as it holds in GF(2) that
   1049 
   1050     .. math::
   1051 
   1052         [\mathbf{I} |  \mathbf{M}] * [\mathbf{M} ^t | \mathbf{I}]^t
   1053          = \mathbf{M} + \mathbf{M} = \mathbf{0}.
   1054 
   1055     Input
   1056     -----
   1057     gm : ndarray
   1058         Binary generator matrix of shape `[k, n]`.
   1059 
   1060     verify_results: bool
   1061         Defaults to True. If True, it is verified that the generated
   1062         parity-check matrix is orthogonal to the generator matrix in GF(2).
   1063 
   1064     Output
   1065     ------
   1066     : ndarray
   1067         Binary parity-check matrix of shape `[n-k, n]`.
   1068 
   1069     Note
   1070     ----
   1071     This algorithm only works if ``gm`` has full rank. Otherwise an error is
   1072     raised.
   1073 
   1074     """
   1075     k = gm.shape[0]
   1076     n = gm.shape[1]
   1077 
   1078     assert k<n, "Invalid matrix dimensions."
   1079 
   1080     # bring gm in systematic form
   1081     gm_sys, c_swaps = make_systematic(gm, is_pcm=False)
   1082 
   1083     m_mat = np.transpose(np.copy(gm_sys[:,-(n-k):]))
   1084     i_mat = np.eye(n-k)
   1085 
   1086     pcm = np.concatenate((m_mat, i_mat), axis=1)
   1087 
   1088     # undo column swaps
   1089     for l in c_swaps[::-1]: # reverse ordering when going through list
   1090         pcm[:,[l[0], l[1]]] = pcm[:,[l[1], l[0]]] # swap columns
   1091 
   1092     if verify_results:
   1093         assert verify_gm_pcm(gm=gm, pcm=pcm), \
   1094             "Resulting parity-check matrix does not match to generator matrix."
   1095 
   1096     return pcm
   1097 
   1098 def pcm2gm(pcm, verify_results=True):
   1099     r"""Generate the generator matrix for a given parity-check matrix.
   1100 
   1101     This function brings ``pcm`` :math:`\mathbf{H}` in its systematic form and
   1102     uses the following relation to find the generator matrix
   1103     :math:`\mathbf{G}` in GF(2)
   1104 
   1105     .. math::
   1106 
   1107         \mathbf{G} = [\mathbf{I} |  \mathbf{M}]
   1108         \Leftrightarrow \mathbf{H} = [\mathbf{M} ^t | \mathbf{I}]. \tag{1}
   1109 
   1110     This follows from the fact that for an all-zero syndrome, it must hold that
   1111 
   1112     .. math::
   1113 
   1114         \mathbf{H} \mathbf{c}^t = \mathbf{H} * (\mathbf{u} * \mathbf{G})^t =
   1115         \mathbf{H} * \mathbf{G} ^t * \mathbf{u}^t =: \mathbf{0}
   1116 
   1117     where :math:`\mathbf{c}` denotes an arbitrary codeword and
   1118     :math:`\mathbf{u}` the corresponding information bits.
   1119 
   1120     This leads to
   1121 
   1122     .. math::
   1123 
   1124      \mathbf{G} * \mathbf{H} ^t =: \mathbf{0}. \tag{2}
   1125 
   1126     It can be seen that (1) fulfills (2) as in GF(2) it holds that
   1127 
   1128     .. math::
   1129 
   1130         [\mathbf{I} |  \mathbf{M}] * [\mathbf{M} ^t | \mathbf{I}]^t
   1131          = \mathbf{M} + \mathbf{M} = \mathbf{0}.
   1132 
   1133     Input
   1134     -----
   1135     pcm : ndarray
   1136         Binary parity-check matrix of shape `[n-k, n]`.
   1137 
   1138     verify_results: bool
   1139         Defaults to True. If True, it is verified that the generated
   1140         generator matrix is orthogonal to the parity-check matrix in GF(2).
   1141 
   1142     Output
   1143     ------
   1144     : ndarray
   1145         Binary generator matrix of shape `[k, n]`.
   1146 
   1147     Note
   1148     ----
   1149     This algorithm only works if ``pcm`` has full rank. Otherwise an error is
   1150     raised.
   1151 
   1152     """
   1153     n = pcm.shape[1]
   1154     k = n - pcm.shape[0]
   1155 
   1156     assert k<n, "Invalid matrix dimensions."
   1157 
   1158     # bring pcm in systematic form
   1159     pcm_sys, c_swaps = make_systematic(pcm, is_pcm=True)
   1160 
   1161     m_mat = np.transpose(np.copy(pcm_sys[:,:k]))
   1162     i_mat = np.eye(k)
   1163     gm = np.concatenate((i_mat, m_mat), axis=1)
   1164 
   1165     # undo column swaps
   1166     for l in c_swaps[::-1]: # reverse ordering when going through list
   1167         gm[:,[l[0], l[1]]] = gm[:,[l[1], l[0]]] # swap columns
   1168 
   1169     if verify_results:
   1170         assert verify_gm_pcm(gm=gm, pcm=pcm), \
   1171             "Resulting parity-check matrix does not match to generator matrix."
   1172     return gm
   1173 
   1174 def verify_gm_pcm(gm, pcm):
   1175     r"""Verify that generator matrix :math:`\mathbf{G}` ``gm`` and parity-check
   1176     matrix :math:`\mathbf{H}` ``pcm`` are orthogonal in GF(2).
   1177 
   1178     For an all-zero syndrome, it must hold that
   1179 
   1180     .. math::
   1181 
   1182         \mathbf{H} \mathbf{c}^t = \mathbf{H} * (\mathbf{u} * \mathbf{G})^t =
   1183         \mathbf{H} * \mathbf{G} ^t * \mathbf{u}^t =: \mathbf{0}
   1184 
   1185     where :math:`\mathbf{c}` denotes an arbitrary codeword and
   1186     :math:`\mathbf{u}` the corresponding information bits.
   1187 
   1188     As :math:`\mathbf{u}` can be arbitrary it follows that
   1189 
   1190     .. math::
   1191         \mathbf{H} * \mathbf{G} ^t =: \mathbf{0}.
   1192 
   1193     Input
   1194     -----
   1195     gm : ndarray
   1196         Binary generator matrix of shape `[k, n]`.
   1197 
   1198     pcm : ndarray
   1199         Binary parity-check matrix of shape `[n-k, n]`.
   1200 
   1201     Output
   1202     ------
   1203     : bool
   1204         True if ``gm`` and ``pcm`` define a valid pair of parity-check and
   1205         generator matrices in GF(2).
   1206     """
   1207 
   1208     # check for valid dimensions
   1209     k = gm.shape[0]
   1210     n = gm.shape[1]
   1211 
   1212     n_pcm = pcm.shape[1]
   1213     k_pcm = n_pcm - pcm.shape[0]
   1214 
   1215     assert k==k_pcm, "Inconsistent shape of gm and pcm."
   1216     assert n==n_pcm, "Inconsistent shape of gm and pcm."
   1217 
   1218     # check that both matrices are binary
   1219     assert ((gm==0) | (gm==1)).all(), "gm is not binary."
   1220     assert ((pcm==0) | (pcm==1)).all(), "pcm is not binary."
   1221 
   1222     # check for zero syndrome
   1223     s = np.mod(np.matmul(pcm, np.transpose(gm)), 2) # mod2 to account for GF(2)
   1224     return np.sum(s)==0 # Check for Non-zero syndrom of H*G'
   1225 
   1226 def generate_reg_ldpc(v, c, n, allow_flex_len=True, verbose=True):
   1227     r"""Generate random regular (v,c) LDPC codes.
   1228 
   1229     This functions generates a random LDPC parity-check matrix of length ``n``
   1230     where each variable node (VN) has degree ``v`` and each check node (CN) has
   1231     degree ``c``. Please note that the LDPC code is not optimized to avoid
   1232     short cycles and the resulting codes may show a non-negligible error-floor.
   1233     For encoding, the :class:`~sionna.fec.utils.LinearEncoder` layer can be
   1234     used, however, the construction does not guarantee that the pcm has full
   1235     rank.
   1236 
   1237     Input
   1238     -----
   1239     v : int
   1240         Desired variable node (VN) degree.
   1241 
   1242     c : int
   1243         Desired check node (CN) degree.
   1244 
   1245     n : int
   1246         Desired codeword length.
   1247 
   1248     allow_flex_len: bool
   1249         Defaults to True. If True, the resulting codeword length can be
   1250         (slightly) increased.
   1251 
   1252     verbose : bool
   1253         Defaults to True. If True, code parameters are printed.
   1254 
   1255     Output
   1256     ------
   1257     (pcm, k, n, coderate):
   1258         Tuple:
   1259 
   1260     pcm: ndarray
   1261         NumPy array of shape `[n-k, n]` containing the parity-check matrix.
   1262 
   1263     k: int
   1264         Number of information bits per codeword.
   1265 
   1266     n: int
   1267         Number of codewords bits.
   1268 
   1269     coderate: float
   1270         Coderate of the code.
   1271 
   1272 
   1273     Note
   1274     ----
   1275     This algorithm works only for regular node degrees. For state-of-the-art
   1276     bit-error-rate performance, usually one needs to optimize irregular degree
   1277     profiles (see [tenBrink]_).
   1278     """
   1279 
   1280     # check input values for consistency
   1281     assert isinstance(allow_flex_len, bool), \
   1282                                     'allow_flex_len must be bool.'
   1283 
   1284     # allow slight change in n to keep num edges
   1285     # from CN and VN perspective an integer
   1286     if allow_flex_len:
   1287         for n_mod in range(n, n+2*c):
   1288             if np.mod((v/c) * n_mod, 1.)==0:
   1289                 n = n_mod
   1290                 if verbose:
   1291                     print("Setting n to: ", n)
   1292                 break
   1293 
   1294     # calculate number of nodes
   1295     coderate = 1 - (v/c)
   1296     n_v = n
   1297     n_c = int((v/c) * n)
   1298     k = n_v - n_c
   1299 
   1300     # generate sockets
   1301     v_socks = np.tile(np.arange(n_v),v)
   1302     c_socks = np.tile(np.arange(n_c),c)
   1303     if verbose:
   1304         print("Number of edges (VN perspective): ", len(v_socks))
   1305         print("Number of edges (CN perspective): ", len(c_socks))
   1306     assert len(v_socks) == len(c_socks), "Number of edges from VN and CN " \
   1307         "perspective does not match. Consider to (slightly) change n."
   1308 
   1309     # apply random permutations
   1310     np.random.shuffle(v_socks)
   1311     np.random.shuffle(c_socks)
   1312 
   1313     # and generate matrix
   1314     pcm = np.zeros([n_c, n_v])
   1315 
   1316     idx = 0
   1317     shuffle_max = 200 # stop if no success
   1318     shuffle_counter = 0
   1319     cont = True
   1320     while cont:
   1321         # if edge is available, take it
   1322         if pcm[c_socks[idx],v_socks[idx]]==0:
   1323             pcm[c_socks[idx],v_socks[idx]] = 1
   1324             idx +=1 # and go to next socket
   1325             shuffle_counter = 0 # reset counter
   1326             if idx==len(v_socks):
   1327                 cont = False
   1328         else: # shuffle sockets
   1329             shuffle_counter+=1
   1330             if shuffle_counter<shuffle_max:
   1331                 np.random.shuffle(v_socks[idx:])
   1332                 np.random.shuffle(c_socks[idx:])
   1333             else:
   1334                 print("Stopping - no solution found!")
   1335                 cont=False
   1336 
   1337     v_deg = np.sum(pcm, axis=0)
   1338     c_deg = np.sum(pcm, axis=1)
   1339 
   1340     assert((v_deg==v).all()), "VN degree not always v."
   1341     assert((c_deg==c).all()), "CN degree not always c."
   1342 
   1343     if verbose:
   1344         print(f"Generated regular ({v},{c}) LDPC code of length n={n}")
   1345         print(f"Code rate is r={coderate:.3f}.")
   1346         plt.spy(pcm)
   1347 
   1348     return pcm, k, n, coderate
   1349 
   1350 
   1351 def int_mod_2(x):
   1352     r"""Efficient implementation of modulo 2 operation for integer inputs.
   1353 
   1354     This function assumes integer inputs or implicitly casts to int.
   1355 
   1356     Remark: the function `tf.math.mod(x, 2)` is placed on the CPU and, thus,
   1357     causes unnecessary memory copies.
   1358 
   1359     Parameters
   1360     ----------
   1361     x: tf.Tensor
   1362         Tensor to which the modulo 2 operation is applied.
   1363 
   1364     """
   1365 
   1366     x_int32 = tf.cast(x, tf.int32)
   1367     y_int32 = tf.bitwise.bitwise_and(x_int32, tf.constant(1, tf.int32))
   1368     return tf.cast(y_int32, x.dtype)
   1369 
   1370 
   1371 ###########################################################
   1372 # Deprecated aliases that will not be included in the next
   1373 # major release
   1374 ###########################################################
   1375 
   1376 # ignore invalid name as this is required for legacy reasons
   1377 # pylint: disable=C0103
   1378 def LinearEncoder(enc_mat,
   1379                   is_pcm=False,
   1380                   dtype=tf.float32,
   1381                   **kwargs):
   1382     # defer import as circular dependency is generated otherwise
   1383     from sionna.fec.linear import LinearEncoder as LE # pylint: disable=C0415
   1384     print("Warning: The alias fec.utils.LinearEncoder will not be included in "\
   1385           "Sionna 1.0. Please use fec.linear.LinearEncoder instead.")
   1386     return LE(enc_mat=enc_mat,
   1387                              is_pcm=is_pcm,
   1388                              dtype=dtype,
   1389                              **kwargs)
   1390 
   1391 # Scrambling has been refactored and c_init is now directly calculated during
   1392 # the initialization of the Scrambler
   1393 def generate_prng_seq(length, n_rnti=None, n_id=None, c_init=None):
   1394 
   1395     print("Warning: The alias sionna.fec.utils.generate_prng_seq will not be " \
   1396           "included in Sionna 1.0. Please use " \
   1397           "sionna.nr.utils.generate_prng_seq instead.")
   1398 
   1399     # n_rnti and n_id have been integrated in the TB5GScrambler
   1400     assert (n_rnti is None), \
   1401         "The parameter n_rnti is deprecated and has been removed. Please " \
   1402         "refer to the TB5GScrambler for the Scrambler initialization."
   1403     assert (n_id is None), \
   1404         "The parameter n_id is deprecated and has been removed. Please " \
   1405         "refer to the TB5GScrambler for the Scrambler initialization."
   1406 
   1407     return generate_prng_seq_utils(length, c_init=c_init)