rayleigh_block_fading.py (4229B)
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Class for simulating Rayleigh block fading"""

import tensorflow as tf
from sionna import config
from . import ChannelModel

class RayleighBlockFading(ChannelModel):
    # pylint: disable=line-too-long
    r"""RayleighBlockFading(num_rx, num_rx_ant, num_tx, num_tx_ant, dtype=tf.complex64)

    Generate channel impulse responses corresponding to a Rayleigh block
    fading channel model.

    The channel impulse responses generated are formed of a single path with
    zero delay and a normally distributed fading coefficient.
    All time steps of a batch example share the same channel coefficient
    (block fading).

    This class can be used in conjunction with the classes that simulate the
    channel response in time or frequency domain, i.e.,
    :class:`~sionna.channel.OFDMChannel`,
    :class:`~sionna.channel.TimeChannel`,
    :class:`~sionna.channel.GenerateOFDMChannel`,
    :class:`~sionna.channel.ApplyOFDMChannel`,
    :class:`~sionna.channel.GenerateTimeChannel`,
    :class:`~sionna.channel.ApplyTimeChannel`.

    Parameters
    ----------

    num_rx : int
        Number of receivers (:math:`N_R`)

    num_rx_ant : int
        Number of antennas per receiver (:math:`N_{RA}`)

    num_tx : int
        Number of transmitters (:math:`N_T`)

    num_tx_ant : int
        Number of antennas per transmitter (:math:`N_{TA}`)

    dtype : tf.DType
        Complex datatype to use for internal processing and output.
        Defaults to `tf.complex64`.

    Input
    -----
    batch_size : int
        Batch size

    num_time_steps : int
        Number of time steps

    Output
    -------
    a : [batch size, num_rx, num_rx_ant, num_tx, num_tx_ant, num_paths = 1, num_time_steps], tf.complex
        Path coefficients

    tau : [batch size, num_rx, num_tx, num_paths = 1], tf.float
        Path delays [s]
    """

    def __init__( self,
                  num_rx,
                  num_rx_ant,
                  num_tx,
                  num_tx_ant,
                  dtype=tf.complex64):

        # Explicit raise instead of `assert`: assertions are stripped when
        # Python runs with optimizations (-O), which would silently let an
        # invalid dtype through.
        if not dtype.is_complex:
            raise ValueError("'dtype' must be complex type")
        self._dtype = dtype

        # We don't set these attributes as private so that the user can update
        # them
        self.num_tx = num_tx
        self.num_tx_ant = num_tx_ant
        self.num_rx = num_rx
        self.num_rx_ant = num_rx_ant

    def __call__(self, batch_size, num_time_steps, sampling_frequency=None):
        # `sampling_frequency` is accepted for interface compatibility with
        # the other channel models but is unused: the channel is constant
        # over the block, so the sampling rate does not matter here.

        # Delays
        # Single path with zero delay
        delays = tf.zeros([ batch_size,
                            self.num_rx,
                            self.num_tx,
                            1], # Single path
                            dtype=self._dtype.real_dtype)

        # Fading coefficients
        # Std of sqrt(1/2) per real dimension yields unit-power complex
        # coefficients. Computed as a Python float (double precision) directly
        # in the target real dtype, so that complex128 processing does not
        # inherit float32 rounding error from an intermediate tf.sqrt(0.5).
        std = tf.constant(0.5 ** 0.5, dtype=self._dtype.real_dtype)
        h_real = config.tf_rng.normal(shape=[   batch_size,
                                                self.num_rx,
                                                self.num_rx_ant,
                                                self.num_tx,
                                                self.num_tx_ant,
                                                1, # One path
                                                1], # Same response over the block
                                        stddev=std,
                                        dtype = self._dtype.real_dtype)
        h_img = config.tf_rng.normal(shape=[    batch_size,
                                                self.num_rx,
                                                self.num_rx_ant,
                                                self.num_tx,
                                                self.num_tx_ant,
                                                1, # One path
                                                1], # Same response over the block
                                        stddev=std,
                                        dtype = self._dtype.real_dtype)
        h = tf.complex(h_real, h_img)
        # Tile the response over the block: every time step reuses the same
        # coefficient (block fading).
        h = tf.tile(h, [1, 1, 1, 1, 1, 1, num_time_steps])
        return h, delays