utils.py (9559B)
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Layer for utility functions needed for Turbo Codes."""

import math
import numpy as np
import tensorflow as tf


def polynomial_selector(constraint_length):
    r"""Returns the generator polynomials for rate-1/2 convolutional codes
    for a given ``constraint_length``.

    Input
    -----
    constraint_length: int
        An integer defining the desired constraint length of the encoder.
        The memory of the encoder is ``constraint_length`` - 1.

    Output
    ------
    gen_poly: tuple
        Tuple of strings with each string being a 0,1 sequence where
        each polynomial is represented in binary form.

    Note
    ----
    Please note that the polynomials are optimized for rsc codes and are
    not necessarily the same as used in the polynomial selector
    :class:`~sionna.fec.conv.utils.polynomial_selector` of the
    convolutional codes.
    """
    assert isinstance(constraint_length, int), \
        "constraint_length must be int."
    assert 2 < constraint_length < 7, \
        "Unsupported constraint_length."

    # Generator polynomials in binary string form, keyed by constraint
    # length; octal equivalents shown in the trailing comments.
    gen_poly_dict = {
        3: ('111', '101'),        # (7, 5)
        4: ('1011', '1101'),      # (13, 15)
        5: ('10011', '11011'),    # (23, 33)
        6: ('111101', '101011'),  # (75, 53)
    }
    return gen_poly_dict[constraint_length]


def puncture_pattern(turbo_coderate, conv_coderate):
    r"""This method returns puncturing pattern such that the
    Turbo code has rate ``turbo_coderate`` given the underlying
    convolutional encoder is of rate ``conv_coderate``.

    Input
    -----
    turbo_coderate: float
        Desired coderate of the Turbo code

    conv_coderate: float
        Coderate of the underlying convolutional encoder

    Output
    ------
    : tf.bool
        2D tensor indicating the positions to be punctured.
    """
    # Only rate-1/2 constituent encoders are supported.
    tf.debugging.assert_equal(conv_coderate, 1/2)
    if turbo_coderate == 1/2:
        pattern = [[1, 1, 0], [1, 0, 1]]
    elif turbo_coderate == 1/3:
        pattern = [[1, 1, 1]]
    else:
        raise ValueError("Unsupported coderate!")

    turbo_punct_pattern = tf.convert_to_tensor(
        np.asarray(pattern), dtype=bool)
    return turbo_punct_pattern


class TurboTermination:
    # pylint: disable=line-too-long
    r"""TurboTermination(constraint_length, conv_n=2, num_conv_encs=2, num_bitstreams=3)

    Termination object, handles the transformation of termination bits from
    the convolutional encoders to a Turbo codeword. Similarly, it handles the
    transformation of channel symbols corresponding to the termination of a
    Turbo codeword to the underlying convolutional codewords.

    Parameters
    ----------
    constraint_length: int
        Constraint length of the convolutional encoder used in the Turbo code.
        Note that the memory of the encoder is ``constraint_length`` - 1.

    conv_n: int
        Number of output bits for one state transition in the underlying
        convolutional encoder

    num_conv_encs: int
        Number of parallel convolutional encoders used in the Turbo code

    num_bitstreams: int
        Number of output bit streams from Turbo code
    """

    def __init__(self,
                 constraint_length,
                 conv_n=2,
                 num_conv_encs=2,
                 num_bitstreams=3):
        tf.debugging.assert_type(constraint_length, tf.int32)
        tf.debugging.assert_type(conv_n, tf.int32)
        tf.debugging.assert_type(num_conv_encs, tf.int32)
        tf.debugging.assert_type(num_bitstreams, tf.int32)

        # Memory of the constituent convolutional encoder.
        self.mu_ = constraint_length - 1
        self.conv_n = conv_n
        # Only the classic two-encoder Turbo structure is supported.
        tf.debugging.assert_equal(num_conv_encs, 2)
        self.num_conv_encs = num_conv_encs
        self.num_bitstreams = num_bitstreams

    def get_num_term_syms(self):
        r"""
        Computes the number of termination symbols for the Turbo
        code based on the underlying convolutional code parameters,
        primarily the memory :math:`\mu`.
        Note that it is assumed that one Turbo symbol implies
        ``num_bitstreams`` bits.

        Input
        -----
        None

        Output
        ------
        turbo_term_syms: int
            Total number of termination symbols for the Turbo Code. One
            symbol equals ``num_bitstreams`` bits.
        """
        total_term_bits = self.conv_n * self.num_conv_encs * self.mu_
        turbo_term_syms = math.ceil(total_term_bits/self.num_bitstreams)
        return turbo_term_syms

    def termbits_conv2turbo(self, term_bits1, term_bits2):
        # pylint: disable=line-too-long
        r"""
        This method merges ``term_bits1`` and ``term_bits2``, termination
        bit streams from the two convolutional encoders, to a bit stream
        corresponding to the Turbo codeword.

        Let ``term_bits1`` and ``term_bits2`` be:

        :math:`[x_1(K), z_1(K), x_1(K+1), z_1(K+1),..., x_1(K+\mu-1),z_1(K+\mu-1)]`

        :math:`[x_2(K), z_2(K), x_2(K+1), z_2(K+1),..., x_2(K+\mu-1), z_2(K+\mu-1)]`

        where :math:`x_i, z_i` are the systematic and parity bit streams
        respectively for a rate-1/2 convolutional encoder i, for i = 1, 2.

        In the example output below, we assume :math:`\mu=4` to demonstrate zero
        padding at the end. Zero padding is done such that the total length is
        divisible by ``num_bitstreams`` (defaults to 3) which is the number of
        Turbo bit streams.

        Assume ``num_bitstreams`` = 3. Then number of termination symbols for
        the TurboEncoder is :math:`\lceil \frac{2*conv\_n*\mu}{3} \rceil`:

        :math:`[x_1(K), z_1(K), x_1(K+1)]`

        :math:`[z_1(K+1), x_1(K+2), z_1(K+2)]`

        :math:`[x_1(K+3), z_1(K+3), x_2(K)]`

        :math:`[z_2(K), x_2(K+1), z_2(K+1)]`

        :math:`[x_2(K+2), z_2(K+2), x_2(K+3)]`

        :math:`[z_2(K+3), 0, 0]`

        Therefore, the output from this method is a single dimension vector
        where all Turbo symbols are concatenated together.

        :math:`[x_1(K), z_1(K), x_1(K+1), z_1(K+1), x_1(K+2), z_1(K+2), x_1(K+3),`

        :math:`z_1(K+3), x_2(K),z_2(K), x_2(K+1), z_2(K+1), x_2(K+2), z_2(K+2),`

        :math:`x_2(K+3), z_2(K+3), 0, 0]`

        Input
        -----
        term_bits1: tf.int32
            2+D Tensor containing termination bits from convolutional encoder 1

        term_bits2: tf.int32
            2+D Tensor containing termination bits from convolutional encoder 2

        Output
        ------
        : tf.int32
            1+D tensor of termination bits. The output is obtained by
            concatenating the inputs and then adding right zero-padding if
            needed.
        """
        term_bits = tf.concat([term_bits1, term_bits2], axis=-1)

        num_term_bits = term_bits.get_shape()[-1]
        num_term_syms = math.ceil(num_term_bits/self.num_bitstreams)

        # Right-pad with zeros so the total length is a multiple of
        # num_bitstreams. The padding must match the input dtype: the
        # previous hard-coded tf.float32 raised an error in tf.concat
        # whenever the termination bits were integer-typed.
        extra_bits = self.num_bitstreams*num_term_syms - num_term_bits
        if extra_bits > 0:
            # NOTE(review): zer_shape is rank-2 (batch, extra_bits), which
            # assumes 2D inputs despite the "2+D" docstring — confirm callers.
            zer_shape = tf.stack([tf.shape(term_bits)[0],
                                  tf.constant(extra_bits)],
                                 axis=0)
            term_bits = tf.concat(
                [term_bits, tf.zeros(zer_shape, term_bits.dtype)], axis=-1)
        return term_bits

    def term_bits_turbo2conv(self, term_bits):
        # pylint: disable=line-too-long
        r"""
        This method splits the termination symbols from a Turbo codeword
        to the termination symbols corresponding to the two convolutional
        encoders, respectively.

        Let's assume :math:`\mu=4` and the underlying convolutional encoders
        are systematic and rate-1/2, for demonstration purposes.

        Let ``term_bits`` tensor, corresponding to the termination symbols of
        the Turbo codeword be as following:

        :math:`y = [x_1(K), z_1(K), x_1(K+1), z_1(K+1), x_1(K+2), z_1(K+2)`,
        :math:`x_1(K+3), z_1(K+3), x_2(K), z_2(K), x_2(K+1), z_2(K+1),`
        :math:`x_2(K+2), z_2(K+2), x_2(K+3), z_2(K+3), 0, 0]`

        The two termination tensors corresponding to the convolutional encoders
        are:
        :math:`y[0,..., 2\mu]`, :math:`y[2\mu,..., 4\mu]`. The output from this method is a tuple of two tensors, each of
        size :math:`2\mu` and shape :math:`[\mu,2]`.

        :math:`[[x_1(K), z_1(K)]`,

        :math:`[x_1(K+1), z_1(K+1)]`,

        :math:`[x_1(K+2), z_1(K+2)]`,

        :math:`[x_1(K+3), z_1(K+3)]]`

        and

        :math:`[[x_2(K), z_2(K)],`

        :math:`[x_2(K+1), z_2(K+1)]`,

        :math:`[x_2(K+2), z_2(K+2)]`,

        :math:`[x_2(K+3), z_2(K+3)]]`

        Input
        -----
        term_bits: tf.float32
            Channel output of the Turbo codeword, corresponding to the
            termination part

        Output
        ------
        : tf.float32
            Two tensors of channel outputs, corresponding to encoders 1 and 2,
            respectively
        """
        # Sanity check: the termination block must contain whole Turbo
        # symbols (a multiple of num_bitstreams channel values).
        input_len = tf.shape(term_bits)[-1]
        divisible = tf.math.floormod(input_len, self.num_bitstreams)
        tf.assert_equal(divisible, 0, 'Programming Error.')

        # First conv_n*mu values belong to encoder 1, the next conv_n*mu
        # to encoder 2; any trailing zero-padding is discarded.
        enc1_term_idx = tf.range(0, self.conv_n*self.mu_)
        enc2_term_idx = tf.range(self.conv_n*self.mu_, 2*self.conv_n*self.mu_)

        term_bits1 = tf.gather(term_bits, enc1_term_idx, axis=-1)
        term_bits2 = tf.gather(term_bits, enc2_term_idx, axis=-1)

        return term_bits1, term_bits2