#include <itpp/comm/convcode.h>
#include <itpp/base/binary.h>
#include <itpp/base/matfunc.h>
#include <limits>

namespace itpp
{

// ----------------- Protected functions -----------------------------

/*
  The weight of the transition from given state with the input given
*/
int Convolutional_Code::weight(const int state, const int input)
{
  int i, j, temp, out, w = 0, shiftreg = state;

  shiftreg = shiftreg | (input << m);
  for (j = 0; j < n; j++) {
    out = 0;
    temp = shiftreg & gen_pol(j);
    for (i = 0; i < K; i++) {
      out ^= (temp & 1);
      temp = temp >> 1;
    }
    w += out;
    //w += weight_int_table(temp);
  }
  return w;
}

/*
  The weight (of the reverse code) of the transition from given state with
  the input given
*/
int Convolutional_Code::weight_reverse(const int state, const int input)
{
  int i, j, temp, out, w = 0, shiftreg = state;

  shiftreg = shiftreg | (input << m);
  for (j = 0; j < n; j++) {
    out = 0;
    temp = shiftreg & gen_pol_rev(j);
    for (i = 0; i < K; i++) {
      out ^= (temp & 1);
      temp = temp >> 1;
    }
    w += out;
    //w += weight_int_table(temp);
  }
  return w;
}

/*
  The weight of the two paths (input 0 or 1) from given state
*/
void Convolutional_Code::weight(const int state, int &w0, int &w1)
{
  int i, j, temp, out, shiftreg = state;
  w0 = 0;
  w1 = 0;

  shiftreg = shiftreg | (1 << m);
  for (j = 0; j < n; j++) {
    out = 0;
    temp = shiftreg & gen_pol(j);

    for (i = 0; i < m; i++) {
      out ^= (temp & 1);
      temp = temp >> 1;
    }
    w0 += out;
    w1 += out ^ (temp & 1);
  }
}

/*
  The weight (of the reverse code) of the two paths (input 0 or 1) from
  given state
*/
void Convolutional_Code::weight_reverse(const int state, int &w0, int &w1)
{
  int i, j, temp, out, shiftreg = state;
  w0 = 0;
  w1 = 0;

  shiftreg = shiftreg | (1 << m);
  for (j = 0; j < n; j++) {
    out = 0;
    temp = shiftreg & gen_pol_rev(j);

    for (i = 0; i < m; i++) {
      out ^= (temp & 1);
      temp = temp >> 1;
    }
    w0 += out;
    w1 += out ^ (temp & 1);
  }
}

/*
  Output on transition (backwards) with input from state
*/
bvec Convolutional_Code::output_reverse(const int state, const int input)
{
  int j, temp, temp_state;

  bvec output(n);

  temp_state = (state << 1) | input;
  for (j = 0; j < n; j++) {
    temp = temp_state & gen_pol(j);
    output(j) = xor_int_table(temp);
  }

  return output;
}

/*
  Output on transition (backwards) with input from state
*/
void Convolutional_Code::output_reverse(const int state, bvec &zero_output,
                                        bvec &one_output)
{
  int j, temp, temp_state;
  bin one_bit;

  temp_state = (state << 1) | 1;
  for (j = 0; j < n; j++) {
    temp = temp_state & gen_pol(j);
    one_bit = temp & 1;
    temp = temp >> 1;
    one_output(j) = xor_int_table(temp) ^ one_bit;
    zero_output(j) = xor_int_table(temp);
  }
}

/*
  Output on transition (backwards) with input from state
*/
void Convolutional_Code::output_reverse(const int state, int &zero_output,
                                        int &one_output)
{
  int j, temp, temp_state;
  bin one_bit;

  zero_output = 0, one_output = 0;
  temp_state = (state << 1) | 1;
  for (j = 0; j < n; j++) {
    temp = temp_state & gen_pol(j);
    one_bit = temp & 1;
    temp = temp >> 1;

    one_output = (one_output << 1) | int(xor_int_table(temp) ^ one_bit);
    zero_output = (zero_output << 1) | int(xor_int_table(temp));
  }
}

/*
  Metrics on transition (backwards) with input 0 and 1 from state
*/
void Convolutional_Code::calc_metric_reverse(int state,
                                             const vec &rx_codeword,
                                             double &zero_metric,
                                             double &one_metric)
{
  int temp;
  bin one_bit;
  one_metric = zero_metric = 0;

  int temp_state = (state << 1) | 1;
  for (int j = 0; j < n; j++) {
    temp = temp_state & gen_pol(j);
    one_bit = temp & 1;
    temp >>= 1;
    one_metric += (2 * static_cast<int>(xor_int_table(temp) ^ one_bit) - 1)
                  * rx_codeword(j);
    zero_metric += (2 * static_cast<int>(xor_int_table(temp)) - 1)
                   * rx_codeword(j);
  }
}


// calculates metrics for all codewords (2^n of them) in natural order
void Convolutional_Code::calc_metric(const vec &rx_codeword,
                                     vec &delta_metrics)
{
  int no_outputs = pow2i(n), no_loop = pow2i(n - 1), mask = no_outputs - 1,
      temp;
  delta_metrics.set_size(no_outputs, false);

  if (no_outputs <= no_states) {
    for (int i = 0; i < no_loop; i++) { // all input possibilities
      delta_metrics(i) = 0;
      temp = i;
      for (int j = n - 1; j >= 0; j--) {
        if (temp & 1)
          delta_metrics(i) += rx_codeword(j);
        else
          delta_metrics(i) -= rx_codeword(j);
        temp >>= 1;
      }
      delta_metrics(i ^ mask) = -delta_metrics(i); // the inverse codeword
    }
  }
  else {
    double zero_metric, one_metric;
    int zero_output, one_output, temp_state;
    bin one_bit;
    for (int s = 0; s < no_states; s++) { // all states
      zero_metric = 0, one_metric = 0;
      zero_output = 0, one_output = 0;
      temp_state = (s << 1) | 1;
      for (int j = 0; j < n; j++) {
        temp = temp_state & gen_pol(j);
        one_bit = temp & 1;
        temp >>= 1;
        if (xor_int_table(temp)) {
          zero_metric += rx_codeword(j);
          one_metric -= rx_codeword(j);
        }
        else {
          zero_metric -= rx_codeword(j);
          one_metric += rx_codeword(j);
        }
        one_output = (one_output << 1)
                     | static_cast<int>(xor_int_table(temp) ^ one_bit);
        zero_output = (zero_output << 1)
                      | static_cast<int>(xor_int_table(temp));
      }
      delta_metrics(zero_output) = zero_metric;
      delta_metrics(one_output) = one_metric;
    }
  }
}

// MFD codes R=1/2
int Conv_Code_MFD_2[15][2] = {
  {0, 0},
  {0, 0},
  {0, 0},
  {05, 07},
  {015, 017},
  {023, 035},
  {053, 075},
  {0133, 0171},
  {0247, 0371},
  {0561, 0753},
  {01167, 01545},
  {02335, 03661},
  {04335, 05723},
  {010533, 017661},
  {021675, 027123},
};

// MFD codes R=1/3
int Conv_Code_MFD_3[15][3] = {
  {0, 0, 0},
  {0, 0, 0},
  {0, 0, 0},
  {05, 07, 07},
  {013, 015, 017},
  {025, 033, 037},
  {047, 053, 075},
  {0133, 0145, 0175},
  {0225, 0331, 0367},
  {0557, 0663, 0711},
  {0117, 01365, 01633},
  {02353, 02671, 03175},
  {04767, 05723, 06265},
  {010533, 010675, 017661},
  {021645, 035661, 037133},
};

// MFD codes R=1/4
int Conv_Code_MFD_4[15][4] = {
  {0, 0, 0, 0},
  {0, 0, 0, 0},
  {0, 0, 0, 0},
  {05, 07, 07, 07},
  {013, 015, 015, 017},
  {025, 027, 033, 037},
  {053, 067, 071, 075},
  {0135, 0135, 0147, 0163},
  {0235, 0275, 0313, 0357},
  {0463, 0535, 0733, 0745},
  {0117, 01365, 01633, 01653},
  {02327, 02353, 02671, 03175},
  {04767, 05723, 06265, 07455},
  {011145, 012477, 015573, 016727},
  {021113, 023175, 035527, 035537},
};


// MFD codes R=1/5
int Conv_Code_MFD_5[9][5] = {
  {0, 0, 0, 0, 0},
  {0, 0, 0, 0, 0},
  {0, 0, 0, 0, 0},
  {07, 07, 07, 05, 05},
  {017, 017, 013, 015, 015},
  {037, 027, 033, 025, 035},
  {075, 071, 073, 065, 057},
  {0175, 0131, 0135, 0135, 0147},
  {0257, 0233, 0323, 0271, 0357},
};

// MFD codes R=1/6
int Conv_Code_MFD_6[9][6] = {
  {0, 0, 0, 0, 0, 0},
  {0, 0, 0, 0, 0, 0},
  {0, 0, 0, 0, 0, 0},
  {07, 07, 07, 07, 05, 05},
  {017, 017, 013, 013, 015, 015},
  {037, 035, 027, 033, 025, 035},
  {073, 075, 055, 065, 047, 057},
  {0173, 0151, 0135, 0135, 0163, 0137},
  {0253, 0375, 0331, 0235, 0313, 0357},
};

// MFD codes R=1/7
int Conv_Code_MFD_7[9][7] = {
  {0, 0, 0, 0, 0, 0, 0},
  {0, 0, 0, 0, 0, 0, 0},
  {0, 0, 0, 0, 0, 0, 0},
  {07, 07, 07, 07, 05, 05, 05},
  {017, 017, 013, 013, 013, 015, 015},
  {035, 027, 025, 027, 033, 035, 037},
  {053, 075, 065, 075, 047, 067, 057},
  {0165, 0145, 0173, 0135, 0135, 0147, 0137},
  {0275, 0253, 0375, 0331, 0235, 0313, 0357},
};

// MFD codes R=1/8
int Conv_Code_MFD_8[9][8] = {
  {0, 0, 0, 0, 0, 0, 0, 0},
  {0, 0, 0, 0, 0, 0, 0, 0},
  {0, 0, 0, 0, 0, 0, 0, 0},
  {07, 07, 05, 05, 05, 07, 07, 07},
  {017, 017, 013, 013, 013, 015, 015, 017},
  {037, 033, 025, 025, 035, 033, 027, 037},
  {057, 073, 051, 065, 075, 047, 067, 057},
  {0153, 0111, 0165, 0173, 0135, 0135, 0147, 0137},
  {0275, 0275, 0253, 0371, 0331, 0235, 0313, 0357},
};

int maxK_Conv_Code_MFD[9] = {0, 0, 14, 14, 14, 8, 8, 8, 8};

void get_MFD_gen_pol(int n, int K, ivec & gen)
{
  gen.set_size(n);

  switch (n) {
  case 2: // Rate 1/2
    it_assert(K >= 3 && K <= maxK_Conv_Code_MFD[2], "This convolutional code doesn't exist in the tables");
    gen(0) = Conv_Code_MFD_2[K][0];
    gen(1) = Conv_Code_MFD_2[K][1];
    break;
  case 3: // Rate 1/3
    it_assert(K >= 3 && K <= maxK_Conv_Code_MFD[3], "This convolutional code doesn't exist in the tables");
    gen(0) = Conv_Code_MFD_3[K][0];
    gen(1) = Conv_Code_MFD_3[K][1];
    gen(2) = Conv_Code_MFD_3[K][2];
    break;
  case 4: // Rate 1/4
    it_assert(K >= 3 && K <= maxK_Conv_Code_MFD[4], "This convolutional code doesn't exist in the tables");
    gen(0) = Conv_Code_MFD_4[K][0];
    gen(1) = Conv_Code_MFD_4[K][1];
    gen(2) = Conv_Code_MFD_4[K][2];
    gen(3) = Conv_Code_MFD_4[K][3];
    break;
  case 5: // Rate 1/5
    it_assert(K >= 3 && K <= maxK_Conv_Code_MFD[5], "This convolutional code doesn't exist in the tables");
    gen(0) = Conv_Code_MFD_5[K][0];
    gen(1) = Conv_Code_MFD_5[K][1];
    gen(2) = Conv_Code_MFD_5[K][2];
    gen(3) = Conv_Code_MFD_5[K][3];
    gen(4) = Conv_Code_MFD_5[K][4];
    break;
  case 6: // Rate 1/6
    it_assert(K >= 3 && K <= maxK_Conv_Code_MFD[6], "This convolutional code doesn't exist in the tables");
    gen(0) = Conv_Code_MFD_6[K][0];
    gen(1) = Conv_Code_MFD_6[K][1];
    gen(2) = Conv_Code_MFD_6[K][2];
    gen(3) = Conv_Code_MFD_6[K][3];
    gen(4) = Conv_Code_MFD_6[K][4];
    gen(5) = Conv_Code_MFD_6[K][5];
    break;
  case 7: // Rate 1/7
    it_assert(K >= 3 && K <= maxK_Conv_Code_MFD[7], "This convolutional code doesn't exist in the tables");
    gen(0) = Conv_Code_MFD_7[K][0];
    gen(1) = Conv_Code_MFD_7[K][1];
    gen(2) = Conv_Code_MFD_7[K][2];
    gen(3) = Conv_Code_MFD_7[K][3];
    gen(4) = Conv_Code_MFD_7[K][4];
    gen(5) = Conv_Code_MFD_7[K][5];
    gen(6) = Conv_Code_MFD_7[K][6];
    break;
  case 8: // Rate 1/8
    it_assert(K >= 3 && K <= maxK_Conv_Code_MFD[8], "This convolutional code doesn't exist in the tables");
    gen(0) = Conv_Code_MFD_8[K][0];
    gen(1) = Conv_Code_MFD_8[K][1];
    gen(2) = Conv_Code_MFD_8[K][2];
    gen(3) = Conv_Code_MFD_8[K][3];
    gen(4) = Conv_Code_MFD_8[K][4];
    gen(5) = Conv_Code_MFD_8[K][5];
    gen(6) = Conv_Code_MFD_8[K][6];
    gen(7) = Conv_Code_MFD_8[K][7];
    break;
  default: // wrong rate
    it_assert(false, "This convolutional code doesn't exist in the tables");
  }
}
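
/*
  Usage sketch (illustrative, not part of the library): look up the maximum
  free distance generators for a rate 1/2, constraint length 7 code. The
  table entries above are Proakis-style octal connection vectors, so K = 7
  yields the well-known (0133, 0171) pair.

    ivec gen;
    get_MFD_gen_pol(2, 7, gen);   // gen(0) = 0133, gen(1) = 0171
*/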

// ODS codes R=1/2
int Conv_Code_ODS_2[17][2] = {
  {0, 0},
  {0, 0},
  {0, 0},
  {05, 07},
  {015, 017},
  {023, 035},
  {053, 075},
  {0133, 0171},
  {0247, 0371},
  {0561, 0753},
  {01151, 01753},
  {03345, 03613},
  {05261, 07173},
  {012767, 016461},
  {027251, 037363},
  {063057, 044735},
  {0126723, 0152711},
};

// ODS codes R=1/3
int Conv_Code_ODS_3[14][3] = {
  {0, 0, 0},
  {0, 0, 0},
  {0, 0, 0},
  {05, 07, 07},
  {013, 015, 017},
  {025, 033, 037},
  {047, 053, 075},
  {0133, 0165, 0171},
  {0225, 0331, 0367},
  {0575, 0623, 0727},
  {01233, 01375, 01671},
  {02335, 02531, 03477},
  {05745, 06471, 07553},
  {013261, 015167, 017451},
};

// ODS codes R=1/4
int Conv_Code_ODS_4[12][4] = {
  {0, 0, 0, 0},
  {0, 0, 0, 0},
  {0, 0, 0, 0},
  {05, 05, 07, 07},
  {013, 015, 015, 017},
  {025, 027, 033, 037},
  {051, 055, 067, 077},
  {0117, 0127, 0155, 0171},
  {0231, 0273, 0327, 0375},
  {0473, 0513, 0671, 0765},
  {01173, 01325, 01467, 01751},
  {02565, 02747, 03311, 03723},
};

int maxK_Conv_Code_ODS[5] = {0, 0, 16, 13, 11};

void get_ODS_gen_pol(int n, int K, ivec & gen)
{
  gen.set_size(n);

  switch (n) {
  case 2: // Rate 1/2
    it_assert(K >= 3 && K <= maxK_Conv_Code_ODS[2], "This convolutional code doesn't exist in the tables");
    gen(0) = Conv_Code_ODS_2[K][0];
    gen(1) = Conv_Code_ODS_2[K][1];
    break;
  case 3: // Rate 1/3
    it_assert(K >= 3 && K <= maxK_Conv_Code_ODS[3], "This convolutional code doesn't exist in the tables");
    gen(0) = Conv_Code_ODS_3[K][0];
    gen(1) = Conv_Code_ODS_3[K][1];
    gen(2) = Conv_Code_ODS_3[K][2];
    break;
  case 4: // Rate 1/4
    it_assert(K >= 3 && K <= maxK_Conv_Code_ODS[4], "This convolutional code doesn't exist in the tables");
    gen(0) = Conv_Code_ODS_4[K][0];
    gen(1) = Conv_Code_ODS_4[K][1];
    gen(2) = Conv_Code_ODS_4[K][2];
    gen(3) = Conv_Code_ODS_4[K][3];
    break;
  default: // wrong rate
    it_assert(false, "This convolutional code doesn't exist in the tables");
  }
}


// --------------- Public functions -------------------------

void Convolutional_Code::set_code(const CONVOLUTIONAL_CODE_TYPE type_of_code,
                                  int inverse_rate, int constraint_length)
{
  ivec gen;

  if (type_of_code == MFD)
    get_MFD_gen_pol(inverse_rate, constraint_length, gen);
  else if (type_of_code == ODS)
    get_ODS_gen_pol(inverse_rate, constraint_length, gen);
  else
    it_assert(false, "This convolutional code doesn't exist in the tables");

  set_generator_polynomials(gen, constraint_length);
}

/*
  Set generator polynomials. Given in Proakis integer form
*/
void Convolutional_Code::set_generator_polynomials(const ivec &gen,
                                                   int constraint_length)
{
  it_error_if(constraint_length <= 0, "Convolutional_Code::set_generator_polynomials(): Constraint length out of range");
  gen_pol = gen;
  n = gen.size();
  it_error_if(n <= 0, "Convolutional_Code::set_generator_polynomials(): Invalid code rate");

  // Generate a look-up table of the parity (XOR) of integers of size K bits
  if (constraint_length != K) {
    K = constraint_length;
    xor_int_table.set_size(pow2i(K), false);
    for (int i = 0; i < pow2i(K); i++) {
      xor_int_table(i) = (weight_int(K, i) & 1);
    }
  }

  trunc_length = 5 * K;
  m = K - 1;
  no_states = pow2i(m);
  gen_pol_rev.set_size(n, false);
  rate = 1.0 / n;

  for (int i = 0; i < n; i++) {
    gen_pol_rev(i) = reverse_int(K, gen_pol(i));
  }

  int zero_output, one_output;
  output_reverse_int.set_size(no_states, 2, false);

  for (int i = 0; i < no_states; i++) {
    output_reverse(i, zero_output, one_output);
    output_reverse_int(i, 0) = zero_output;
    output_reverse_int(i, 1) = one_output;
  }

  // initialise memory structures
  visited_state.set_size(no_states);
  visited_state = false;
  visited_state(start_state) = true; // mark start state

  sum_metric.set_size(no_states);
  sum_metric.clear();

  trunc_ptr = 0;
  trunc_state = 0;
}
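
/*
  Usage sketch (illustrative, not part of the library): two equivalent ways
  to configure the standard rate 1/2, K = 7 code.

    Convolutional_Code code;

    // via the built-in MFD tables
    code.set_code(MFD, 2, 7);

    // or directly with Proakis-form octal generators
    ivec gen(2);
    gen(0) = 0133;   // octal
    gen(1) = 0171;   // octal
    code.set_generator_polynomials(gen, 7);
*/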

// Reset encoder and decoder states
void Convolutional_Code::reset()
{
  init_encoder();

  visited_state = false;
  visited_state(start_state) = true; // mark start state

  sum_metric.clear();

  trunc_ptr = 0;
  trunc_state = 0;
}


/*
  Encode a binary vector of inputs using specified method
*/
void Convolutional_Code::encode(const bvec &input, bvec &output)
{
  switch (cc_method) {
  case Trunc:
    encode_trunc(input, output);
    break;
  case Tailbite:
    encode_tailbite(input, output);
    break;
  case Tail:
  default:
    encode_tail(input, output);
    break;
  };
}

/*
  Encode a binary vector of inputs starting from state set by the
  set_state function
*/
void Convolutional_Code::encode_trunc(const bvec &input, bvec &output)
{
  int temp;
  output.set_size(input.size() * n, false);

  for (int i = 0; i < input.size(); i++) {
    encoder_state |= static_cast<int>(input(i)) << m;
    for (int j = 0; j < n; j++) {
      temp = encoder_state & gen_pol(j);
      output(i * n + j) = xor_int_table(temp);
    }
    encoder_state >>= 1;
  }
}

/*
  Encode a binary vector of inputs starting from zero state and also adds
  a tail of K-1 zeros to force the encoder into the zero state. Well
  suited for packet transmission.
*/
void Convolutional_Code::encode_tail(const bvec &input, bvec &output)
{
  int temp;
  output.set_size((input.size() + m) * n, false);

  // always start from state 0
  encoder_state = 0;

  for (int i = 0; i < input.size(); i++) {
    encoder_state |= static_cast<int>(input(i)) << m;
    for (int j = 0; j < n; j++) {
      temp = encoder_state & gen_pol(j);
      output(i * n + j) = xor_int_table(temp);
    }
    encoder_state >>= 1;
  }

  // add tail of m = K-1 zeros
  for (int i = input.size(); i < input.size() + m; i++) {
    for (int j = 0; j < n; j++) {
      temp = encoder_state & gen_pol(j);
      output(i * n + j) = xor_int_table(temp);
    }
    encoder_state >>= 1;
  }
}
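
/*
  Usage sketch (illustrative, not part of the library), assuming 'code' has
  been configured as in the sketch above: encode a packet with a terminating
  tail. With K = 7 and rate 1/2 the output holds (length + K - 1) * 2 bits.

    bvec info = randb(100);          // 100 random information bits
    bvec coded;
    code.encode_tail(info, coded);   // coded.size() == (100 + 6) * 2
*/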

/*
  Encode a binary vector of inputs using tail biting
*/
void Convolutional_Code::encode_tailbite(const bvec &input, bvec &output)
{
  int temp;
  output.set_size(input.size() * n, false);

  // Set the start state equal to the end state:
  encoder_state = 0;
  bvec last_bits = input.right(m);
  for (int i = 0; i < m; i++) {
    encoder_state |= static_cast<int>(last_bits(i)) << m;
    encoder_state >>= 1;
  }

  for (int i = 0; i < input.size(); i++) {
    encoder_state |= static_cast<int>(input(i)) << m;
    for (int j = 0; j < n; j++) {
      temp = encoder_state & gen_pol(j);
      output(i * n + j) = xor_int_table(temp);
    }
    encoder_state >>= 1;
  }
}

/*
  Encode a binary bit starting from the internal encoder state.
  To initialize the encoder state use set_start_state() and init_encoder()
*/
void Convolutional_Code::encode_bit(const bin &input, bvec &output)
{
  int temp;
  output.set_size(n, false);

  encoder_state |= static_cast<int>(input) << m;
  for (int j = 0; j < n; j++) {
    temp = encoder_state & gen_pol(j);
    output(j) = xor_int_table(temp);
  }
  encoder_state >>= 1;
}


// --------------- Hard-decision decoding is not implemented -----------------

void Convolutional_Code::decode(const bvec &, bvec &)
{
  it_error("Convolutional_Code::decode(): Hard-decision decoding not implemented");
}

bvec Convolutional_Code::decode(const bvec &)
{
  it_error("Convolutional_Code::decode(): Hard-decision decoding not implemented");
  return bvec();
}


/*
  Decode a block of encoded data using the specified method
*/
void Convolutional_Code::decode(const vec &received_signal, bvec &output)
{
  switch (cc_method) {
  case Trunc:
    decode_trunc(received_signal, output);
    break;
  case Tailbite:
    decode_tailbite(received_signal, output);
    break;
  case Tail:
  default:
    decode_tail(received_signal, output);
    break;
  };
}

/*
  Decode a block of encoded data where encode_tail has been used.
  Thus it assumes a decoder start state of zero and that a tail of
  K-1 zeros has been added. No memory truncation.
*/
void Convolutional_Code::decode_tail(const vec &received_signal, bvec &output)
{
  int block_length = received_signal.size() / n; // number of input symbols
  it_error_if(block_length - m <= 0,
              "Convolutional_Code::decode_tail(): Input sequence too short");
  int S0, S1;
  vec temp_sum_metric(no_states), temp_rec(n), delta_metrics;
  Array<bool> temp_visited_state(no_states);
  double temp_metric_zero, temp_metric_one;

  path_memory.set_size(no_states, block_length, false);
  output.set_size(block_length - m, false); // no tail in the output

  // clear visited states
  visited_state = false;
  temp_visited_state = visited_state;
  visited_state(0) = true; // starts in the zero state

  // clear accumulated metrics
  sum_metric.clear();

  // evolve up to m where not all states are visited
  for (int l = 0; l < m; l++) { // all transitions including the tail
    temp_rec = received_signal.mid(l * n, n);

    // calculate all metrics for all codewords at the same time
    calc_metric(temp_rec, delta_metrics);

    for (int s = 0; s < no_states; s++) { // all states
      // S0 and S1 are the states that expanded end at state s
      previous_state(s, S0, S1);
      if (visited_state(S0)) { // expand trellis if state S0 is visited
        temp_metric_zero = sum_metric(S0)
                           + delta_metrics(output_reverse_int(s, 0));
        temp_visited_state(s) = true;
      }
      else {
        temp_metric_zero = std::numeric_limits<double>::max();
      }
      if (visited_state(S1)) { // expand trellis if state S1 is visited
        temp_metric_one = sum_metric(S1)
                          + delta_metrics(output_reverse_int(s, 1));
        temp_visited_state(s) = true;
      }
      else {
        temp_metric_one = std::numeric_limits<double>::max();
      }
      if (temp_metric_zero < temp_metric_one) { // path zero survives
        temp_sum_metric(s) = temp_metric_zero;
        path_memory(s, l) = 0;
      }
      else { // path one survives
        temp_sum_metric(s) = temp_metric_one;
        path_memory(s, l) = 1;
      }
    } // all states, s
    sum_metric = temp_sum_metric;
    visited_state = temp_visited_state;
  } // all transitions, l

  // evolve from m to the end of the block
  for (int l = m; l < block_length; l++) { // all transitions except the tail
    temp_rec = received_signal.mid(l * n, n);

    // calculate all metrics for all codewords at the same time
    calc_metric(temp_rec, delta_metrics);

    for (int s = 0; s < no_states; s++) { // all states
      // S0 and S1 are the states that expanded end at state s
      previous_state(s, S0, S1);
      temp_metric_zero = sum_metric(S0)
                         + delta_metrics(output_reverse_int(s, 0));
      temp_metric_one = sum_metric(S1)
                        + delta_metrics(output_reverse_int(s, 1));
      if (temp_metric_zero < temp_metric_one) { // path zero survives
        temp_sum_metric(s) = temp_metric_zero;
        path_memory(s, l) = 0;
      }
      else { // path one survives
        temp_sum_metric(s) = temp_metric_one;
        path_memory(s, l) = 1;
      }
    } // all states, s
    sum_metric = temp_sum_metric;
  } // all transitions, l

  // the minimum metric is in the zero state due to the tail
  int min_metric_state = 0;
  // trace back to remove the tail of zeros
  for (int l = block_length - 1; l > block_length - 1 - m; l--) {
    // previous state calculation
    min_metric_state = previous_state(min_metric_state,
                                      path_memory(min_metric_state, l));
  }

  // trace back to calculate the output sequence
  for (int l = block_length - 1 - m; l >= 0; l--) {
    output(l) = get_input(min_metric_state);
    // previous state calculation
    min_metric_state = previous_state(min_metric_state,
                                      path_memory(min_metric_state, l));
  }
}
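
/*
  Usage sketch (illustrative, not part of the library): soft-decision
  decoding of a tail-terminated block. Note the sign convention implied by
  calc_metric() above: the decoder expects soft values in which a
  transmitted 0 maps to +1 and a transmitted 1 maps to -1.

    bvec info = randb(100);
    bvec coded, decoded;
    code.encode_tail(info, coded);

    vec rx(coded.size());
    for (int i = 0; i < coded.size(); i++)
      rx(i) = 1.0 - 2.0 * static_cast<int>(coded(i));  // noiseless channel

    code.decode_tail(rx, decoded);   // decoded == info
*/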


/*
  Decode a block of encoded data where encode_tailbite has been used.
*/
void Convolutional_Code::decode_tailbite(const vec &received_signal,
                                         bvec &output)
{
  int block_length = received_signal.size() / n; // number of input symbols
  it_error_if(block_length <= 0,
              "Convolutional_Code::decode_tailbite(): Input sequence too short");
  int S0, S1;
  vec temp_sum_metric(no_states), temp_rec(n), delta_metrics;
  Array<bool> temp_visited_state(no_states);
  double temp_metric_zero, temp_metric_one;
  double best_metric = std::numeric_limits<double>::max();
  bvec best_output(block_length), temp_output(block_length);

  path_memory.set_size(no_states, block_length, false);
  output.set_size(block_length, false);


  // Try all start states ss
  for (int ss = 0; ss < no_states; ss++) {
    // Clear the visited_state vector:
    visited_state = false;
    temp_visited_state = visited_state;
    visited_state(ss) = true; // starts in state ss

    // clear accumulated metrics
    sum_metric.zeros();

    for (int l = 0; l < block_length; l++) { // all transitions
      temp_rec = received_signal.mid(l * n, n);
      // calculate all metrics for all codewords at the same time
      calc_metric(temp_rec, delta_metrics);

      for (int s = 0; s < no_states; s++) { // all states
        // S0 and S1 are the states that expanded end at state s
        previous_state(s, S0, S1);
        if (visited_state(S0)) { // expand trellis if state S0 is visited
          temp_metric_zero = sum_metric(S0)
                             + delta_metrics(output_reverse_int(s, 0));
          temp_visited_state(s) = true;
        }
        else {
          temp_metric_zero = std::numeric_limits<double>::max();
        }
        if (visited_state(S1)) { // expand trellis if state S1 is visited
          temp_metric_one = sum_metric(S1)
                            + delta_metrics(output_reverse_int(s, 1));
          temp_visited_state(s) = true;
        }
        else {
          temp_metric_one = std::numeric_limits<double>::max();
        }
        if (temp_metric_zero < temp_metric_one) { // path zero survives
          temp_sum_metric(s) = temp_metric_zero;
          path_memory(s, l) = 0;
        }
        else { // path one survives
          temp_sum_metric(s) = temp_metric_one;
          path_memory(s, l) = 1;
        }
      } // all states, s
      sum_metric = temp_sum_metric;
      visited_state = temp_visited_state;
    } // all transitions, l

    // the minimum metric is in state ss due to the tail biting
    int min_metric_state = ss;

    // trace back to calculate the output sequence
    for (int l = block_length - 1; l >= 0; l--) {
      temp_output(l) = get_input(min_metric_state);
      // previous state calculation
      min_metric_state = previous_state(min_metric_state,
                                        path_memory(min_metric_state, l));
    }
    if (sum_metric(ss) < best_metric) {
      best_metric = sum_metric(ss);
      best_output = temp_output;
    }
  } // all start states ss
  output = best_output;
}


/*
  Viterbi decoding using truncation of memory (default = 5*K)
*/
void Convolutional_Code::decode_trunc(const vec &received_signal,
                                      bvec &output)
{
  int block_length = received_signal.size() / n; // number of input symbols
  it_error_if(block_length <= 0,
              "Convolutional_Code::decode_trunc(): Input sequence too short");
  int S0, S1;
  vec temp_sum_metric(no_states), temp_rec(n), delta_metrics;
  Array<bool> temp_visited_state(no_states);
  double temp_metric_zero, temp_metric_one;

  path_memory.set_size(no_states, trunc_length, false);
  output.set_size(0);

  // copy visited states
  temp_visited_state = visited_state;

  for (int i = 0; i < block_length; i++) {
    // update path memory pointer
    trunc_ptr = (trunc_ptr + 1) % trunc_length;

    temp_rec = received_signal.mid(i * n, n);
    // calculate all metrics for all codewords at the same time
    calc_metric(temp_rec, delta_metrics);

    for (int s = 0; s < no_states; s++) { // all states
      // the states that expanded end at state s
      previous_state(s, S0, S1);
      if (visited_state(S0)) { // expand trellis if state S0 is visited
        temp_metric_zero = sum_metric(S0)
                           + delta_metrics(output_reverse_int(s, 0));
        temp_visited_state(s) = true;
      }
      else {
        temp_metric_zero = std::numeric_limits<double>::max();
      }
      if (visited_state(S1)) { // expand trellis if state S1 is visited
        temp_metric_one = sum_metric(S1)
                          + delta_metrics(output_reverse_int(s, 1));
        temp_visited_state(s) = true;
      }
      else {
        temp_metric_one = std::numeric_limits<double>::max();
      }
      if (temp_metric_zero < temp_metric_one) { // path zero survives
        temp_sum_metric(s) = temp_metric_zero;
        path_memory(s, trunc_ptr) = 0;
      }
      else { // path one survives
        temp_sum_metric(s) = temp_metric_one;
        path_memory(s, trunc_ptr) = 1;
      }
    } // all states, s
    sum_metric = temp_sum_metric;
    visited_state = temp_visited_state;

    // find minimum metric
    int min_metric_state = min_index(sum_metric);

    // normalise accumulated metrics
    sum_metric -= sum_metric(min_metric_state);

    // check if we have accumulated enough metrics to generate output
    if (trunc_state >= trunc_length) {
      // trace back to calculate the output symbol
      for (int j = trunc_length; j > 0; j--) {
        // previous state calculation
        min_metric_state =
          previous_state(min_metric_state,
                         path_memory(min_metric_state,
                                     (trunc_ptr + j) % trunc_length));
      }
      output.ins(output.size(), get_input(min_metric_state));
    }
    else { // if not, increment the trunc_state counter
      trunc_state++;
    }
  } // end for (int i = 0; i < block_length; i++)
}
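
/*
  Usage sketch (illustrative, not part of the library): truncated
  (streaming) decoding. A decision is only released once trunc_length
  (= 5 * K by default, see set_generator_polynomials()) branch metrics have
  been accumulated, so the first call returns trunc_length fewer bits than
  it was fed; decoder state is kept between calls until reset().

    code.reset();
    bvec info = randb(1000);
    bvec coded, decoded;
    code.encode_trunc(info, coded);

    vec rx(coded.size());
    for (int i = 0; i < coded.size(); i++)
      rx(i) = 1.0 - 2.0 * static_cast<int>(coded(i));

    code.decode_trunc(rx, decoded);  // decoded(k) estimates info(k), k < 1000 - 5*K
*/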


/*
  Calculate the inverse sequence

  Assumes that encode_tail is used in the encoding process. Returns false
  if there is an error in the coded sequence (not a valid codeword). It does
  not check that the tail forces the encoder into the zero state.
*/
bool Convolutional_Code::inverse_tail(const bvec coded_sequence, bvec &input)
{
  int state = 0, zero_state, one_state, zero_temp, one_temp, i, j;
  bvec zero_output(n), one_output(n);

  int block_length = coded_sequence.size() / n - m; // number of input symbols
  it_error_if(block_length <= 0, "The input sequence is too short");
  input.set_length(block_length, false); // the tail is not included in the output


  for (i = 0; i < block_length; i++) {
    zero_state = state;
    one_state = state | (1 << m);
    for (j = 0; j < n; j++) {
      zero_temp = zero_state & gen_pol(j);
      one_temp = one_state & gen_pol(j);
      zero_output(j) = xor_int_table(zero_temp);
      one_output(j) = xor_int_table(one_temp);
    }
    if (coded_sequence.mid(i*n, n) == zero_output) {
      input(i) = bin(0);
      state = zero_state >> 1;
    }
    else if (coded_sequence.mid(i*n, n) == one_output) {
      input(i) = bin(1);
      state = one_state >> 1;
    }
    else
      return false;
  }

  return true;
}

/*
  Check if catastrophic. Returns true if catastrophic
*/
bool Convolutional_Code::catastrophic(void)
{
  int start, S, W0, W1, S0, S1;
  bvec visited(1 << m);

  for (start = 1; start < no_states; start++) {
    visited.zeros();
    S = start;
    visited(S) = 1;

  node1:
    S0 = next_state(S, 0);
    S1 = next_state(S, 1);
    weight(S, W0, W1);
    if (S1 < start) goto node0;
    if (W1 > 0) goto node0;

    if (visited(S1) == bin(1))
      return true;
    S = S1; // goto node1
    visited(S) = 1;
    goto node1;

  node0:
    if (S0 < start) continue; // goto end;
    if (W0 > 0) continue; // goto end;

    if (visited(S0) == bin(1))
      return true;
    S = S0;
    visited(S) = 1;
    goto node1;

    //end:
    //void;
  }

  return false;
}

/*
  Calculate distance profile. If reverse = true calculate for the reverse code instead.
*/
void Convolutional_Code::distance_profile(ivec &dist_prof, int dmax, bool reverse)
{
  int max_stack_size = 50000;
  ivec S_stack(max_stack_size), W_stack(max_stack_size), t_stack(max_stack_size);

  int stack_pos = -1, t, S, W, W0, w0, w1;

  dist_prof.set_size(K, false);
  dist_prof.zeros();
  dist_prof += dmax; // just select a big number!
  if (reverse)
    W = weight_reverse(0, 1);
  else
    W = weight(0, 1);

  S = next_state(0, 1); // first state 0 and one as input, S = 1<<(m-1);
  t = 0;
  dist_prof(0) = W;

node1:
  if (reverse)
    weight_reverse(S, w0, w1);
  else
    weight(S, w0, w1);

  if (t < m) {
    W0 = W + w0;
    if (W0 < dist_prof(m)) { // store node0 (S, W0, and t+1)
      stack_pos++;
      if (stack_pos >= max_stack_size) {
        max_stack_size = 2 * max_stack_size;
        S_stack.set_size(max_stack_size, true);
        W_stack.set_size(max_stack_size, true);
        t_stack.set_size(max_stack_size, true);
      }

      S_stack(stack_pos) = next_state(S, 0); //S>>1;
      W_stack(stack_pos) = W0;
      t_stack(stack_pos) = t + 1;
    }
  }
  else goto stack;

  W += w1;
  if (W > dist_prof(m))
    goto stack;

  t++;
  S = next_state(S, 1); //S = (S>>1)|(1<<(m-1));

  if (W < dist_prof(t))
    dist_prof(t) = W;

  if (t == m) goto stack;
  goto node1;


stack:
  if (stack_pos >= 0) {
    // get root node from stack
    S = S_stack(stack_pos);
    W = W_stack(stack_pos);
    t = t_stack(stack_pos);
    stack_pos--;

    if (W < dist_prof(t))
      dist_prof(t) = W;

    if (t == m) goto stack;

    goto node1;
  }
}

/*
  Calculate spectrum

  Calculates both the weight spectrum (Ad) and the information weight spectrum (Cd) and
  returns them as ivec:s in the 0:th and 1:st component of spectrum, respectively. Suitable
  for calculating many terms in the spectra (uses a breadth-first algorithm). It is assumed
  that the code is non-catastrophic, or else there is a possibility of an infinite loop.
  dmax = an upper bound on the free distance
  no_terms = number of terms, including the dmax term, that should be calculated
*/
void Convolutional_Code::calculate_spectrum(Array<ivec> &spectrum, int dmax, int no_terms)
{
  imat Ad_states(no_states, dmax + no_terms), Cd_states(no_states, dmax + no_terms);
  imat Ad_temp(no_states, dmax + no_terms), Cd_temp(no_states, dmax + no_terms);
  ivec mindist(no_states), mindist_temp(1 << m);

  spectrum.set_size(2);
  spectrum(0).set_size(dmax + no_terms, false);
  spectrum(1).set_size(dmax + no_terms, false);
  spectrum(0).zeros();
  spectrum(1).zeros();
  Ad_states.zeros();
  Cd_states.zeros();
  mindist.zeros();
  int wmax = dmax + no_terms;
  ivec visited_states(no_states), visited_states_temp(no_states);
  bool proceede;
  int d, w0, w1, s, s0, s1;

  visited_states.zeros(); // 0 = false
  s = next_state(0, 1); // Start in the state reached from 0 with a one input.
  visited_states(s) = 1; // 1 = true. Start in state 2^(m-1).
  w1 = weight(0, 1);
  Ad_states(s, w1) = 1;
  Cd_states(s, w1) = 1;
  mindist(s) = w1;
  proceede = true;

  while (proceede) {
    proceede = false;
    Ad_temp.zeros();
    Cd_temp.zeros();
    mindist_temp.zeros();
    visited_states_temp.zeros(); //false
    for (s = 1; s < no_states; s++) {
      if ((mindist(s) > 0) && (mindist(s) < wmax)) {
        proceede = true;
        weight(s, w0, w1);
        s0 = next_state(s, 0);
        for (d = mindist(s); d < (wmax - w0); d++) {
          Ad_temp(s0, d + w0) += Ad_states(s, d);
          Cd_temp(s0, d + w0) += Cd_states(s, d);
          visited_states_temp(s0) = 1; //true
        }

        s1 = next_state(s, 1);
        for (d = mindist(s); d < (wmax - w1); d++) {
          Ad_temp(s1, d + w1) += Ad_states(s, d);
          Cd_temp(s1, d + w1) += Cd_states(s, d) + Ad_states(s, d);
          visited_states_temp(s1) = 1; //true
        }
        if (mindist_temp(s0) > 0) { mindist_temp(s0) = (mindist(s) + w0) < mindist_temp(s0) ? mindist(s) + w0 : mindist_temp(s0); }
        else { mindist_temp(s0) = mindist(s) + w0; }
        if (mindist_temp(s1) > 0) { mindist_temp(s1) = (mindist(s) + w1) < mindist_temp(s1) ? mindist(s) + w1 : mindist_temp(s1); }
        else { mindist_temp(s1) = mindist(s) + w1; }

      }
    }
    Ad_states = Ad_temp;
    Cd_states = Cd_temp;
    spectrum(0) += Ad_temp.get_row(0);
    spectrum(1) += Cd_temp.get_row(0);
    visited_states = visited_states_temp;
    mindist = elem_mult(mindist_temp, visited_states);
  }
}
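
/*
  Usage sketch (illustrative, not part of the library): weight spectrum of
  the (0133, 0171), K = 7 code, whose free distance is 10.

    Array<ivec> spectrum;
    code.calculate_spectrum(spectrum, 10, 5);
    // spectrum(0)(d) = Ad, the number of codewords of weight d
    // spectrum(1)(d) = Cd, the total information weight at distance d
    // the first non-zero entry of spectrum(0) gives the free distance
*/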

/*
  Cederwall's fast algorithm

  See IT No. 6, pp. 1146-1159, Nov. 1989 for details.
  Calculates both the weight spectrum (Ad) and the information weight spectrum (Cd) and
  returns them as ivec:s in the 0:th and 1:st component of spectrum, respectively. The FAST algorithm
  is good for calculating only a few terms in the spectrum. If many terms are desired, use calc_spectrum instead.
  The algorithm returns -1 if the code tested is worse than the input dfree and Cdfree.
  It returns 0 if the code MAY be catastrophic (assuming that test_catastrophic is true),
  and returns 1 if everything went right.

  \arg \c dfree the free distance of the code (or an upper bound)
  \arg \c no_terms including the dfree term that should be calculated
  \arg \c Cdfree is the best value of the information weight spectrum found so far
*/
int Convolutional_Code::fast(Array<ivec> &spectrum, const int dfree, const int no_terms, const int Cdfree, const bool test_catastrophic)
{
  int cat_treshold = 7 * K; // just a big number, but not too big!
  int i;
  ivec dist_prof(K), dist_prof_rev(K);
  distance_profile(dist_prof, dfree);
  distance_profile(dist_prof_rev, dfree, true); // for the reverse code

  int dist_prof_rev0 = dist_prof_rev(0);

  bool reverse = false; // false = use dist_prof

  // is the reverse distance profile better?
  for (i = 0; i < K; i++) {
    if (dist_prof_rev(i) > dist_prof(i)) {
      reverse = true;
      dist_prof_rev0 = dist_prof(0);
      dist_prof = dist_prof_rev;
      break;
    }
    else if (dist_prof_rev(i) < dist_prof(i)) {
      break;
    }
  }

  int d = dfree + no_terms - 1;
  int max_stack_size = 50000;
  ivec S_stack(max_stack_size), W_stack(max_stack_size), b_stack(max_stack_size), c_stack(max_stack_size);
  int stack_pos = -1;

  // F1.
  int mf = 1, b = 1;
  spectrum.set_size(2);
  spectrum(0).set_size(dfree + no_terms, false); // ad
  spectrum(1).set_size(dfree + no_terms, false); // cd
  spectrum(0).zeros();
  spectrum(1).zeros();
  int S, S0, S1, W0, W1, w0, w1, c = 0;
  S = next_state(0, 1); // first state zero and one as input
  int W = d - dist_prof_rev0;


F2:
  S0 = next_state(S, 0);
  S1 = next_state(S, 1);

  if (reverse) {
    weight(S, w0, w1);
  }
  else {
    weight_reverse(S, w0, w1);
  }
  W0 = W - w0;
  W1 = W - w1;
  if (mf < m) goto F6;

  //F3:
  if (W0 >= 0) {
    spectrum(0)(d - W0)++;
    spectrum(1)(d - W0) += b;
    // The code is worse than the best found.
    if (((d - W0) < dfree) || (((d - W0) == dfree) && (spectrum(1)(d - W0) > Cdfree)))
      return -1;
  }


F4:
  if ((W1 < dist_prof(m - 1)) || (W < dist_prof(m))) goto F5;
  // select node 1
  if (test_catastrophic && W == W1) {
    c++;
    if (c > cat_treshold)
      return 0;
  }
  else {
    c = 0;
    W = W1;
  }

  S = S1;
  mf = 1;
  b++;
  goto F2;

F5:
  //if (stack_pos == -1) return n_values;
  if (stack_pos == -1) goto end;
  // get node from stack
  S = S_stack(stack_pos);
  W = W_stack(stack_pos);
  b = b_stack(stack_pos);
  c = c_stack(stack_pos);
  stack_pos--;
  mf = 1;
  goto F2;

F6:
  if (W0 < dist_prof(m - mf - 1)) goto F4;

  //F7:
  if ((W1 >= dist_prof(m - 1)) && (W >= dist_prof(m))) {
    // save node 1
    if (test_catastrophic && stack_pos > 10000)
      return 0;

    stack_pos++;
    if (stack_pos >= max_stack_size) {
      max_stack_size = 2 * max_stack_size;
      S_stack.set_size(max_stack_size, true);
      W_stack.set_size(max_stack_size, true);
      b_stack.set_size(max_stack_size, true);
      c_stack.set_size(max_stack_size, true);
    }
    S_stack(stack_pos) = S1;
    W_stack(stack_pos) = W1;
    b_stack(stack_pos) = b + 1; // because gone to node 1
    c_stack(stack_pos) = c; // because gone to node 1
  }
  // select node 0
  S = S0;
  if (test_catastrophic && W == W0) {
    c++;
    if (c > cat_treshold)
      return 0;
  }
  else {
    c = 0;
    W = W0;
  }


  mf++;
  goto F2;

end:
  return 1;
}

//----------- These functions should be moved into some other place -------

int reverse_int(int length, int in)
{
  int i, j, out = 0;

  for (i = 0; i < (length >> 1); i++) {
    out = out | ((in & (1 << i)) << (length - 1 - (i << 1)));
  }
  for (j = 0; j < (length - i); j++) {
    out = out | ((in & (1 << (j + i))) >> ((j << 1) - (length & 1) + 1));
    //out = out | ( (in & (1<<j+i)) >> ((j<<1)-(length&1)+1) ); old version with precedence problems in MSC
  }
  return out;
}

int weight_int(int length, int in)
{
  int i, w = 0;
  for (i = 0; i < length; i++) {
    w += (in & (1 << i)) >> i;
  }
  return w;
}
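
/*
  Examples (illustrative): reverse_int() reverses the 'length' least
  significant bits and weight_int() counts the ones among them, e.g.

    reverse_int(7, 0133) == 0155   // 1011011 -> 1101101
    weight_int(7, 0133)  == 5
    weight_int(3, 6)     == 2
*/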

int compare_spectra(ivec v1, ivec v2)
{
  it_assert_debug(v1.size() == v2.size(), "compare_spectra: wrong sizes");

  for (int i = 0; i < v1.size(); i++) {
    if (v1(i) < v2(i)) {
      return 1;
    }
    else if (v1(i) > v2(i)) {
      return 0;
    }
  }
  return -1;
}

int compare_spectra(ivec v1, ivec v2, vec weight_profile)
{
  double t1 = 0, t2 = 0;
  for (int i = 0; i < v1.size(); i++) {
    t1 += (double)v1(i) * weight_profile(i);
    t2 += (double)v2(i) * weight_profile(i);
  }
  if (t1 < t2) return 1;
  else if (t1 > t2) return 0;
  else return -1;
}

} // namespace itpp