# decoder_backup.py

import numpy as np

from encoder_backup import ALPHABET, G

# Match the channel's noise variance
SIGMA2 = 10.0
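
# Channel model implied by the decoder below (clarifying note, inferred from
# the scoring code rather than stated in the original): each block interleaves
# the two codeword halves, C1 on the even samples and C2 on the odd samples,
# and the channel sits in one of two states:
#   state 1: Ye = sqrt(G) * C1 + N,   Yo = C2 + N            (even boosted)
#   state 2: Ye = C1 + N,             Yo = sqrt(G) * C2 + N  (odd boosted)
# with i.i.d. Gaussian noise N of variance SIGMA2 per sample.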


def _block_ll_scores(Yb: np.ndarray,
                     C1: np.ndarray,
                     C2: np.ndarray,
                     sqrtG: float
                     ) -> tuple[np.ndarray, np.ndarray]:
    """
    Compute per-symbol log-likelihood scores for one interleaved block Yb
    under the two channel states (even-boosted vs. odd-boosted).

    Returns (scores_state1, scores_state2).
    """
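
    # Derivation of the score formula: under state 1 the received halves are
    #     Ye = sqrt(G) * C1[m] + noise,    Yo = C2[m] + noise,
    # so the Gaussian log-likelihood of candidate symbol m is
    #     -(||Ye - sqrt(G)*C1[m]||^2 + ||Yo - C2[m]||^2) / (2 * SIGMA2)
    #   = (sqrt(G)*Ye.C1[m] + Yo.C2[m]
    #      - 0.5 * (G*||C1[m]||^2 + ||C2[m]||^2)) / SIGMA2  +  const,
    # where const = -||Yb||^2 / (2 * SIGMA2) is the same for every symbol and
    # can be dropped. State 2 is identical with the boost on the odd half.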

    # Split the received block into even/odd samples
    Ye, Yo = Yb[0::2], Yb[1::2]

    # Precompute the squared-norm penalties for each codeword half
    # (these come from the -||Y - H_s C||^2 term)
    # state 1: even half is √G * C1, odd half is C2
    E1 = G * np.sum(C1**2, axis=1) + np.sum(C2**2, axis=1)
    # state 2: even half is C1, odd half is √G * C2
    E2 = np.sum(C1**2, axis=1) + G * np.sum(C2**2, axis=1)

    # Correlation terms <Y, H_s C> for every candidate codeword
    corr1 = sqrtG * (Ye @ C1.T) + (Yo @ C2.T)
    corr2 = (Ye @ C1.T) + sqrtG * (Yo @ C2.T)

    # ML log-likelihood, dropping the symbol-independent constant
    # -||Yb||² / (2σ²)
    scores1 = (corr1 - 0.5 * E1) / SIGMA2
    scores2 = (corr2 - 0.5 * E2) / SIGMA2

    return scores1, scores2


def decode_blocks(Y: np.ndarray, C: np.ndarray) -> str:
    """
    Per-block ML decoding, marginalizing over the unknown channel state:
    for each block, compute scores1/scores2 via _block_ll_scores, then take
    marginal_score = logaddexp(scores1, scores2) and pick the argmax symbol.
    """
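    # Equal priors on the two states are assumed implicitly: logaddexp(s1, s2)
    # equals the true log-marginal up to the additive constant log(1/2), which
    # does not affect the argmax.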
    n = C.shape[1]
    assert Y.size % n == 0, "Y length must be a multiple of codeword length"
    num_blocks = Y.size // n

    half = n // 2
    C1, C2 = C[:, :half], C[:, half:]
    sqrtG = np.sqrt(G)

    recovered = []
    for k in range(num_blocks):
        Yb = Y[k*n:(k+1)*n]
        s1, s2 = _block_ll_scores(Yb, C1, C2, sqrtG)
        # marginal log-likelihood per symbol
        marg = np.logaddexp(s1, s2)
        best = int(np.argmax(marg))
        recovered.append(ALPHABET[best])

    return "".join(recovered)


def decode_blocks_with_state(Y: np.ndarray, C: np.ndarray) -> tuple[str, int]:
    """
    Joint-ML state estimation and decoding:
      - For each block, get per-state scores via _block_ll_scores.
      - Pick the best symbol index under each state; sum those
        log-likelihoods across blocks.
      - Choose the state with the higher total log-likelihood.
      - Reconstruct the string using the best-symbol indices for that state.

    Returns (decoded_string, estimated_state).
    """
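    # Note: summing per-block log-likelihoods assumes a single channel state
    # held for the entire transmission (not re-drawn per block).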
    n = C.shape[1]
    assert Y.size % n == 0, "Y length must be a multiple of codeword length"
    num_blocks = Y.size // n

    half = n // 2
    C1, C2 = C[:, :half], C[:, half:]
    sqrtG = np.sqrt(G)

    total1, total2 = 0.0, 0.0
    best1, best2 = [], []

    for k in range(num_blocks):
        Yb = Y[k*n:(k+1)*n]
        s1, s2 = _block_ll_scores(Yb, C1, C2, sqrtG)

        idx1 = int(np.argmax(s1))
        idx2 = int(np.argmax(s2))

        total1 += s1[idx1]
        total2 += s2[idx2]

        best1.append(idx1)
        best2.append(idx2)

    s_est = 1 if total1 >= total2 else 2
    chosen = best1 if s_est == 1 else best2
    decoded = "".join(ALPHABET[i] for i in chosen)

    return decoded, s_est
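

# ---------------------------------------------------------------------------
# Minimal smoke test (illustrative sketch, not part of the original module).
# It assumes encoder_backup's ALPHABET is a string (or sequence of one-char
# strings) and G is the channel power gain; the random codebook below is a
# stand-in for the real encoder's codebook, which is not shown here.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    rng = np.random.default_rng(0)

    n = 64  # hypothetical even codeword length
    C_demo = rng.standard_normal((len(ALPHABET), n))

    # Send the first few symbols of the alphabet
    idxs = list(range(min(5, len(ALPHABET))))
    msg = "".join(ALPHABET[i] for i in idxs)

    # Simulate state 1: even samples boosted by sqrt(G), AWGN of variance SIGMA2
    X = C_demo[idxs].copy()
    X[:, 0::2] *= np.sqrt(G)
    Y = (X + rng.normal(scale=np.sqrt(SIGMA2), size=X.shape)).ravel()

    print("sent        :", msg)
    print("marginal ML :", decode_blocks(Y, C_demo))
    print("joint ML    :", decode_blocks_with_state(Y, C_demo))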