// MODWTTransform.java
package com.morphiqlabs.wavelet.modwt;
import com.morphiqlabs.wavelet.api.BoundaryMode;
import com.morphiqlabs.wavelet.api.Wavelet;
import com.morphiqlabs.wavelet.exception.InvalidSignalException;
import com.morphiqlabs.wavelet.exception.InvalidArgumentException;
import com.morphiqlabs.wavelet.exception.ErrorCode;
import com.morphiqlabs.wavelet.exception.ErrorContext;
import com.morphiqlabs.wavelet.WaveletOperations;
import com.morphiqlabs.wavelet.util.MathUtils;
import com.morphiqlabs.wavelet.util.ValidationUtils;
import com.morphiqlabs.wavelet.performance.AdaptivePerformanceEstimator;
import com.morphiqlabs.wavelet.performance.PredictionResult;
import java.util.Objects;
/**
* Implementation of the MODWT (Maximal Overlap Discrete Wavelet Transform) with Java 21 optimizations.
*
* <p>The MODWT is a non-decimated wavelet transform that offers
* several advantages:</p>
* <ul>
* <li><strong>Shift-invariant:</strong> Translation of input results in corresponding translation of coefficients</li>
* <li><strong>Arbitrary length signals:</strong> Can handle signals of any length, not just powers of two</li>
* <li><strong>Same-length output:</strong> Both approximation and detail coefficients have the same length as input</li>
* <li><strong>Redundant representation:</strong> Provides more information but at computational cost</li>
* </ul>
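*
* <p>For example, under PERIODIC boundaries shift-invariance means that circularly
* shifting the input circularly shifts the coefficients by the same amount (a minimal
* sketch; {@code rotate} is a hypothetical helper that circularly shifts an array):</p>
* <pre>{@code
* MODWTTransform t = new MODWTTransform(new Haar(), BoundaryMode.PERIODIC);
* double[] x = {1, 2, 3, 4, 5};
* MODWTResult a = t.forward(x);
* MODWTResult b = t.forward(rotate(x, 1));
* // b.detailCoeffs() matches rotate(a.detailCoeffs(), 1) up to round-off
* }</pre>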
*
* <p><strong>Performance Note:</strong> The core module is a portable, scalar Java 21 implementation.
* SIMD acceleration via the Java Vector API is provided in the optional
* <code>vectorwave-extensions</code> module (Java 24 incubator). When you add the
* extensions dependency, high‑throughput paths (e.g., batch/AoS facades) and internal
* optimizers can leverage SIMD. Core alone remains scalar and portable.</p>
*
* <p>The MODWT uses convolution without downsampling and employs level-scaled filters.
* At level j, the textbook MODWT filter is the level-j DWT filter scaled by 2^(−j/2);
* for the single-level transform implemented here (j = 1) this reduces to h̃_l = h_l / √2.</p>
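*
* <p>Worked example (level 1): for the Haar scaling filter {@code {1/√2, 1/√2}}, the
* MODWT analysis filter is {@code {1/√2 · 1/√2, 1/√2 · 1/√2} = {0.5, 0.5}}, which is
* exactly the 1/√2 scaling applied by {@link #forward(double[])}.</p>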
*
* <p><strong>Boundary modes:</strong></p>
* <ul>
* <li>PERIODIC: circular convolution (exact perfect reconstruction)</li>
* <li>ZERO_PADDING: linear convolution with implicit zeros beyond boundaries</li>
* <li>SYMMETRIC: linear convolution with mirrored extension (inverse uses
* documented alignment heuristics; see docs/guides/SYMMETRIC_ALIGNMENT.md)</li>
* </ul>
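*
* <p>Mode selection sketch:</p>
* <pre>{@code
* var periodic  = new MODWTTransform(new Haar(), BoundaryMode.PERIODIC);     // exact reconstruction
* var zeroPad   = new MODWTTransform(new Haar(), BoundaryMode.ZERO_PADDING); // zeros beyond edges
* var symmetric = new MODWTTransform(new Haar(), BoundaryMode.SYMMETRIC);    // mirrored edges
* }</pre>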
*
* <h2>Symmetric Boundary Policy (Short Version)</h2>
* <p>Under SYMMETRIC boundaries, reconstruction uses a small, wavelet/level‑dependent
* alignment heuristic to minimize boundary error (a ±τ<sub>j</sub> shift per level).
* Tests assert interior‑region equality rather than global tight RMSE to avoid
* brittle edge comparisons. For strict exactness, prefer PERIODIC. See the full
* guide: {@code docs/guides/SYMMETRIC_ALIGNMENT.md}.</p>
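*
* <p>A minimal sketch of an interior-region check under SYMMETRIC boundaries (the margin
* {@code m} and tolerance below are illustrative, not the library's exact test policy):</p>
* <pre>{@code
* MODWTTransform t = new MODWTTransform(new Haar(), BoundaryMode.SYMMETRIC);
* double[] x = new double[257];
* for (int i = 0; i < x.length; i++) x[i] = Math.sin(0.05 * i);
* double[] y = t.inverse(t.forward(x));
* int m = 8; // skip a few samples near each boundary
* for (int i = m; i < x.length - m; i++) {
*     assert Math.abs(x[i] - y[i]) < 1e-8;
* }
* }</pre>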
*
* <p>For performance characteristics and SIMD speedups with extensions, see the
* project README and Performance Guide.</p>
*
* <h2>Usage Example:</h2>
* <pre>{@code
* // Create MODWT transform with Haar wavelet
* MODWTTransform modwt = new MODWTTransform(new Haar(), BoundaryMode.PERIODIC);
*
* // Transform arbitrary length signal
* double[] signal = {1, 2, 3, 4, 5, 6, 7}; // Not power of 2!
* MODWTResult result = modwt.forward(signal);
*
* // Reconstruct signal with machine precision
* double[] reconstructed = modwt.inverse(result);
*
* // Monitor performance
* var perfInfo = modwt.getPerformanceInfo();
* System.out.println(perfInfo.description());
* }</pre>
*
* @see MODWTResult
* @since 1.0.0
*/
public class MODWTTransform {
private final Wavelet wavelet;
private final BoundaryMode boundaryMode;
/**
* Constructs a MODWT transformer with the specified wavelet and boundary mode.
* Automatically configures performance optimizations based on system capabilities.
*
* @param wavelet The wavelet to use for the transformations
* @param boundaryMode The boundary handling mode (PERIODIC, ZERO_PADDING, or SYMMETRIC)
* @throws NullPointerException if any parameter is null
* @throws InvalidArgumentException if the boundary mode is not supported
*/
public MODWTTransform(Wavelet wavelet, BoundaryMode boundaryMode) {
this.wavelet = Objects.requireNonNull(wavelet, "wavelet cannot be null");
this.boundaryMode = Objects.requireNonNull(boundaryMode, "boundaryMode cannot be null");
// MODWT supports PERIODIC, ZERO_PADDING, and SYMMETRIC boundary modes
if (boundaryMode != BoundaryMode.PERIODIC &&
boundaryMode != BoundaryMode.ZERO_PADDING &&
boundaryMode != BoundaryMode.SYMMETRIC) {
throw new InvalidArgumentException(
ErrorCode.CFG_UNSUPPORTED_BOUNDARY_MODE,
ErrorContext.builder("MODWT only supports PERIODIC, ZERO_PADDING, and SYMMETRIC boundary modes")
.withBoundaryMode(boundaryMode)
.withWavelet(wavelet)
.withSuggestion("Use BoundaryMode.PERIODIC for circular convolution")
.withSuggestion("Use BoundaryMode.ZERO_PADDING for zero-padding at edges")
.withSuggestion("Use BoundaryMode.SYMMETRIC to mirror signal at boundaries")
.withContext("Transform type", "MODWT")
.build()
);
}
}
/**
* Performs a single-level forward MODWT with automatic performance optimization.
*
* <p>Unlike the standard DWT, this produces approximation and detail coefficients
* that are the same length as the input signal, making the transform shift-invariant
* and applicable to arbitrary length signals.</p>
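*
* <p>Same-length output sketch (assuming {@code modwt} is constructed as in the
* class-level example):</p>
* <pre>{@code
* double[] signal = new double[777]; // any length, not just powers of two
* MODWTResult r = modwt.forward(signal);
* // r.approximationCoeffs().length == 777 and r.detailCoeffs().length == 777
* }</pre>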
*
* <p><strong>Performance:</strong> Depending on filter length and on whether the optional
* {@code vectorwave-extensions} module is present, the underlying convolution may use:</p>
* <ul>
* <li>FFT-based convolution for large Coiflet filters (≥128 taps)</li>
* <li>Vectorized SIMD implementation for medium filters (extensions module)</li>
* <li>Scalar implementation for small filters or when SIMD is unavailable</li>
* </ul>
*
* @param signal The input signal of any length ≥ 1
* @return A MODWTResult containing same-length approximation and detail coefficients
* @throws InvalidSignalException if the signal is empty or contains NaN or infinite values
* @throws NullPointerException if signal is null
*/
public MODWTResult forward(double[] signal) {
// Validate the input: non-null, non-empty, all values finite
validateInputSignal(signal);
// Get filter coefficients
double[] lowPassFilter = wavelet.lowPassDecomposition();
double[] highPassFilter = wavelet.highPassDecomposition();
// Scale filters by 1/sqrt(2) for the single-level MODWT (h̃_l = h_l / √2).
// This scaling preserves the transform's energy/variance decomposition;
// shift-invariance comes from omitting the downsampling step.
double scale = 1.0 / Math.sqrt(2.0);
double[] scaledLowPass = new double[lowPassFilter.length];
double[] scaledHighPass = new double[highPassFilter.length];
for (int i = 0; i < lowPassFilter.length; i++) {
scaledLowPass[i] = lowPassFilter[i] * scale;
}
for (int i = 0; i < highPassFilter.length; i++) {
scaledHighPass[i] = highPassFilter[i] * scale;
}
// Prepare output arrays (same length as input)
int signalLength = signal.length;
double[] approximationCoeffs = new double[signalLength];
double[] detailCoeffs = new double[signalLength];
// Measure actual execution time for adaptive learning
long startTime = System.nanoTime();
// Perform convolution without downsampling based on boundary mode.
// In the core module these operations are scalar. If the optional
// vectorwave-extensions module is present, SIMD-accelerated paths may be
// used by higher-level facades and optimizers.
if (boundaryMode == BoundaryMode.PERIODIC) {
// (As noted above, these calls are scalar in the core module; SIMD-backed
// variants may be used when the extensions module is on the classpath.)
WaveletOperations.circularConvolveMODWT(signal, scaledLowPass, approximationCoeffs);
WaveletOperations.circularConvolveMODWT(signal, scaledHighPass, detailCoeffs);
} else if (boundaryMode == BoundaryMode.ZERO_PADDING) {
WaveletOperations.zeroPaddingConvolveMODWT(signal, scaledLowPass, approximationCoeffs);
WaveletOperations.zeroPaddingConvolveMODWT(signal, scaledHighPass, detailCoeffs);
} else { // SYMMETRIC
WaveletOperations.symmetricConvolveMODWT(signal, scaledLowPass, approximationCoeffs);
WaveletOperations.symmetricConvolveMODWT(signal, scaledHighPass, detailCoeffs);
}
long endTime = System.nanoTime();
double actualTimeMs = (endTime - startTime) / 1_000_000.0;
// Record measurement for adaptive learning (only for significant operations)
if (signalLength >= 64 && actualTimeMs > 0.01) {
AdaptivePerformanceEstimator.getInstance().recordMeasurement(
"MODWT", signalLength, actualTimeMs,
WaveletOperations.getPerformanceInfo().vectorizationEnabled()
);
}
return MODWTResult.create(approximationCoeffs, detailCoeffs);
}
/**
* Performs a single-level inverse MODWT to reconstruct the signal.
*
* <p>The reconstruction follows the same pattern as the DWT but without upsampling.
* It uses 1/√2-scaled reconstruction filters with the boundary handling configured
* for this transform (circular convolution under PERIODIC).</p>
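*
* <p>Round-trip sketch (PERIODIC boundaries reconstruct to machine precision;
* {@code modwt} and {@code signal} as in the class-level example):</p>
* <pre>{@code
* MODWTResult r = modwt.forward(signal);
* double[] back = modwt.inverse(r);
* // back[i] ≈ signal[i] for every i
* }</pre>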
*
* @param modwtResult The MODWT result containing approximation and detail coefficients
* @return The reconstructed signal
* @throws NullPointerException if modwtResult is null
* @throws InvalidSignalException if the result contains invalid coefficients
*/
public double[] inverse(MODWTResult modwtResult) {
Objects.requireNonNull(modwtResult, "modwtResult cannot be null");
if (!modwtResult.isValid()) {
throw new InvalidSignalException(
ErrorCode.VAL_NON_FINITE_VALUES,
ErrorContext.builder("MODWTResult contains invalid coefficients")
.withContext("Coefficient validity", "Contains NaN or Infinity values")
.withWavelet(wavelet)
.withBoundaryMode(boundaryMode)
.withSuggestion("Check input signal for NaN or Infinity values")
.withSuggestion("Verify wavelet filter coefficients are valid")
.build()
);
}
// Get coefficients (defensive copies)
double[] approxCoeffs = modwtResult.approximationCoeffs();
double[] detailCoeffs = modwtResult.detailCoeffs();
int signalLength = modwtResult.getSignalLength();
// Get reconstruction filter coefficients
double[] lowPassRecon = wavelet.lowPassReconstruction();
double[] highPassRecon = wavelet.highPassReconstruction();
// Scale reconstruction filters by 1/sqrt(2) for single-level (j=1)
double scale = 1.0 / Math.sqrt(2.0);
double[] scaledLowPassRecon = new double[lowPassRecon.length];
double[] scaledHighPassRecon = new double[highPassRecon.length];
for (int i = 0; i < lowPassRecon.length; i++) {
scaledLowPassRecon[i] = lowPassRecon[i] * scale;
}
for (int i = 0; i < highPassRecon.length; i++) {
scaledHighPassRecon[i] = highPassRecon[i] * scale;
}
// Prepare output array
double[] reconstructed = new double[signalLength];
// Direct reconstruction based on boundary mode
if (boundaryMode == BoundaryMode.PERIODIC) {
// X_t = Σ(l=0 to L-1) [h_l * s_(t+l mod N) + g_l * d_(t+l mod N)] (synthesis-style indexing)
for (int t = 0; t < signalLength; t++) {
double sum = 0.0;
for (int l = 0; l < scaledLowPassRecon.length; l++) {
int coeffIndex = (t + l) % signalLength;
sum += scaledLowPassRecon[l] * approxCoeffs[coeffIndex] +
scaledHighPassRecon[l] * detailCoeffs[coeffIndex];
}
reconstructed[t] = sum;
}
} else if (boundaryMode == BoundaryMode.ZERO_PADDING) {
// X_t = Σ(l=0 to L-1) [h_l * s_(t+l) + g_l * d_(t+l)] with zero padding
for (int t = 0; t < signalLength; t++) {
double sum = 0.0;
for (int l = 0; l < scaledLowPassRecon.length; l++) {
int coeffIndex = t + l;
if (coeffIndex < signalLength) {
sum += scaledLowPassRecon[l] * approxCoeffs[coeffIndex] +
scaledHighPassRecon[l] * detailCoeffs[coeffIndex];
}
// else: treat as zero (no contribution to sum)
}
reconstructed[t] = sum;
}
} else { // SYMMETRIC
// Symmetric-extension reconstruction: coefficients are read at indices (t - l),
// i.e., with time-reversed filter alignment, and indices that fall outside the
// signal are mirrored back in via symmetric extension.
for (int t = 0; t < signalLength; t++) {
double sum = 0.0;
for (int l = 0; l < scaledLowPassRecon.length; l++) {
int idx = t - l;
// Map out-of-range indices into [0, N) using symmetric (mirror) extension
idx = MathUtils.symmetricBoundaryExtension(idx, signalLength);
sum += scaledLowPassRecon[l] * approxCoeffs[idx] +
scaledHighPassRecon[l] * detailCoeffs[idx];
}
reconstructed[t] = sum;
}
}
return reconstructed;
}
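/**
* Reverses the contents of the given array in place (e.g., to obtain a time-reversed filter).
*
* @param a the array to reverse
*/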
private static void reverseInPlace(double[] a) {
for (int i = 0, j = a.length - 1; i < j; i++, j--) {
double tmp = a[i];
a[i] = a[j];
a[j] = tmp;
}
}
/**
* Gets the wavelet used by this transform.
*
* @return the wavelet
*/
public Wavelet getWavelet() {
return wavelet;
}
/**
* Gets the boundary mode used by this transform.
*
* @return the boundary mode
*/
public BoundaryMode getBoundaryMode() {
return boundaryMode;
}
/**
* Gets performance information for this transform configuration.
* Useful for monitoring and optimization decisions.
*
* @return Performance characteristics and capabilities
*/
public WaveletOperations.PerformanceInfo getPerformanceInfo() {
return WaveletOperations.getPerformanceInfo();
}
/**
* Estimates the processing time for a given signal length.
* Uses adaptive performance models calibrated for this platform.
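*
* <p>Usage sketch:</p>
* <pre>{@code
* var estimate = modwt.estimateProcessingTime(65_536);
* System.out.println(estimate.description());
* if (!estimate.isFastProcessing()) {
*     // consider batching or processing off the hot path
* }
* }</pre>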
*
* @param signalLength The length of the signal to process
* @return Estimated processing time information
*/
public ProcessingEstimate estimateProcessingTime(int signalLength) {
var perfInfo = getPerformanceInfo();
// Use adaptive performance estimator
AdaptivePerformanceEstimator estimator = AdaptivePerformanceEstimator.getInstance();
PredictionResult prediction = estimator.estimateMODWT(
signalLength,
wavelet.name(),
perfInfo.vectorizationEnabled()
);
return new ProcessingEstimate(
signalLength,
prediction.estimatedTime(),
perfInfo.vectorizationEnabled(),
perfInfo.estimateSpeedup(signalLength),
prediction.confidence(),
prediction.lowerBound(),
prediction.upperBound()
);
}
/**
* Validates the input signal: non-null, non-empty, and all values finite.
*/
private void validateInputSignal(double[] signal) {
validateNotNull(signal);
validateNotEmpty(signal);
validateFiniteValues(signal);
}
/**
* Validates that the signal is not null.
*
* @param signal the signal to validate
* @throws NullPointerException if signal is null
*/
private void validateNotNull(double[] signal) {
Objects.requireNonNull(signal, "signal cannot be null");
}
/**
* Validates that the signal is not empty.
*
* @param signal the signal to validate
* @throws InvalidSignalException if signal is empty
*/
private void validateNotEmpty(double[] signal) {
if (signal.length == 0) {
throw new InvalidSignalException(
ErrorCode.VAL_EMPTY,
ErrorContext.builder("Signal cannot be empty")
.withContext("Transform type", "MODWT")
.withWavelet(wavelet)
.withBoundaryMode(boundaryMode)
.withSuggestion("Provide a signal with at least one sample")
.withSuggestion("Check data loading/generation logic")
.build()
);
}
}
/**
* Validates that all signal values are finite (not NaN or Infinity).
*
* @param signal the signal to validate
* @throws InvalidSignalException if signal contains non-finite values
*/
private void validateFiniteValues(double[] signal) {
ValidationUtils.validateFiniteValues(signal, "signal");
}
/**
* Record representing processing time estimation with confidence bounds.
* Implemented as a Java record for an immutable, transparent data carrier.
*
* @param signalLength the input signal length used for the estimate
* @param estimatedTimeMs estimated processing time in milliseconds
* @param vectorizationUsed whether vectorization was used for the estimate
* @param speedupFactor estimated speedup versus scalar path
* @param confidence confidence level for the estimate (0..1)
* @param lowerBoundMs lower bound of the estimate interval (ms)
* @param upperBoundMs upper bound of the estimate interval (ms)
*/
public record ProcessingEstimate(
int signalLength,
double estimatedTimeMs,
boolean vectorizationUsed,
double speedupFactor,
double confidence,
double lowerBoundMs,
double upperBoundMs
) {
/**
* Returns a human-readable description of the processing estimate.
* @return formatted description string
*/
public String description() {
String baseDesc;
if (vectorizationUsed) {
baseDesc = String.format("Signal length %d: %.2fms [%.2f-%.2fms] (%.1fx speedup with vectors)",
signalLength, estimatedTimeMs, lowerBoundMs, upperBoundMs, speedupFactor);
} else {
baseDesc = String.format("Signal length %d: %.2fms [%.2f-%.2fms] (scalar mode)",
signalLength, estimatedTimeMs, lowerBoundMs, upperBoundMs);
}
return baseDesc + String.format(" - %.0f%% confidence", confidence * 100);
}
/**
* Indicates if the processing is expected to be fast ({@literal <} 1ms).
* @return true if estimatedTimeMs {@literal <} 1.0
*/
public boolean isFastProcessing() {
return estimatedTimeMs < 1.0;
}
/**
* Indicates if vectorization provides significant benefit ({@literal >} 2x speedup).
* @return true if speedupFactor {@literal >} 2.0
*/
public boolean hasSignificantSpeedup() {
return speedupFactor > 2.0;
}
}
/**
* Performs batch forward MODWT on multiple signals simultaneously.
*
* <p>This method automatically optimizes batch processing using:</p>
* <ul>
* <li>SIMD vectorization when beneficial</li>
* <li>Optimized memory layout for cache efficiency</li>
* <li>Parallel processing for large batches</li>
* </ul>
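*
* <p>Batch usage sketch ({@code modwt} as in the class-level example):</p>
* <pre>{@code
* double[][] signals = { new double[512], new double[512], new double[300] };
* MODWTResult[] results = modwt.forwardBatch(signals);
* double[][] restored = modwt.inverseBatch(results);
* }</pre>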
*
* @param signals Array of input signals (can be different lengths)
* @return Array of MODWTResult objects corresponding to each input signal
* @throws InvalidSignalException if any signal is invalid
* @throws NullPointerException if signals array or any signal is null
*/
public MODWTResult[] forwardBatch(double[][] signals) {
// Input validation
Objects.requireNonNull(signals, "signals array cannot be null");
if (signals.length == 0) {
return new MODWTResult[0];
}
// Check if all signals have the same length for optimization
boolean sameLengths = true;
int firstLength = signals[0].length;
for (int i = 1; i < signals.length; i++) {
if (signals[i].length != firstLength) {
sameLengths = false;
break;
}
}
// For large batches of same-length signals, use optimized processing
if (sameLengths && signals.length >= 4 && firstLength >= 64) {
return forwardBatchOptimized(signals);
}
// Process each signal individually for mixed lengths or small batches
MODWTResult[] results = new MODWTResult[signals.length];
for (int i = 0; i < signals.length; i++) {
results[i] = forward(signals[i]);
}
return results;
}
/**
* Performs batch inverse MODWT on multiple results simultaneously.
*
* <p>This method automatically optimizes batch reconstruction using:</p>
* <ul>
* <li>SIMD vectorization when beneficial</li>
* <li>Optimized memory layout</li>
* <li>Parallel processing for large batches</li>
* </ul>
*
* @param results Array of MODWTResult objects to reconstruct
* @return Array of reconstructed signals
* @throws NullPointerException if results array or any result is null
* @throws InvalidSignalException if any result contains invalid coefficients
*/
public double[][] inverseBatch(MODWTResult[] results) {
// Input validation
Objects.requireNonNull(results, "results array cannot be null");
if (results.length == 0) {
return new double[0][];
}
// Check if all results have the same length for optimization
boolean sameLengths = true;
int firstLength = results[0].getSignalLength();
for (int i = 1; i < results.length; i++) {
if (results[i].getSignalLength() != firstLength) {
sameLengths = false;
break;
}
}
// For large batches of same-length results, use optimized processing
if (sameLengths && results.length >= 4 && firstLength >= 64) {
return inverseBatchOptimized(results);
}
// Process each result individually for mixed lengths or small batches
double[][] reconstructed = new double[results.length][];
for (int i = 0; i < results.length; i++) {
reconstructed[i] = inverse(results[i]);
}
return reconstructed;
}
/**
* Optimized batch forward transform for same-length signals.
*/
private MODWTResult[] forwardBatchOptimized(double[][] signals) {
int batchSize = signals.length;
int signalLength = signals[0].length;
// Get filter coefficients
double[] lowPassFilter = wavelet.lowPassDecomposition();
double[] highPassFilter = wavelet.highPassDecomposition();
// Scale filters by 1/sqrt(2) for MODWT
double scale = 1.0 / Math.sqrt(2.0);
double[] scaledLowPass = new double[lowPassFilter.length];
double[] scaledHighPass = new double[highPassFilter.length];
for (int i = 0; i < lowPassFilter.length; i++) {
scaledLowPass[i] = lowPassFilter[i] * scale;
}
for (int i = 0; i < highPassFilter.length; i++) {
scaledHighPass[i] = highPassFilter[i] * scale;
}
// Prepare output arrays
double[][] approxCoeffs = new double[batchSize][signalLength];
double[][] detailCoeffs = new double[batchSize][signalLength];
// Process in batches using optimized convolution
if (boundaryMode == BoundaryMode.PERIODIC) {
// For periodic mode, we can use more efficient batch processing
for (int b = 0; b < batchSize; b++) {
WaveletOperations.circularConvolveMODWT(signals[b], scaledLowPass, approxCoeffs[b]);
WaveletOperations.circularConvolveMODWT(signals[b], scaledHighPass, detailCoeffs[b]);
}
} else if (boundaryMode == BoundaryMode.ZERO_PADDING) {
for (int b = 0; b < batchSize; b++) {
WaveletOperations.zeroPaddingConvolveMODWT(signals[b], scaledLowPass, approxCoeffs[b]);
WaveletOperations.zeroPaddingConvolveMODWT(signals[b], scaledHighPass, detailCoeffs[b]);
}
} else {
for (int b = 0; b < batchSize; b++) {
WaveletOperations.symmetricConvolveMODWT(signals[b], scaledLowPass, approxCoeffs[b]);
WaveletOperations.symmetricConvolveMODWT(signals[b], scaledHighPass, detailCoeffs[b]);
}
}
// Create results
MODWTResult[] results = new MODWTResult[batchSize];
for (int i = 0; i < batchSize; i++) {
results[i] = MODWTResult.create(approxCoeffs[i], detailCoeffs[i]);
}
return results;
}
/**
* Optimized batch inverse transform for same-length results.
*/
private double[][] inverseBatchOptimized(MODWTResult[] results) {
int batchSize = results.length;
int signalLength = results[0].getSignalLength();
// Get reconstruction filter coefficients
double[] lowPassRecon = wavelet.lowPassReconstruction();
double[] highPassRecon = wavelet.highPassReconstruction();
// Scale reconstruction filters by 1/sqrt(2) for MODWT
double scale = 1.0 / Math.sqrt(2.0);
double[] scaledLowPassRecon = new double[lowPassRecon.length];
double[] scaledHighPassRecon = new double[highPassRecon.length];
for (int i = 0; i < lowPassRecon.length; i++) {
scaledLowPassRecon[i] = lowPassRecon[i] * scale;
}
for (int i = 0; i < highPassRecon.length; i++) {
scaledHighPassRecon[i] = highPassRecon[i] * scale;
}
// Prepare output arrays
double[][] reconstructed = new double[batchSize][signalLength];
// Process each result
for (int b = 0; b < batchSize; b++) {
double[] approxCoeffs = results[b].approximationCoeffs();
double[] detailCoeffs = results[b].detailCoeffs();
if (boundaryMode == BoundaryMode.PERIODIC) {
// Periodic reconstruction
for (int t = 0; t < signalLength; t++) {
double sum = 0.0;
for (int l = 0; l < scaledLowPassRecon.length; l++) {
int coeffIndex = (t + l) % signalLength;
sum += scaledLowPassRecon[l] * approxCoeffs[coeffIndex] +
scaledHighPassRecon[l] * detailCoeffs[coeffIndex];
}
reconstructed[b][t] = sum;
}
} else if (boundaryMode == BoundaryMode.ZERO_PADDING) {
// Zero-padding reconstruction
for (int t = 0; t < signalLength; t++) {
double sum = 0.0;
for (int l = 0; l < scaledLowPassRecon.length; l++) {
int coeffIndex = t + l;
if (coeffIndex < signalLength) {
sum += scaledLowPassRecon[l] * approxCoeffs[coeffIndex] +
scaledHighPassRecon[l] * detailCoeffs[coeffIndex];
}
}
reconstructed[b][t] = sum;
}
} else {
// Symmetric reconstruction using adjoint indexing: coefficients are read at
// (t + l), with indices reflected over a period of 2N back into [0, N)
int period = signalLength << 1;
for (int t = 0; t < signalLength; t++) {
double sum = 0.0;
for (int l = 0; l < scaledLowPassRecon.length; l++) {
int idx = t + l;
int mod = idx % period;
int coeffIndex = mod < signalLength ? mod : period - mod - 1;
sum += scaledLowPassRecon[l] * approxCoeffs[coeffIndex] +
scaledHighPassRecon[l] * detailCoeffs[coeffIndex];
}
reconstructed[b][t] = sum;
}
}
}
return reconstructed;
}
}