feat: Integrate IndexTTS2 model and update related schemas and frontend components
This commit is contained in:
4
qwen3-tts-backend/indextts/s2mel/dac/model/__init__.py
Normal file
4
qwen3-tts-backend/indextts/s2mel/dac/model/__init__.py
Normal file
@@ -0,0 +1,4 @@
|
||||
from .base import CodecMixin
|
||||
from .base import DACFile
|
||||
from .dac import DAC
|
||||
from .discriminator import Discriminator
|
||||
294
qwen3-tts-backend/indextts/s2mel/dac/model/base.py
Normal file
294
qwen3-tts-backend/indextts/s2mel/dac/model/base.py
Normal file
@@ -0,0 +1,294 @@
|
||||
import math
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Union
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
import tqdm
|
||||
from audiotools import AudioSignal
|
||||
from torch import nn
|
||||
|
||||
SUPPORTED_VERSIONS = ["1.0.0"]


@dataclass
class DACFile:
    """On-disk container for DAC-compressed audio: the code tensor plus the
    metadata required by CodecMixin.decompress() to rebuild the waveform."""

    codes: torch.Tensor  # codebook indices; stored on disk as uint16

    # Metadata
    chunk_length: int      # code frames produced per compression window
    original_length: int   # sample count of the source audio
    input_db: float        # loudness of the input; in practice a tensor (see save()) — TODO confirm
    channels: int          # channel count of the source audio
    sample_rate: int       # sample rate of the source audio in Hz
    padding: bool          # whether compression ran with padded (unchunked) convolutions
    dac_version: str       # codec version string, checked on load()

    def save(self, path):
        """Serialize codes and metadata to ``<path>.dac`` via a pickled numpy
        archive. Returns the path actually written (suffix forced to .dac)."""
        artifacts = {
            # uint16 suffices for codebook sizes up to 65536
            "codes": self.codes.numpy().astype(np.uint16),
            "metadata": {
                # NOTE(review): input_db is treated as a tensor here despite the
                # float annotation above (e.g. the result of loudness()) — verify callers
                "input_db": self.input_db.numpy().astype(np.float32),
                "original_length": self.original_length,
                "sample_rate": self.sample_rate,
                "chunk_length": self.chunk_length,
                "channels": self.channels,
                "padding": self.padding,
                "dac_version": SUPPORTED_VERSIONS[-1],
            },
        }
        path = Path(path).with_suffix(".dac")
        with open(path, "wb") as f:
            # np.save on a dict pickles it; load() must use allow_pickle=True
            np.save(f, artifacts)
        return path

    @classmethod
    def load(cls, path):
        """Load a ``.dac`` file written by save().

        Raises RuntimeError when the stored version is not supported.
        Codes are converted back to a torch integer tensor.
        """
        artifacts = np.load(path, allow_pickle=True)[()]
        codes = torch.from_numpy(artifacts["codes"].astype(int))
        if artifacts["metadata"].get("dac_version", None) not in SUPPORTED_VERSIONS:
            raise RuntimeError(
                f"Given file {path} can't be loaded with this version of descript-audio-codec."
            )
        return cls(codes=codes, **artifacts["metadata"])
|
||||
|
||||
|
||||
class CodecMixin:
    """Mixin adding chunked compress()/decompress() and padding management.

    The host class must provide: ``sample_rate``, ``hop_length``, ``delay``,
    ``device``, ``preprocess()``, ``encode()``, ``decode()`` and ``quantizer``.
    """

    @property
    def padding(self):
        # Lazily defaults to padded (non-streaming) convolutions.
        if not hasattr(self, "_padding"):
            self._padding = True
        return self._padding

    @padding.setter
    def padding(self, value):
        # Toggle conv padding on every (transposed) Conv1d in the model.
        # When disabled, each layer's original padding is stashed on the layer
        # itself (as `original_padding`) so it can be restored later.
        assert isinstance(value, bool)

        layers = [
            l for l in self.modules() if isinstance(l, (nn.Conv1d, nn.ConvTranspose1d))
        ]

        for layer in layers:
            if value:
                if hasattr(layer, "original_padding"):
                    layer.padding = layer.original_padding
            else:
                layer.original_padding = layer.padding
                layer.padding = tuple(0 for _ in range(len(layer.padding)))

        self._padding = value

    def get_delay(self):
        """Return the model's alignment delay (in samples) when run unpadded.

        Walks the conv stack backwards from an arbitrary output length to the
        input length that produced it; half the difference is the delay used
        to center chunks during chunked compression.
        """
        # Any number works here, delay is invariant to input length
        l_out = self.get_output_length(0)
        L = l_out

        layers = []
        for layer in self.modules():
            if isinstance(layer, (nn.Conv1d, nn.ConvTranspose1d)):
                layers.append(layer)

        for layer in reversed(layers):
            d = layer.dilation[0]
            k = layer.kernel_size[0]
            s = layer.stride[0]

            # Inverse of the forward length formulas in get_output_length
            # (roles of Conv1d and ConvTranspose1d are swapped).
            if isinstance(layer, nn.ConvTranspose1d):
                L = ((L - d * (k - 1) - 1) / s) + 1
            elif isinstance(layer, nn.Conv1d):
                L = (L - 1) * s + d * (k - 1) + 1

            L = math.ceil(L)

        l_in = L

        return (l_in - l_out) // 2

    def get_output_length(self, input_length):
        """Return the output length produced by the unpadded conv stack for a
        given input length (standard conv / transposed-conv arithmetic)."""
        L = input_length
        # Calculate output length
        for layer in self.modules():
            if isinstance(layer, (nn.Conv1d, nn.ConvTranspose1d)):
                d = layer.dilation[0]
                k = layer.kernel_size[0]
                s = layer.stride[0]

                if isinstance(layer, nn.Conv1d):
                    L = ((L - d * (k - 1) - 1) / s) + 1
                elif isinstance(layer, nn.ConvTranspose1d):
                    L = (L - 1) * s + d * (k - 1) + 1

                L = math.floor(L)
        return L

    @torch.no_grad()
    def compress(
        self,
        audio_path_or_signal: Union[str, Path, AudioSignal],
        win_duration: float = 1.0,
        verbose: bool = False,
        normalize_db: float = -16,
        n_quantizers: int = None,
    ) -> DACFile:
        """Processes an audio signal from a file or AudioSignal object into
        discrete codes. This function processes the signal in short windows,
        using constant GPU memory.

        Parameters
        ----------
        audio_path_or_signal : Union[str, Path, AudioSignal]
            audio signal to reconstruct
        win_duration : float, optional
            window duration in seconds, by default 1.0
            (None means compress the whole signal in one window)
        verbose : bool, optional
            show a progress bar over chunks, by default False
        normalize_db : float, optional
            normalize db, by default -16 (None disables loudness normalization)
        n_quantizers : int, optional
            number of quantizers to use during encoding, by default None (all)

        Returns
        -------
        DACFile
            Object containing compressed codes and metadata
            required for decompression
        """
        audio_signal = audio_path_or_signal
        if isinstance(audio_signal, (str, Path)):
            audio_signal = AudioSignal.load_from_file_with_ffmpeg(str(audio_signal))

        self.eval()
        original_padding = self.padding
        original_device = audio_signal.device

        audio_signal = audio_signal.clone()
        original_sr = audio_signal.sample_rate

        resample_fn = audio_signal.resample
        loudness_fn = audio_signal.loudness

        # Use the ffmpeg implementations for very long audio.
        # NOTE(review): threshold is 10 * 60 * 60 s (10 hours), not the
        # "10 minutes" the original comment claimed — confirm intent.
        if audio_signal.signal_duration >= 10 * 60 * 60:
            resample_fn = audio_signal.ffmpeg_resample
            loudness_fn = audio_signal.ffmpeg_loudness

        original_length = audio_signal.signal_length
        resample_fn(self.sample_rate)
        input_db = loudness_fn()

        if normalize_db is not None:
            audio_signal.normalize(normalize_db)
        audio_signal.ensure_max_of_audio()

        # Fold channels into the batch dim so each channel is coded mono.
        nb, nac, nt = audio_signal.audio_data.shape
        audio_signal.audio_data = audio_signal.audio_data.reshape(nb * nac, 1, nt)
        win_duration = (
            audio_signal.signal_duration if win_duration is None else win_duration
        )

        if audio_signal.signal_duration <= win_duration:
            # Unchunked compression (used if signal length < win duration)
            self.padding = True
            n_samples = nt
            hop = nt
        else:
            # Chunked inference
            self.padding = False
            # Zero-pad signal on either side by the delay
            audio_signal.zero_pad(self.delay, self.delay)
            n_samples = int(win_duration * self.sample_rate)
            # Round n_samples to nearest hop length multiple
            n_samples = int(math.ceil(n_samples / self.hop_length) * self.hop_length)
            hop = self.get_output_length(n_samples)

        codes = []
        range_fn = range if not verbose else tqdm.trange

        for i in range_fn(0, nt, hop):
            x = audio_signal[..., i : i + n_samples]
            # Pad the last (possibly short) window up to the full window size.
            x = x.zero_pad(0, max(0, n_samples - x.shape[-1]))

            audio_data = x.audio_data.to(self.device)
            audio_data = self.preprocess(audio_data, self.sample_rate)
            _, c, _, _, _ = self.encode(audio_data, n_quantizers)
            codes.append(c.to(original_device))
            chunk_length = c.shape[-1]

        codes = torch.cat(codes, dim=-1)

        dac_file = DACFile(
            codes=codes,
            chunk_length=chunk_length,
            original_length=original_length,
            input_db=input_db,
            channels=nac,
            sample_rate=original_sr,
            padding=self.padding,
            dac_version=SUPPORTED_VERSIONS[-1],
        )

        # NOTE(review): this truncation rebinds the local `codes` only —
        # dac_file.codes was already set above and is returned untruncated.
        if n_quantizers is not None:
            codes = codes[:, :n_quantizers, :]

        self.padding = original_padding
        return dac_file

    @torch.no_grad()
    def decompress(
        self,
        obj: Union[str, Path, DACFile],
        verbose: bool = False,
    ) -> AudioSignal:
        """Reconstruct audio from a given .dac file

        Parameters
        ----------
        obj : Union[str, Path, DACFile]
            .dac file location or corresponding DACFile object.
        verbose : bool, optional
            Prints progress if True, by default False

        Returns
        -------
        AudioSignal
            Object with the reconstructed audio
        """
        self.eval()
        if isinstance(obj, (str, Path)):
            obj = DACFile.load(obj)

        # Decode with the same padding mode that was used for compression.
        original_padding = self.padding
        self.padding = obj.padding

        range_fn = range if not verbose else tqdm.trange
        codes = obj.codes
        original_device = codes.device
        chunk_length = obj.chunk_length
        recons = []

        # Decode chunk-by-chunk to bound GPU memory, mirroring compress().
        for i in range_fn(0, codes.shape[-1], chunk_length):
            c = codes[..., i : i + chunk_length].to(self.device)
            z = self.quantizer.from_codes(c)[0]
            r = self.decode(z)
            recons.append(r.to(original_device))

        recons = torch.cat(recons, dim=-1)
        recons = AudioSignal(recons, self.sample_rate)

        resample_fn = recons.resample
        loudness_fn = recons.loudness

        # Use the ffmpeg implementations for very long audio (>= 10 hours;
        # same threshold note as in compress()).
        if recons.signal_duration >= 10 * 60 * 60:
            resample_fn = recons.ffmpeg_resample
            loudness_fn = recons.ffmpeg_loudness

        # Undo the loudness normalization applied at compression time, return
        # to the original sample rate, and trim the padding tail.
        recons.normalize(obj.input_db)
        resample_fn(obj.sample_rate)
        recons = recons[..., : obj.original_length]
        loudness_fn()
        # Unfold channels back out of the batch dimension.
        recons.audio_data = recons.audio_data.reshape(
            -1, obj.channels, obj.original_length
        )

        self.padding = original_padding
        return recons
|
||||
400
qwen3-tts-backend/indextts/s2mel/dac/model/dac.py
Normal file
400
qwen3-tts-backend/indextts/s2mel/dac/model/dac.py
Normal file
@@ -0,0 +1,400 @@
|
||||
import math
|
||||
from typing import List
|
||||
from typing import Union
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from audiotools import AudioSignal
|
||||
from audiotools.ml import BaseModel
|
||||
from torch import nn
|
||||
|
||||
from .base import CodecMixin
|
||||
from indextts.s2mel.dac.nn.layers import Snake1d
|
||||
from indextts.s2mel.dac.nn.layers import WNConv1d
|
||||
from indextts.s2mel.dac.nn.layers import WNConvTranspose1d
|
||||
from indextts.s2mel.dac.nn.quantize import ResidualVectorQuantize
|
||||
from .encodec import SConv1d, SConvTranspose1d, SLSTM
|
||||
|
||||
|
||||
def init_weights(m):
    """Initialize ``nn.Conv1d`` weights (truncated normal, std=0.02) and zero
    their biases; other module types are left untouched.

    Intended for use via ``model.apply(init_weights)``.
    """
    if isinstance(m, nn.Conv1d):
        nn.init.trunc_normal_(m.weight, std=0.02)
        # Guard against Conv1d(bias=False): m.bias is None there and the
        # original unconditional constant_ call would raise.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)
|
||||
|
||||
|
||||
class ResidualUnit(nn.Module):
    """Dilated residual block: Snake -> 7x1 dilated conv -> Snake -> 1x1 conv,
    added back to a (possibly center-cropped) skip connection."""

    def __init__(self, dim: int = 16, dilation: int = 1, causal: bool = False):
        super().__init__()
        # SConv1d handles both causal and non-causal variants via its flag.
        conv_cls = SConv1d
        same_pad = ((7 - 1) * dilation) // 2
        stages = [
            Snake1d(dim),
            conv_cls(dim, dim, kernel_size=7, dilation=dilation,
                     padding=same_pad, causal=causal, norm='weight_norm'),
            Snake1d(dim),
            conv_cls(dim, dim, kernel_size=1, causal=causal, norm='weight_norm'),
        ]
        self.block = nn.Sequential(*stages)

    def forward(self, x):
        out = self.block(x)
        # Center-crop the skip path if the convs shortened the sequence so
        # the residual addition lines up.
        trim = (x.shape[-1] - out.shape[-1]) // 2
        if trim > 0:
            x = x[..., trim:-trim]
        return x + out
|
||||
|
||||
|
||||
class EncoderBlock(nn.Module):
    """Three increasingly-dilated ResidualUnits followed by a strided
    downsampling convolution that doubles the channel count."""

    def __init__(self, dim: int = 16, stride: int = 1, causal: bool = False):
        super().__init__()
        half = dim // 2
        downsample = SConv1d(
            half,
            dim,
            kernel_size=2 * stride,
            stride=stride,
            padding=math.ceil(stride / 2),
            causal=causal,
            norm='weight_norm',
        )
        self.block = nn.Sequential(
            ResidualUnit(half, dilation=1, causal=causal),
            ResidualUnit(half, dilation=3, causal=causal),
            ResidualUnit(half, dilation=9, causal=causal),
            Snake1d(half),
            downsample,
        )

    def forward(self, x):
        return self.block(x)
|
||||
|
||||
|
||||
class Encoder(nn.Module):
    """Strided convolutional encoder: input conv -> EncoderBlocks (one per
    stride) -> optional SLSTM -> Snake + output conv to the latent width."""

    def __init__(
        self,
        d_model: int = 64,
        strides: list = [2, 4, 8, 8],
        d_latent: int = 64,
        causal: bool = False,
        lstm: int = 2,
    ):
        super().__init__()
        stages = [
            SConv1d(1, d_model, kernel_size=7, padding=3, causal=causal, norm='weight_norm')
        ]

        # Each stage downsamples time by `stride` while doubling channels.
        for stride in strides:
            d_model *= 2
            stages.append(EncoderBlock(d_model, stride=stride, causal=causal))

        # Optional recurrent bottleneck (lstm = number of layers, 0 disables).
        self.use_lstm = lstm
        if lstm:
            stages.append(SLSTM(d_model, lstm))

        # Final projection to the latent dimensionality.
        stages.extend([
            Snake1d(d_model),
            SConv1d(d_model, d_latent, kernel_size=3, padding=1, causal=causal, norm='weight_norm'),
        ])

        # Wrap the stage list into nn.Sequential.
        self.block = nn.Sequential(*stages)
        self.enc_dim = d_model

    def forward(self, x):
        return self.block(x)

    def reset_cache(self):
        """Recursively clear streaming state on every SConv1d / SLSTM
        submodule inside ``self.block``."""

        def _reset(module):
            if isinstance(module, (SConv1d, SLSTM)):
                module.reset_cache()
                return
            for child in module.children():
                _reset(child)

        _reset(self.block)
|
||||
|
||||
|
||||
class DecoderBlock(nn.Module):
    """Transposed-conv upsampling stage followed by three increasingly-dilated
    ResidualUnits (mirror of EncoderBlock)."""

    def __init__(self, input_dim: int = 16, output_dim: int = 8, stride: int = 1, causal: bool = False):
        super().__init__()
        upsample = SConvTranspose1d(
            input_dim,
            output_dim,
            kernel_size=2 * stride,
            stride=stride,
            padding=math.ceil(stride / 2),
            causal=causal,
            norm='weight_norm'
        )
        self.block = nn.Sequential(
            Snake1d(input_dim),
            upsample,
            ResidualUnit(output_dim, dilation=1, causal=causal),
            ResidualUnit(output_dim, dilation=3, causal=causal),
            ResidualUnit(output_dim, dilation=9, causal=causal),
        )

    def forward(self, x):
        return self.block(x)
|
||||
|
||||
|
||||
class Decoder(nn.Module):
    """Mirror of Encoder: input conv -> optional SLSTM -> DecoderBlocks (one
    per rate) -> Snake + output conv + tanh to waveform range."""

    def __init__(
        self,
        input_channel,
        channels,
        rates,
        d_out: int = 1,
        causal: bool = False,
        lstm: int = 2,
    ):
        super().__init__()
        stages = [
            SConv1d(input_channel, channels, kernel_size=7, padding=3, causal=causal, norm='weight_norm')
        ]

        if lstm:
            stages.append(SLSTM(channels, num_layers=lstm))

        # Each stage upsamples time by `stride` while halving channel width.
        for i, stride in enumerate(rates):
            in_ch = channels // 2 ** i
            out_ch = channels // 2 ** (i + 1)
            stages.append(DecoderBlock(in_ch, out_ch, stride, causal=causal))

        # NOTE(review): `out_ch` from the last loop iteration is reused below,
        # so `rates` must be non-empty — same implicit constraint as before.
        stages.extend([
            Snake1d(out_ch),
            SConv1d(out_ch, d_out, kernel_size=7, padding=3, causal=causal, norm='weight_norm'),
            nn.Tanh(),
        ])

        self.model = nn.Sequential(*stages)

    def forward(self, x):
        return self.model(x)
|
||||
|
||||
|
||||
class DAC(BaseModel, CodecMixin):
    """Descript Audio Codec: conv Encoder -> ResidualVectorQuantize -> conv
    Decoder, with CodecMixin providing chunked compress()/decompress()."""

    def __init__(
        self,
        encoder_dim: int = 64,
        encoder_rates: List[int] = [2, 4, 8, 8],
        latent_dim: int = None,
        decoder_dim: int = 1536,
        decoder_rates: List[int] = [8, 8, 4, 2],
        n_codebooks: int = 9,
        codebook_size: int = 1024,
        codebook_dim: Union[int, list] = 8,
        quantizer_dropout: bool = False,
        sample_rate: int = 44100,
        lstm: int = 2,
        causal: bool = False,
    ):
        super().__init__()

        self.encoder_dim = encoder_dim
        self.encoder_rates = encoder_rates
        self.decoder_dim = decoder_dim
        self.decoder_rates = decoder_rates
        self.sample_rate = sample_rate

        # Default latent width = encoder's final channel count (doubles once
        # per downsampling stage).
        if latent_dim is None:
            latent_dim = encoder_dim * (2 ** len(encoder_rates))

        self.latent_dim = latent_dim

        # Total downsampling factor: input samples per code frame.
        self.hop_length = np.prod(encoder_rates)
        self.encoder = Encoder(encoder_dim, encoder_rates, latent_dim, causal=causal, lstm=lstm)

        self.n_codebooks = n_codebooks
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim
        self.quantizer = ResidualVectorQuantize(
            input_dim=latent_dim,
            n_codebooks=n_codebooks,
            codebook_size=codebook_size,
            codebook_dim=codebook_dim,
            quantizer_dropout=quantizer_dropout,
        )

        self.decoder = Decoder(
            latent_dim,
            decoder_dim,
            decoder_rates,
            lstm=lstm,
            causal=causal,
        )
        self.sample_rate = sample_rate
        self.apply(init_weights)

        # Alignment delay (samples) used by CodecMixin's chunked compress().
        self.delay = self.get_delay()

    def preprocess(self, audio_data, sample_rate):
        """Validate the sample rate and right-pad audio to a whole number of
        hop lengths so the encoder produces an integral frame count."""
        if sample_rate is None:
            sample_rate = self.sample_rate
        assert sample_rate == self.sample_rate

        length = audio_data.shape[-1]
        right_pad = math.ceil(length / self.hop_length) * self.hop_length - length
        audio_data = nn.functional.pad(audio_data, (0, right_pad))

        return audio_data

    def encode(
        self,
        audio_data: torch.Tensor,
        n_quantizers: int = None,
    ):
        """Encode given audio data and return quantized latent codes

        Parameters
        ----------
        audio_data : Tensor[B x 1 x T]
            Audio data to encode
        n_quantizers : int, optional
            Number of quantizers to use, by default None
            If None, all quantizers are used.

        Returns
        -------
        tuple
            A 5-tuple ``(z, codes, latents, commitment_loss, codebook_loss)``:
            "z" : Tensor[B x D x T]
                Quantized continuous representation of input
            "codes" : Tensor[B x N x T]
                Codebook indices for each codebook
                (quantized discrete representation of input)
            "latents" : Tensor[B x N*D x T]
                Projected latents (continuous representation of input before quantization)
            "vq/commitment_loss" : Tensor[1]
                Commitment loss to train encoder to predict vectors closer to codebook
                entries
            "vq/codebook_loss" : Tensor[1]
                Codebook loss to update the codebook
        """
        z = self.encoder(audio_data)
        z, codes, latents, commitment_loss, codebook_loss = self.quantizer(
            z, n_quantizers
        )
        return z, codes, latents, commitment_loss, codebook_loss

    def decode(self, z: torch.Tensor):
        """Decode given latent codes and return audio data

        Parameters
        ----------
        z : Tensor[B x D x T]
            Quantized continuous representation of input

        Returns
        -------
        Tensor[B x 1 x length]
            Decoded audio data.
        """
        return self.decoder(z)

    def forward(
        self,
        audio_data: torch.Tensor,
        sample_rate: int = None,
        n_quantizers: int = None,
    ):
        """Model forward pass

        Parameters
        ----------
        audio_data : Tensor[B x 1 x T]
            Audio data to encode
        sample_rate : int, optional
            Sample rate of audio data in Hz, by default None
            If None, defaults to `self.sample_rate`
        n_quantizers : int, optional
            Number of quantizers to use, by default None.
            If None, all quantizers are used.

        Returns
        -------
        dict
            A dictionary with the following keys:
            "z" : Tensor[B x D x T]
                Quantized continuous representation of input
            "codes" : Tensor[B x N x T]
                Codebook indices for each codebook
                (quantized discrete representation of input)
            "latents" : Tensor[B x N*D x T]
                Projected latents (continuous representation of input before quantization)
            "vq/commitment_loss" : Tensor[1]
                Commitment loss to train encoder to predict vectors closer to codebook
                entries
            "vq/codebook_loss" : Tensor[1]
                Codebook loss to update the codebook
            "audio" : Tensor[B x 1 x length]
                Decoded audio data.
        """
        length = audio_data.shape[-1]
        audio_data = self.preprocess(audio_data, sample_rate)
        z, codes, latents, commitment_loss, codebook_loss = self.encode(
            audio_data, n_quantizers
        )

        x = self.decode(z)
        return {
            # Trim the preprocess() padding so output length matches input.
            "audio": x[..., :length],
            "z": z,
            "codes": codes,
            "latents": latents,
            "vq/commitment_loss": commitment_loss,
            "vq/codebook_loss": codebook_loss,
        }
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Smoke test / diagnostics: parameter counts, receptive field estimate,
    # and a compress/decompress round trip on random audio.
    import numpy as np
    from functools import partial

    model = DAC().to("cpu")

    # Annotate every module's repr with its parameter count (in millions).
    for n, m in model.named_modules():
        o = m.extra_repr()
        p = sum([np.prod(p.size()) for p in m.parameters()])
        fn = lambda o, p: o + f" {p/1e6:<.3f}M params."
        setattr(m, "extra_repr", partial(fn, o=o, p=p))
    print(model)
    print("Total # of params: ", sum([np.prod(p.size()) for p in model.parameters()]))

    length = 88200 * 2
    x = torch.randn(1, 1, length).to(model.device)
    x.requires_grad_(True)
    x.retain_grad()

    # Make a forward pass
    out = model(x)["audio"]
    print("Input shape:", x.shape)
    print("Output shape:", out.shape)

    # Create gradient variable: a single impulse at the center output sample.
    grad = torch.zeros_like(out)
    grad[:, :, grad.shape[-1] // 2] = 1

    # Make a backward pass
    out.backward(grad)

    # Check non-zero values: input samples with non-zero grad are exactly
    # those that influence the center output sample.
    gradmap = x.grad.squeeze(0)
    gradmap = (gradmap != 0).sum(0)  # sum across features
    rf = (gradmap != 0).sum()

    print(f"Receptive field: {rf.item()}")

    # Round-trip a minute of noise through compress/decompress.
    x = AudioSignal(torch.randn(1, 1, 44100 * 60), 44100)
    model.decompress(model.compress(x, verbose=True), verbose=True)
|
||||
228
qwen3-tts-backend/indextts/s2mel/dac/model/discriminator.py
Normal file
228
qwen3-tts-backend/indextts/s2mel/dac/model/discriminator.py
Normal file
@@ -0,0 +1,228 @@
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
from audiotools import AudioSignal
|
||||
from audiotools import ml
|
||||
from audiotools import STFTParams
|
||||
from einops import rearrange
|
||||
from torch.nn.utils import weight_norm
|
||||
|
||||
|
||||
def WNConv1d(*args, **kwargs):
    """Weight-normalized ``nn.Conv1d`` followed by LeakyReLU(0.1).

    Pass ``act=False`` to get the bare convolution with no activation.
    """
    use_act = kwargs.pop("act", True)
    layer = weight_norm(nn.Conv1d(*args, **kwargs))
    return nn.Sequential(layer, nn.LeakyReLU(0.1)) if use_act else layer
|
||||
|
||||
|
||||
def WNConv2d(*args, **kwargs):
    """Weight-normalized ``nn.Conv2d`` followed by LeakyReLU(0.1).

    Pass ``act=False`` to get the bare convolution with no activation.
    """
    use_act = kwargs.pop("act", True)
    layer = weight_norm(nn.Conv2d(*args, **kwargs))
    return nn.Sequential(layer, nn.LeakyReLU(0.1)) if use_act else layer
|
||||
|
||||
|
||||
class MPD(nn.Module):
    """Multi-period discriminator: folds the waveform into a 2-D
    (frames, period) grid and runs a stack of 2-D convolutions over it.
    forward() returns every intermediate feature map plus the final logits."""

    def __init__(self, period):
        super().__init__()
        self.period = period
        channel_plan = [(1, 32), (32, 128), (128, 512), (512, 1024)]
        stack = [
            WNConv2d(cin, cout, (5, 1), (3, 1), padding=(2, 0))
            for cin, cout in channel_plan
        ]
        stack.append(WNConv2d(1024, 1024, (5, 1), 1, padding=(2, 0)))
        self.convs = nn.ModuleList(stack)
        self.conv_post = WNConv2d(
            1024, 1, kernel_size=(3, 1), padding=(1, 0), act=False
        )

    def pad_to_period(self, x):
        """Reflect-pad the time axis so its length is a multiple of the period.

        NOTE: when the length is already a multiple, a full extra period is
        still appended (matches the original behavior).
        """
        t = x.shape[-1]
        return F.pad(x, (0, self.period - t % self.period), mode="reflect")

    def forward(self, x):
        fmap = []

        x = self.pad_to_period(x)
        x = rearrange(x, "b c (l p) -> b c l p", p=self.period)

        for conv in self.convs:
            x = conv(x)
            fmap.append(x)

        fmap.append(self.conv_post(x))

        return fmap
|
||||
|
||||
|
||||
class MSD(nn.Module):
    """Multi-scale discriminator: grouped 1-D conv stack applied to audio
    resampled down to ``sample_rate // rate``."""

    def __init__(self, rate: int = 1, sample_rate: int = 44100):
        super().__init__()
        # (in_ch, out_ch, kernel, stride, groups, padding)
        specs = [
            (1, 16, 15, 1, 1, 7),
            (16, 64, 41, 4, 4, 20),
            (64, 256, 41, 4, 16, 20),
            (256, 1024, 41, 4, 64, 20),
            (1024, 1024, 41, 4, 256, 20),
            (1024, 1024, 5, 1, 1, 2),
        ]
        self.convs = nn.ModuleList(
            [WNConv1d(ci, co, k, s, groups=g, padding=p) for ci, co, k, s, g, p in specs]
        )
        self.conv_post = WNConv1d(1024, 1, 3, 1, padding=1, act=False)
        self.sample_rate = sample_rate
        self.rate = rate

    def forward(self, x):
        # Downsample to this discriminator's working rate before convolving.
        signal = AudioSignal(x, self.sample_rate)
        signal.resample(self.sample_rate // self.rate)
        x = signal.audio_data

        fmap = []

        for conv in self.convs:
            x = conv(x)
            fmap.append(x)
        fmap.append(self.conv_post(x))

        return fmap
|
||||
|
||||
|
||||
# Fractional frequency-band edges (as a share of the FFT bins) that MRD
# splits the spectrogram into.
BANDS = [(0.0, 0.1), (0.1, 0.25), (0.25, 0.5), (0.5, 0.75), (0.75, 1.0)]


class MRD(nn.Module):
    def __init__(
        self,
        window_length: int,
        hop_factor: float = 0.25,
        sample_rate: int = 44100,
        bands: list = BANDS,
    ):
        """Complex multi-band spectrogram discriminator.
        Parameters
        ----------
        window_length : int
            Window length of STFT.
        hop_factor : float, optional
            Hop factor of the STFT, defaults to ``0.25 * window_length``.
        sample_rate : int, optional
            Sampling rate of audio in Hz, by default 44100
        bands : list, optional
            Bands to run discriminator over.
        """
        super().__init__()

        self.window_length = window_length
        self.hop_factor = hop_factor
        self.sample_rate = sample_rate
        self.stft_params = STFTParams(
            window_length=window_length,
            hop_length=int(window_length * hop_factor),
            match_stride=True,
        )

        # Convert fractional band edges into absolute FFT-bin index ranges.
        n_fft = window_length // 2 + 1
        bands = [(int(b[0] * n_fft), int(b[1] * n_fft)) for b in bands]
        self.bands = bands

        # One independent conv stack per band; (1, 2) strides downsample the
        # frequency axis while keeping time resolution.
        ch = 32
        convs = lambda: nn.ModuleList(
            [
                WNConv2d(2, ch, (3, 9), (1, 1), padding=(1, 4)),
                WNConv2d(ch, ch, (3, 9), (1, 2), padding=(1, 4)),
                WNConv2d(ch, ch, (3, 9), (1, 2), padding=(1, 4)),
                WNConv2d(ch, ch, (3, 9), (1, 2), padding=(1, 4)),
                WNConv2d(ch, ch, (3, 3), (1, 1), padding=(1, 1)),
            ]
        )
        self.band_convs = nn.ModuleList([convs() for _ in range(len(self.bands))])
        self.conv_post = WNConv2d(ch, 1, (3, 3), (1, 1), padding=(1, 1), act=False)

    def spectrogram(self, x):
        """Compute the complex STFT as 2 real channels and split it along the
        frequency axis into the configured bands."""
        x = AudioSignal(x, self.sample_rate, stft_params=self.stft_params)
        x = torch.view_as_real(x.stft())
        x = rearrange(x, "b 1 f t c -> (b 1) c t f")
        # Split into bands
        x_bands = [x[..., b[0] : b[1]] for b in self.bands]
        return x_bands

    def forward(self, x):
        x_bands = self.spectrogram(x)
        fmap = []

        x = []
        # Run each band through its own conv stack, collecting every
        # intermediate feature map along the way.
        for band, stack in zip(x_bands, self.band_convs):
            for layer in stack:
                band = layer(band)
                fmap.append(band)
            x.append(band)

        # Re-join the band features along frequency and project to logits.
        x = torch.cat(x, dim=-1)
        x = self.conv_post(x)
        fmap.append(x)

        return fmap
|
||||
|
||||
|
||||
class Discriminator(nn.Module):
    """Ensemble discriminator combining MPD, MSD and MRD sub-discriminators.

    Parameters
    ----------
    rates : list, optional
        sampling rates (in Hz) to run MSD at, by default []
        If empty, MSD is not used.
    periods : list, optional
        periods (of samples) to run MPD at, by default [2, 3, 5, 7, 11]
    fft_sizes : list, optional
        Window sizes of the FFT to run MRD at, by default [2048, 1024, 512]
    sample_rate : int, optional
        Sampling rate of audio in Hz, by default 44100
    bands : list, optional
        Bands to run MRD at, by default `BANDS`
    """

    def __init__(
        self,
        rates: list = [],
        periods: list = [2, 3, 5, 7, 11],
        fft_sizes: list = [2048, 1024, 512],
        sample_rate: int = 44100,
        bands: list = BANDS,
    ):
        super().__init__()
        self.discriminators = nn.ModuleList(
            [MPD(p) for p in periods]
            + [MSD(r, sample_rate=sample_rate) for r in rates]
            + [MRD(f, sample_rate=sample_rate, bands=bands) for f in fft_sizes]
        )

    def preprocess(self, y):
        # Remove the DC offset, then peak-normalize to 0.8 full scale
        # (epsilon avoids division by zero on silent input).
        y = y - y.mean(dim=-1, keepdims=True)
        return 0.8 * y / (y.abs().max(dim=-1, keepdim=True)[0] + 1e-9)

    def forward(self, x):
        x = self.preprocess(x)
        return [d(x) for d in self.discriminators]
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Quick smoke test: run one second of silence through the full ensemble
    # and report the statistics of every feature map.
    disc = Discriminator()
    audio = torch.zeros(1, 1, 44100)
    for disc_idx, fmaps in enumerate(disc(audio)):
        print(f"disc{disc_idx}")
        for fmap in fmaps:
            print(fmap.shape, fmap.mean(), fmap.min(), fmap.max())
        print()
|
||||
320
qwen3-tts-backend/indextts/s2mel/dac/model/encodec.py
Normal file
320
qwen3-tts-backend/indextts/s2mel/dac/model/encodec.py
Normal file
@@ -0,0 +1,320 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
"""Convolutional layers wrappers and utilities."""
|
||||
|
||||
import math
|
||||
import typing as tp
|
||||
import warnings
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
from torch.nn import functional as F
|
||||
from torch.nn.utils import spectral_norm, weight_norm
|
||||
|
||||
import typing as tp
|
||||
|
||||
import einops
|
||||
|
||||
|
||||
class ConvLayerNorm(nn.LayerNorm):
    """
    Convolution-friendly LayerNorm that moves channels to last dimensions
    before running the normalization and moves them back to original position right after.
    """
    def __init__(self, normalized_shape: tp.Union[int, tp.List[int], torch.Size], **kwargs):
        super().__init__(normalized_shape, **kwargs)

    def forward(self, x):
        # (b, ..., t) -> (b, t, ...): move the time axis out of the way so
        # LayerNorm normalizes over the trailing channel dims. Equivalent to
        # einops.rearrange(x, 'b ... t -> b t ...').
        x = x.movedim(-1, 1)
        x = super().forward(x)
        # Restore the original layout: (b, t, ...) -> (b, ..., t).
        x = x.movedim(1, -1)
        # Bug fix: the original ended with a bare `return`, so forward()
        # always returned None.
        return x
|
||||
|
||||
|
||||
# All normalization names accepted by the conv wrappers in this module.
CONV_NORMALIZATIONS = frozenset(['none', 'weight_norm', 'spectral_norm',
                                 'time_layer_norm', 'layer_norm', 'time_group_norm'])


def apply_parametrization_norm(module: nn.Module, norm: str = 'none') -> nn.Module:
    """Reparametrize ``module`` with weight or spectral norm when requested.

    Only 'weight_norm' and 'spectral_norm' wrap the module; every other
    value in ``CONV_NORMALIZATIONS`` returns it unchanged.
    """
    assert norm in CONV_NORMALIZATIONS
    if norm == 'weight_norm':
        return weight_norm(module)
    if norm == 'spectral_norm':
        return spectral_norm(module)
    # Already validated against CONV_NORMALIZATIONS; the remaining choices
    # need no reparametrization here (handled by get_norm_module instead).
    return module
|
||||
|
||||
|
||||
def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs) -> nn.Module:
    """Return the normalization module to apply after ``module``.

    If ``causal`` is True, raises for normalizations that cannot be evaluated
    causally. Norms handled by reparametrization (weight/spectral) yield an
    ``nn.Identity`` here.
    """
    assert norm in CONV_NORMALIZATIONS
    if norm == 'layer_norm':
        assert isinstance(module, nn.modules.conv._ConvNd)
        return ConvLayerNorm(module.out_channels, **norm_kwargs)
    if norm == 'time_group_norm':
        if causal:
            raise ValueError("GroupNorm doesn't support causal evaluation.")
        assert isinstance(module, nn.modules.conv._ConvNd)
        return nn.GroupNorm(1, module.out_channels, **norm_kwargs)
    # Everything else is either a no-op or applied via
    # apply_parametrization_norm, so no extra module is needed.
    return nn.Identity()
|
||||
|
||||
|
||||
def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int,
                                 padding_total: int = 0) -> int:
    """See `pad_for_conv1d`.

    Return how many extra trailing samples are needed so that the final
    convolution window is complete, i.e. no input sample gets dropped by
    the striding.
    """
    current = x.shape[-1]
    # Fractional number of frames the conv would see on this input.
    frames = (current - kernel_size + padding_total) / stride + 1
    # Length that yields exactly ceil(frames) full windows.
    target = (math.ceil(frames) - 1) * stride + (kernel_size - padding_total)
    return target - current
|
||||
|
||||
|
||||
def pad_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, padding_total: int = 0):
    """Zero-pad `x` on the right so that the last convolution window is full.

    Without this, even with symmetric padding, a transposed convolution cannot
    rebuild an output of the original length — some trailing time steps would
    be lost. For instance, with total padding = 4, kernel size = 4, stride = 2:
        0 0 1 2 3 4 5 0 0   # (0s are padding)
            1   2   3       # (output frames of a convolution, last 0 is never used)
        0 0 1 2 3 4 5 0     # (output of tr. conv., but pos. 5 is going to get removed as padding)
            1 2 3 4         # once you removed padding, we are missing one time step !
    """
    tail = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
    return F.pad(x, (0, tail))
|
||||
|
||||
|
||||
def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'zero', value: float = 0.):
    """Pad the last dimension of `x` by `paddings = (left, right)` samples.

    Thin wrapper around `F.pad` whose only extra job is supporting 'reflect'
    mode on inputs shorter than the requested padding: reflection needs
    length > pad, so the signal is temporarily zero-extended on the right,
    reflected, and the surplus cropped off again.

    NOTE(review): the default mode 'zero' is not a valid `F.pad` mode
    ('constant' is); all callers in this file pass an explicit mode, so the
    default is kept unchanged here to preserve the interface.
    """
    left, right = paddings
    assert left >= 0 and right >= 0, (left, right)
    if mode != 'reflect':
        return F.pad(x, paddings, mode, value)
    # Reflect padding: make sure the signal is strictly longer than the
    # largest pad before reflecting.
    length = x.shape[-1]
    extra = max(max(left, right) - length + 1, 0)
    if extra > 0:
        x = F.pad(x, (0, extra))
    out = F.pad(x, paddings, mode, value)
    # Crop the temporary zero-extension back off the right end.
    return out[..., :out.shape[-1] - extra]
|
||||
|
||||
|
||||
def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]):
    """Strip `paddings = (left, right)` samples off the last dimension of `x`.

    Inverse of a 1d padding; works with zero-width pads. Only for 1d!
    """
    left, right = paddings
    assert left >= 0 and right >= 0, (left, right)
    assert (left + right) <= x.shape[-1]
    # Slice with an explicit end so right == 0 keeps the full tail.
    return x[..., left: x.shape[-1] - right]
|
||||
|
||||
|
||||
class NormConv1d(nn.Module):
    """Conv1d bundled with its normalization layer.

    Presents a single uniform interface regardless of whether the requested
    normalization is a weight parametrization or a separate module.
    """
    def __init__(self, *args, causal: bool = False, norm: str = 'none',
                 norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
        super().__init__()
        inner = nn.Conv1d(*args, **kwargs)
        self.conv = apply_parametrization_norm(inner, norm)
        self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs)
        self.norm_type = norm

    def forward(self, x):
        # Convolution first, then the (possibly identity) norm module.
        return self.norm(self.conv(x))
|
||||
|
||||
|
||||
class NormConv2d(nn.Module):
    """Conv2d bundled with its normalization layer.

    Presents a single uniform interface regardless of whether the requested
    normalization is a weight parametrization or a separate module. Causal
    evaluation is not applicable in 2d, so the norm is built non-causal.
    """
    def __init__(self, *args, norm: str = 'none',
                 norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
        super().__init__()
        inner = nn.Conv2d(*args, **kwargs)
        self.conv = apply_parametrization_norm(inner, norm)
        self.norm = get_norm_module(self.conv, causal=False, norm=norm, **norm_kwargs)
        self.norm_type = norm

    def forward(self, x):
        # Convolution first, then the (possibly identity) norm module.
        return self.norm(self.conv(x))
|
||||
|
||||
|
||||
class NormConvTranspose1d(nn.Module):
    """ConvTranspose1d bundled with its normalization layer.

    Presents a single uniform interface regardless of whether the requested
    normalization is a weight parametrization or a separate module.
    """
    def __init__(self, *args, causal: bool = False, norm: str = 'none',
                 norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
        super().__init__()
        inner = nn.ConvTranspose1d(*args, **kwargs)
        self.convtr = apply_parametrization_norm(inner, norm)
        self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs)
        self.norm_type = norm

    def forward(self, x):
        # Transposed convolution first, then the (possibly identity) norm module.
        return self.norm(self.convtr(x))
|
||||
|
||||
|
||||
class NormConvTranspose2d(nn.Module):
    """Wrapper around ConvTranspose2d and normalization applied to this conv
    to provide a uniform interface across normalization approaches.

    Causal evaluation is not applicable in 2d, so the norm module is always
    built with `causal=False`.
    """
    def __init__(self, *args, norm: str = 'none',
                 norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
        super().__init__()
        self.convtr = apply_parametrization_norm(nn.ConvTranspose2d(*args, **kwargs), norm)
        self.norm = get_norm_module(self.convtr, causal=False, norm=norm, **norm_kwargs)
        # Consistency fix: NormConv1d, NormConv2d and NormConvTranspose1d all
        # record the requested normalization; this class was missing it.
        self.norm_type = norm

    def forward(self, x):
        x = self.convtr(x)
        x = self.norm(x)
        return x
|
||||
|
||||
|
||||
class SConv1d(nn.Module):
    """Conv1d with some builtin handling of asymmetric or causal padding
    and normalization.

    On top of the plain convolution this wrapper:
      * adds the extra right padding needed so the last window is full
        (see `get_extra_padding_for_conv1d`),
      * pads only on the left when `causal=True`,
      * can keep a cache of the most recent input frames so the module can be
        fed a stream chunk by chunk; enable streaming via `reset_cache()`.
    """
    def __init__(self, in_channels: int, out_channels: int,
                 kernel_size: int, stride: int = 1, dilation: int = 1,
                 groups: int = 1, bias: bool = True, causal: bool = False,
                 norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {},
                 pad_mode: str = 'reflect', **kwargs):
        super().__init__()
        # warn user on unusual setup between dilation and stride
        if stride > 1 and dilation > 1:
            warnings.warn('SConv1d has been initialized with stride > 1 and dilation > 1'
                          f' (kernel_size={kernel_size} stride={stride}, dilation={dilation}).')
        self.conv = NormConv1d(in_channels, out_channels, kernel_size, stride,
                               dilation=dilation, groups=groups, bias=bias, causal=causal,
                               norm=norm, norm_kwargs=norm_kwargs)
        self.causal = causal
        self.pad_mode = pad_mode

        # Streaming is off by default. NOTE(review): `self.cache` itself is only
        # created by `reset_cache()` (or lazily in `forward`), so enable
        # streaming through `reset_cache()` rather than by flipping this flag.
        self.cache_enabled = False

    def reset_cache(self):
        """Reset the cache when starting a new stream."""
        self.cache = None
        self.cache_enabled = True

    def forward(self, x):
        # Input is expected in convolutional layout (batch, channels, time).
        B, C, T = x.shape
        kernel_size = self.conv.conv.kernel_size[0]
        stride = self.conv.conv.stride[0]
        dilation = self.conv.conv.dilation[0]
        kernel_size = (kernel_size - 1) * dilation + 1  # effective kernel size with dilations
        padding_total = kernel_size - stride
        extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)

        if self.causal:
            # Left padding for causal
            if self.cache_enabled and self.cache is not None:
                # Concatenate the cache (previous inputs) with the new input for streaming
                x = torch.cat([self.cache, x], dim=2)
            else:
                # First chunk (or non-streaming use): pad the left with
                # `pad_mode`; `extra_padding` on the right completes the last window.
                x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode)
        else:
            # Asymmetric padding required for odd strides
            padding_right = padding_total // 2
            padding_left = padding_total - padding_right
            x = pad1d(x, (padding_left, padding_right + extra_padding), mode=self.pad_mode)

        # Store the most recent input frames for future cache use
        if self.cache_enabled:
            if self.cache is None:
                # Initialize cache with zeros (at the start of streaming)
                # NOTE(review): this zero cache is immediately replaced below
                # whenever kernel_size > 1, so it only matters for kernel_size == 1.
                self.cache = torch.zeros(B, C, kernel_size - 1, device=x.device)
            # Update the cache by storing the latest input frames
            # (kernel_size here is the *effective* kernel size computed above).
            if kernel_size > 1:
                self.cache = x[:, :, -kernel_size + 1:].detach()  # Only store the necessary frames
        return self.conv(x)
|
||||
|
||||
|
||||
|
||||
class SConvTranspose1d(nn.Module):
    """ConvTranspose1d with some builtin handling of asymmetric or causal padding
    and normalization.

    The fixed padding introduced by the transposed convolution is trimmed in
    `forward`; for causal setups `trim_right_ratio` controls how much of it is
    removed from the right (1.0 = everything).
    """
    def __init__(self, in_channels: int, out_channels: int,
                 kernel_size: int, stride: int = 1, causal: bool = False,
                 norm: str = 'none', trim_right_ratio: float = 1.,
                 norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
        super().__init__()
        self.convtr = NormConvTranspose1d(in_channels, out_channels, kernel_size, stride,
                                          causal=causal, norm=norm, norm_kwargs=norm_kwargs)
        self.causal = causal
        self.trim_right_ratio = trim_right_ratio
        assert self.causal or self.trim_right_ratio == 1., \
            "`trim_right_ratio` != 1.0 only makes sense for causal convolutions"
        assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1.

    def forward(self, x):
        ks = self.convtr.convtr.kernel_size[0]
        hop = self.convtr.convtr.stride[0]
        pad_total = ks - hop

        y = self.convtr(x)

        # We will only trim fixed padding. Extra padding from `pad_for_conv1d`
        # would be removed at the very end, when keeping only the right length
        # for the output, as removing it here would require also passing the
        # length at the matching layer in the encoder.
        if self.causal:
            # Trim the right side according to the specified ratio
            # (trim_right_ratio = 1.0 removes everything from the right).
            trim_right = math.ceil(pad_total * self.trim_right_ratio)
        else:
            # Asymmetric trim matching the asymmetric padding for odd strides.
            trim_right = pad_total // 2
        trim_left = pad_total - trim_right
        return unpad1d(y, (trim_left, trim_right))
|
||||
|
||||
class SLSTM(nn.Module):
    """
    LSTM that hides hidden-state bookkeeping and accepts convolutional layout.

    Input and output are (B, C, T); internally the data is permuted to the
    (T, B, C) layout `nn.LSTM` expects. When streaming is enabled via
    `reset_cache()` (and the module is in eval mode), the hidden state is
    carried over between calls.
    """
    def __init__(self, dimension: int, num_layers: int = 2, skip: bool = True):
        super().__init__()
        self.skip = skip
        self.lstm = nn.LSTM(dimension, dimension, num_layers)
        self.hidden = None
        self.cache_enabled = False

    def forward(self, x):
        # (B, C, T) -> (T, B, C) for nn.LSTM.
        seq_first = x.permute(2, 0, 1)
        streaming = self.cache_enabled and not self.training
        if streaming:
            # Carry the hidden state across chunks.
            out, self.hidden = self.lstm(seq_first, self.hidden)
        else:
            out, _ = self.lstm(seq_first)
        if self.skip:
            out = out + seq_first
        # Back to convolutional layout (B, C, T).
        return out.permute(1, 2, 0)

    def reset_cache(self):
        """Drop any carried hidden state and enable streaming mode."""
        self.hidden = None
        self.cache_enabled = True
|
||||
Reference in New Issue
Block a user