refactor: rename backend/frontend dirs and remove NovelWriter submodule
- Rename qwen3-tts-backend → canto-backend
- Rename qwen3-tts-frontend → canto-frontend
- Remove NovelWriter embedded repo

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
canto-backend/indextts/gpt/conformer/__init__.py  |   0  (new file)
canto-backend/indextts/gpt/conformer/attention.py | 312  (new file)
@@ -0,0 +1,312 @@
# Copyright (c) 2019 Shigeki Karita
#               2020 Mobvoi Inc (Binbin Zhang)
#               2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Multi-Head Attention layer definition."""

import math
from typing import Tuple

import torch
from torch import nn


class MultiHeadedAttention(nn.Module):
    """Multi-Head Attention layer.

    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.

    """
    def __init__(self, n_head: int, n_feat: int, dropout_rate: float):
        """Construct a MultiHeadedAttention object."""
        super().__init__()
        assert n_feat % n_head == 0
        # We assume d_v always equals d_k
        self.d_k = n_feat // n_head
        self.h = n_head
        self.linear_q = nn.Linear(n_feat, n_feat)
        self.linear_k = nn.Linear(n_feat, n_feat)
        self.linear_v = nn.Linear(n_feat, n_feat)
        self.linear_out = nn.Linear(n_feat, n_feat)
        self.dropout = nn.Dropout(p=dropout_rate)

    def forward_qkv(
        self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Transform query, key and value.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).

        Returns:
            torch.Tensor: Transformed query tensor, size
                (#batch, n_head, time1, d_k).
            torch.Tensor: Transformed key tensor, size
                (#batch, n_head, time2, d_k).
            torch.Tensor: Transformed value tensor, size
                (#batch, n_head, time2, d_k).

        """
        n_batch = query.size(0)
        q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
        k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
        v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
        q = q.transpose(1, 2)  # (batch, head, time1, d_k)
        k = k.transpose(1, 2)  # (batch, head, time2, d_k)
        v = v.transpose(1, 2)  # (batch, head, time2, d_k)

        return q, k, v

    def forward_attention(
        self, value: torch.Tensor, scores: torch.Tensor,
        mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool)
    ) -> torch.Tensor:
        """Compute attention context vector.

        Args:
            value (torch.Tensor): Transformed value, size
                (#batch, n_head, time2, d_k).
            scores (torch.Tensor): Attention score, size
                (#batch, n_head, time1, time2).
            mask (torch.Tensor): Mask, size (#batch, 1, time2) or
                (#batch, time1, time2), (0, 0, 0) means fake mask.

        Returns:
            torch.Tensor: Transformed value (#batch, time1, d_model)
                weighted by the attention score (#batch, time1, time2).

        """
        n_batch = value.size(0)
        # NOTE(xcsong): When will `if mask.size(2) > 0` be True?
        #   1. onnx(16/4) [WHY? Because we feed real cache & real mask for the
        #      1st chunk to ease the onnx export.]
        #   2. pytorch training
        if mask.size(2) > 0:  # time2 > 0
            mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
            # For the last chunk, time2 might be larger than scores.size(-1)
            mask = mask[:, :, :, :scores.size(-1)]  # (batch, 1, *, time2)
            scores = scores.masked_fill(mask, -float('inf'))
            attn = torch.softmax(scores, dim=-1).masked_fill(
                mask, 0.0)  # (batch, head, time1, time2)
        # NOTE(xcsong): When will `if mask.size(2) > 0` be False?
        #   1. onnx(16/-1, -1/-1, 16/0)
        #   2. jit (16/-1, -1/-1, 16/0, 16/4)
        else:
            attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)

        p_attn = self.dropout(attn)
        x = torch.matmul(p_attn, value)  # (batch, head, time1, d_k)
        x = (x.transpose(1, 2).contiguous().view(n_batch, -1,
                                                 self.h * self.d_k)
             )  # (batch, time1, d_model)

        return self.linear_out(x)  # (batch, time1, d_model)

    def forward(self, query: torch.Tensor, key: torch.Tensor,
                value: torch.Tensor,
                mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
                pos_emb: torch.Tensor = torch.empty(0),
                cache: torch.Tensor = torch.zeros((0, 0, 0, 0))
                ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute scaled dot product attention.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).
                1. When applying cross attention between decoder and encoder,
                   the batch padding mask for input is in (#batch, 1, T) shape.
                2. When applying self attention of encoder,
                   the mask is in (#batch, T, T) shape.
                3. When applying self attention of decoder,
                   the mask is in (#batch, L, L) shape.
                4. If different positions in the decoder see different blocks
                   of the encoder, such as Mocha, the passed-in mask could be
                   in (#batch, L, T) shape. But there is no such case in
                   current WeNet.
            cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`

        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
            torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`

        """
        q, k, v = self.forward_qkv(query, key, value)

        # NOTE(xcsong):
        #   when exporting an onnx model, for the 1st chunk, we feed
        #       cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)
        #       or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).
        #       In all modes, `if cache.size(0) > 0` will always be `True`
        #       and we will always do splitting and
        #       concatenation (this simplifies onnx export). Note that
        #       it's OK to concat & split zero-shaped tensors (see code below).
        #   when exporting a jit model, for the 1st chunk, we always feed
        #       cache(0, 0, 0, 0) since jit supports dynamic if-branches.
        #   >>> a = torch.ones((1, 2, 0, 4))
        #   >>> b = torch.ones((1, 2, 3, 4))
        #   >>> c = torch.cat((a, b), dim=2)
        #   >>> torch.equal(b, c)        # True
        #   >>> d = torch.split(a, 2, dim=-1)
        #   >>> torch.equal(d[0], d[1])  # True
        if cache.size(0) > 0:
            key_cache, value_cache = torch.split(
                cache, cache.size(-1) // 2, dim=-1)
            k = torch.cat([key_cache, k], dim=2)
            v = torch.cat([value_cache, v], dim=2)
        # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since
        #   it's non-trivial to calculate `next_cache_start` here.
        new_cache = torch.cat((k, v), dim=-1)

        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
        return self.forward_attention(v, scores, mask), new_cache


class RelPositionMultiHeadedAttention(MultiHeadedAttention):
    """Multi-Head Attention layer with relative position encoding.
    Paper: https://arxiv.org/abs/1901.02860
    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.
    """
    def __init__(self, n_head, n_feat, dropout_rate):
        """Construct a RelPositionMultiHeadedAttention object."""
        super().__init__(n_head, n_feat, dropout_rate)
        # linear transformation for positional encoding
        self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
        # these two learnable biases are used in matrix c and matrix d
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
        self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
        torch.nn.init.xavier_uniform_(self.pos_bias_u)
        torch.nn.init.xavier_uniform_(self.pos_bias_v)

    def rel_shift(self, x, zero_triu: bool = False):
        """Compute relative positional encoding.
        Args:
            x (torch.Tensor): Input tensor (batch, time, size).
            zero_triu (bool): If true, return the lower triangular part of
                the matrix.
        Returns:
            torch.Tensor: Output tensor.
        """

        zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1),
                               device=x.device,
                               dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=-1)

        x_padded = x_padded.view(x.size()[0],
                                 x.size()[1],
                                 x.size(3) + 1, x.size(2))
        x = x_padded[:, :, 1:].view_as(x)

        if zero_triu:
            ones = torch.ones((x.size(2), x.size(3)))
            x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]

        return x

    def forward(self, query: torch.Tensor,
                key: torch.Tensor, value: torch.Tensor,
                mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
                pos_emb: torch.Tensor = torch.empty(0),
                cache: torch.Tensor = torch.zeros((0, 0, 0, 0))
                ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute 'Scaled Dot Product Attention' with rel. positional encoding.
        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2), (0, 0, 0) means fake mask.
            pos_emb (torch.Tensor): Positional embedding tensor
                (#batch, time2, size).
            cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`
        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
            torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`
        """
        q, k, v = self.forward_qkv(query, key, value)
        q = q.transpose(1, 2)  # (batch, time1, head, d_k)

        # NOTE(xcsong): same onnx/jit cache handling as in
        #   MultiHeadedAttention.forward above; concat & split are safe even
        #   on zero-shaped cache tensors.
        if cache.size(0) > 0:
            key_cache, value_cache = torch.split(
                cache, cache.size(-1) // 2, dim=-1)
            k = torch.cat([key_cache, k], dim=2)
            v = torch.cat([value_cache, v], dim=2)
        # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since
        #   it's non-trivial to calculate `next_cache_start` here.
        new_cache = torch.cat((k, v), dim=-1)

        n_batch_pos = pos_emb.size(0)
        p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
        p = p.transpose(1, 2)  # (batch, head, time1, d_k)

        # (batch, head, time1, d_k)
        q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
        # (batch, head, time1, d_k)
        q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)

        # compute attention score
        # first compute matrix a and matrix c
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        # (batch, head, time1, time2)
        matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))

        # compute matrix b and matrix d
        # (batch, head, time1, time2)
        matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
        # Remove rel_shift since it is useless in speech recognition,
        # and it requires special attention for streaming.
        # matrix_bd = self.rel_shift(matrix_bd)

        scores = (matrix_ac + matrix_bd) / math.sqrt(
            self.d_k)  # (batch, head, time1, time2)

        return self.forward_attention(v, scores, mask), new_cache
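A quick sanity-check sketch of the streaming cache behavior described in the NOTE comments above. This is a minimal sketch, assuming the package is importable as indextts.gpt.conformer from the canto-backend directory; the sizes are arbitrary. The first call uses the fake-mask and empty-cache defaults; the second feeds back the returned cache so keys and values span both chunks.

import torch
from indextts.gpt.conformer.attention import MultiHeadedAttention

mha = MultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.0)
mha.eval()

# First chunk: defaults mean fake mask (0, 0, 0) and empty cache (0, 0, 0, 0),
# so the concat branch is skipped and new_cache is just cat((k, v), dim=-1).
x1 = torch.randn(1, 16, 256)
out1, cache = mha(x1, x1, x1)
assert out1.shape == (1, 16, 256)
assert cache.shape == (1, 4, 16, 128)   # (1, head, time1, d_k * 2)

# Second chunk: the returned cache is split back into keys/values and
# prepended, so attention now spans both chunks.
x2 = torch.randn(1, 16, 256)
out2, cache = mha(x2, x2, x2, cache=cache)
assert cache.shape == (1, 4, 32, 128)   # cache_t grows by time1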
canto-backend/indextts/gpt/conformer/embedding.py | 163  (new file)
@@ -0,0 +1,163 @@
# Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)

"""Positional Encoding Module."""

import math
from typing import Tuple, Union

import torch
import torch.nn.functional as F


class PositionalEncoding(torch.nn.Module):
    """Positional encoding.

    :param int d_model: embedding dim
    :param float dropout_rate: dropout rate
    :param int max_len: maximum input length

    PE(pos, 2i)   = sin(pos/(10000^(2i/dmodel)))
    PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel)))
    """
    def __init__(self,
                 d_model: int,
                 dropout_rate: float,
                 max_len: int = 5000,
                 reverse: bool = False):
        """Construct a PositionalEncoding object."""
        super().__init__()
        self.d_model = d_model
        self.xscale = math.sqrt(self.d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.max_len = max_len

        pe = torch.zeros(self.max_len, self.d_model)
        position = torch.arange(0, self.max_len).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2) *
            -(math.log(10000.0) / self.d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self,
                x: torch.Tensor,
                offset: Union[int, torch.Tensor] = 0) \
            -> Tuple[torch.Tensor, torch.Tensor]:
        """Add positional encoding.

        Args:
            x (torch.Tensor): Input. Its shape is (batch, time, ...)
            offset (int, torch.Tensor): position offset

        Returns:
            torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)
            torch.Tensor: for compatibility with RelPositionalEncoding
        """

        self.pe = self.pe.to(x.device)
        pos_emb = self.position_encoding(offset, x.size(1), False)
        x = x * self.xscale + pos_emb
        return self.dropout(x), self.dropout(pos_emb)

    def position_encoding(self, offset: Union[int, torch.Tensor], size: int,
                          apply_dropout: bool = True) -> torch.Tensor:
        """For getting encoding in a streaming fashion.

        Attention!!!!!
        We apply dropout only once at the whole-utterance level in a
        non-streaming way, but will call this function several times with
        increasing input size in a streaming scenario, so the dropout will
        be applied several times.

        Args:
            offset (int or torch.Tensor): start offset
            size (int): required size of position encoding

        Returns:
            torch.Tensor: Corresponding encoding
        """
        # How to subscript a Union type:
        #   https://github.com/pytorch/pytorch/issues/69434
        if isinstance(offset, int):
            assert offset + size < self.max_len
            pos_emb = self.pe[:, offset:offset + size]
        elif isinstance(offset, torch.Tensor) and offset.dim() == 0:  # scalar
            assert offset + size < self.max_len
            pos_emb = self.pe[:, offset:offset + size]
        else:  # for batched streaming decoding on GPU
            assert torch.max(offset) + size < self.max_len
            index = offset.unsqueeze(1) + \
                torch.arange(0, size).to(offset.device)  # B X T
            flag = index > 0
            # remove negative offset
            index = index * flag
            pos_emb = F.embedding(index, self.pe[0])  # B X T X d_model

        if apply_dropout:
            pos_emb = self.dropout(pos_emb)
        return pos_emb


class RelPositionalEncoding(PositionalEncoding):
    """Relative positional encoding module.
    See: Appendix B in https://arxiv.org/abs/1901.02860
    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
    """
    def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):
        """Initialize class."""
        super().__init__(d_model, dropout_rate, max_len, reverse=True)

    def forward(self,
                x: torch.Tensor,
                offset: Union[int, torch.Tensor] = 0) \
            -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute positional encoding.
        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).
        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
            torch.Tensor: Positional embedding tensor (1, time, `*`).
        """
        self.pe = self.pe.to(x.device)
        x = x * self.xscale
        pos_emb = self.position_encoding(offset, x.size(1), False)
        return self.dropout(x), self.dropout(pos_emb)


class NoPositionalEncoding(torch.nn.Module):
    """No position encoding."""
    def __init__(self, d_model: int, dropout_rate: float):
        super().__init__()
        self.d_model = d_model
        self.dropout = torch.nn.Dropout(p=dropout_rate)

    def forward(self,
                x: torch.Tensor,
                offset: Union[int, torch.Tensor] = 0) \
            -> Tuple[torch.Tensor, torch.Tensor]:
        """Just return a zero vector for interface compatibility."""
        pos_emb = torch.zeros(1, x.size(1), self.d_model).to(x.device)
        return self.dropout(x), pos_emb

    def position_encoding(
            self, offset: Union[int, torch.Tensor], size: int) -> torch.Tensor:
        return torch.zeros(1, size, self.d_model)
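As a small illustration of the streaming position_encoding path, here is a hedged sketch under the same import-path assumption as above (d_model and lengths are arbitrary): the encoding for a chunk starting at offset 6 is just a slice of the same precomputed table a full-utterance forward uses.

import torch
from indextts.gpt.conformer.embedding import PositionalEncoding

pe = PositionalEncoding(d_model=256, dropout_rate=0.0)
pe.eval()

x = torch.randn(1, 10, 256)
y, pos = pe(x)                      # whole utterance, offset 0
assert y.shape == (1, 10, 256) and pos.shape == (1, 10, 256)

# Streaming: a 4-frame chunk starting at frame 6 slices the precomputed
# table directly, matching the tail of the full-utterance encoding.
chunk = pe.position_encoding(offset=6, size=4, apply_dropout=False)
assert torch.equal(chunk, pe.pe[:, 6:10])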
canto-backend/indextts/gpt/conformer/subsampling.py | 348  (new file)
@@ -0,0 +1,348 @@
# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)


"""Subsampling layer definition."""

from typing import Tuple, Union

import torch


class BaseSubsampling(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.right_context = 0
        self.subsampling_rate = 1

    def position_encoding(self, offset: Union[int, torch.Tensor],
                          size: int) -> torch.Tensor:
        return self.pos_enc.position_encoding(offset, size)


class LinearNoSubsampling(BaseSubsampling):
    """Linear transform of the input without subsampling.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.

    """
    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a LinearNoSubsampling object."""
        super().__init__()
        self.out = torch.nn.Sequential(
            torch.nn.Linear(idim, odim),
            torch.nn.LayerNorm(odim, eps=1e-5),
            torch.nn.Dropout(dropout_rate),
        )
        self.pos_enc = pos_enc_class
        self.right_context = 0
        self.subsampling_rate = 1

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Input x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: linear input tensor (#batch, time', odim),
                where time' = time.
            torch.Tensor: positional encoding
            torch.Tensor: linear input mask (#batch, 1, time'),
                where time' = time.

        """
        x = self.out(x)
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask


class Conv2dSubsampling3(BaseSubsampling):
    """Convolutional 2D subsampling (to 1/3 length).

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.

    """
    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a Conv2dSubsampling3 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 5, 3),
            torch.nn.ReLU()
        )
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim * ((idim - 2) // 3), odim))
        self.pos_enc = pos_enc_class
        # The right context for every conv layer is computed by:
        #   (kernel_size - 1) * frame_rate_of_this_layer
        self.subsampling_rate = 3
        # 4 = (5 - 1) * 1
        self.right_context = 4

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 3.
            torch.Tensor: positional encoding
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 3.

        """
        x = x.unsqueeze(1)  # (b, c=1, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask[:, :, :-2:3]


class Conv2dSubsampling2(BaseSubsampling):
    """Convolutional 2D subsampling (to 1/2 length).

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.

    """
    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a Conv2dSubsampling2 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
        )
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim * ((idim - 1) // 2), odim))
        self.pos_enc = pos_enc_class
        # The right context for every conv layer is computed by:
        #   (kernel_size - 1) * frame_rate_of_this_layer
        self.subsampling_rate = 2
        # 2 = (3 - 1) * 1
        self.right_context = 2

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 2.
            torch.Tensor: positional encoding
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 2.

        """
        x = x.unsqueeze(1)  # (b, c=1, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask[:, :, 2::2]


class Conv2dSubsampling4(BaseSubsampling):
    """Convolutional 2D subsampling (to 1/4 length).

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.

    """
    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a Conv2dSubsampling4 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
        )
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim))
        self.pos_enc = pos_enc_class
        # The right context for every conv layer is computed by:
        #   (kernel_size - 1) * frame_rate_of_this_layer
        self.subsampling_rate = 4
        # 6 = (3 - 1) * 1 + (3 - 1) * 2
        self.right_context = 6

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 4.
            torch.Tensor: positional encoding
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 4.

        """
        x = x.unsqueeze(1)  # (b, c=1, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2]


class Conv2dSubsampling6(BaseSubsampling):
    """Convolutional 2D subsampling (to 1/6 length).
    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
        pos_enc (torch.nn.Module): Custom position encoding layer.
    """
    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a Conv2dSubsampling6 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 5, 3),
            torch.nn.ReLU(),
        )
        self.linear = torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3),
                                      odim)
        self.pos_enc = pos_enc_class
        # 10 = (3 - 1) * 1 + (5 - 1) * 2
        self.subsampling_rate = 6
        self.right_context = 10

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x.
        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 6.
            torch.Tensor: positional encoding
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 6.
        """
        x = x.unsqueeze(1)  # (b, c, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask[:, :, 2::2][:, :, 4::3]


class Conv2dSubsampling8(BaseSubsampling):
    """Convolutional 2D subsampling (to 1/8 length).

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.

    """
    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a Conv2dSubsampling8 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
        )
        self.linear = torch.nn.Linear(
            odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim)
        self.pos_enc = pos_enc_class
        self.subsampling_rate = 8
        # 14 = (3 - 1) * 1 + (3 - 1) * 2 + (3 - 1) * 4
        self.right_context = 14

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 8.
            torch.Tensor: positional encoding
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 8.
        """
        x = x.unsqueeze(1)  # (b, c, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2][:, :, 2::2]
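To make the `time' = time // 4` bookkeeping concrete, here is a hedged usage sketch for Conv2dSubsampling4 (import path and parameter values assumed, as in the sketches above): each 3x3 stride-2 convolution maps t to (t - 1) // 2, so 100 input frames shrink to ((100 - 1) // 2 - 1) // 2 = 24, and the mask is strided down to match.

import torch
from indextts.gpt.conformer.embedding import PositionalEncoding
from indextts.gpt.conformer.subsampling import Conv2dSubsampling4

sub = Conv2dSubsampling4(idim=80, odim=256, dropout_rate=0.0,
                         pos_enc_class=PositionalEncoding(256, 0.0))
sub.eval()

x = torch.randn(1, 100, 80)                     # (batch, time, idim)
x_mask = torch.ones(1, 1, 100, dtype=torch.bool)
y, pos_emb, y_mask = sub(x, x_mask)

# Two stride-2 convs: 100 -> 49 -> 24 frames; the mask is subsampled
# with the same [:, :, 2::2] pattern applied twice.
assert y.shape == (1, 24, 256)
assert y_mask.shape == (1, 1, 24)
assert sub.subsampling_rate == 4 and sub.right_context == 6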