⬆️ sync upstream changes for dvae.py
zhzLuke96 committed Jun 28, 2024
1 parent fa63491 commit cc3ca09
Showing 4 changed files with 96 additions and 58 deletions.
145 changes: 90 additions & 55 deletions modules/ChatTTS/ChatTTS/model/dvae.py
@@ -1,9 +1,11 @@
 import math
+from typing import List, Optional
 
+import numpy as np
+import pybase16384 as b14
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from einops import rearrange
 from vector_quantize_pytorch import GroupedResidualFSQ
 
 
@@ -12,8 +14,8 @@ def __init__(
         self,
         dim: int,
         intermediate_dim: int,
-        kernel,
-        dilation,
+        kernel: int,
+        dilation: int,
         layer_scale_init_value: float = 1e-6,
     ):
         # ConvNeXt Block copied from Vocos.
@@ -41,23 +43,32 @@ def __init__(
 
     def forward(self, x: torch.Tensor, cond=None) -> torch.Tensor:
         residual = x
-        x = self.dwconv(x)
-        x = x.transpose(1, 2)  # (B, C, T) -> (B, T, C)
-        x = self.norm(x)
-        x = self.pwconv1(x)
-        x = self.act(x)
-        x = self.pwconv2(x)
+
+        y = self.dwconv(x)
+        y.transpose_(1, 2)  # (B, C, T) -> (B, T, C)
+        x = self.norm(y)
+        del y
+        y = self.pwconv1(x)
+        del x
+        x = self.act(y)
+        del y
+        y = self.pwconv2(x)
+        del x
         if self.gamma is not None:
-            x = self.gamma * x
-        x = x.transpose(1, 2)  # (B, T, C) -> (B, C, T)
+            y *= self.gamma
+        y.transpose_(1, 2)  # (B, T, C) -> (B, C, T)
 
-        x = residual + x
+        x = y + residual
+        del y
+
         return x
 
 
 class GFSQ(nn.Module):
 
-    def __init__(self, dim, levels, G, R, eps=1e-5, transpose=True):
+    def __init__(
+        self, dim: int, levels: List[int], G: int, R: int, eps=1e-5, transpose=True
+    ):
         super(GFSQ, self).__init__()
         self.quantizer = GroupedResidualFSQ(
             dim=dim,
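
Aside: the rewritten ConvNeXtBlock.forward ping-pongs between the names x and y, swaps axes with in-place transpose_, and dels each dead reference, so every intermediate activation can be freed as soon as the next op has consumed it. A minimal sketch of the pattern, with stand-in layers rather than the block's real dwconv/pwconv stack:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Stand-in layers; the real block uses dwconv/norm/pwconv1/act/pwconv2.
norm = nn.LayerNorm(8)
pwconv1, pwconv2 = nn.Linear(8, 16), nn.Linear(16, 8)

x = torch.randn(4, 8, 100)  # (B, C, T)
y = x * 1.0                 # stand-in for self.dwconv(x)
y.transpose_(1, 2)          # in-place axis swap: (B, C, T) -> (B, T, C)
x = norm(y)
del y                       # drop the pre-norm activation immediately
y = pwconv2(F.gelu(pwconv1(x)))
del x
y.transpose_(1, 2)          # back to (B, C, T) without binding a second name
print(y.shape)              # torch.Size([4, 8, 100])
```

Note that del only removes the local name; during training, autograd's saved tensors keep activations alive anyway, so the savings mostly show up at inference, which is how DVAE.forward is used (it now runs under torch.no_grad(), see below).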
@@ -71,48 +82,51 @@ def __init__(self, dim, levels, G, R, eps=1e-5, transpose=True):
         self.G = G
         self.R = R
 
-    def _embed(self, x):
+    def _embed(self, x: torch.Tensor):
         if self.transpose:
             x = x.transpose(1, 2)
+        """
         x = rearrange(
-            x,
-            "b t (g r) -> g b t r",
-            g=self.G,
-            r=self.R,
+            x, "b t (g r) -> g b t r", g = self.G, r = self.R,
         )
+        """
+        x = x.view(x.size(0), x.size(1), self.G, self.R).permute(2, 0, 1, 3)
         feat = self.quantizer.get_output_from_indices(x)
-        return feat.transpose(1, 2) if self.transpose else feat
+        return feat.transpose_(1, 2) if self.transpose else feat
 
-    def forward(
-        self,
-        x,
-    ):
+    def forward(self, x):
         if self.transpose:
             x = x.transpose(1, 2)
         feat, ind = self.quantizer(x)
+        """
         ind = rearrange(
-            ind,
-            "g b t r ->b t (g r)",
+            ind, "g b t r ->b t (g r)",
         )
         embed_onehot = F.one_hot(ind.long(), self.n_ind).to(x.dtype)
+        """
+        ind = ind.permute(1, 2, 0, 3).contiguous()
+        ind = ind.view(ind.size(0), ind.size(1), -1)
+        embed_onehot_tmp = F.one_hot(ind.long(), self.n_ind)
+        embed_onehot = embed_onehot_tmp.to(x.dtype)
+        del embed_onehot_tmp
         e_mean = torch.mean(embed_onehot, dim=[0, 1])
-        e_mean = e_mean / (e_mean.sum(dim=1) + self.eps).unsqueeze(1)
+        # e_mean = e_mean / (e_mean.sum(dim=1) + self.eps).unsqueeze(1)
+        torch.div(e_mean, (e_mean.sum(dim=1) + self.eps).unsqueeze(1), out=e_mean)
         perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + self.eps), dim=1))
 
         return (
             torch.zeros(perplexity.shape, dtype=x.dtype, device=x.device),
-            feat.transpose(1, 2) if self.transpose else feat,
+            feat.transpose_(1, 2) if self.transpose else feat,
             perplexity,
             None,
-            ind.transpose(1, 2) if self.transpose else ind,
+            ind.transpose_(1, 2) if self.transpose else ind,
         )
 
 
 class DVAEDecoder(nn.Module):
     def __init__(
         self,
-        idim,
-        odim,
+        idim: int,
+        odim: int,
         n_layer=12,
         bn_dim=64,
         hidden=256,
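
The einops calls in GFSQ survive only as commented-out docstrings; the live code spells the same reshuffles with view/permute, which is what lets the `from einops import rearrange` import be dropped. A self-contained equivalence check (shapes assumed: G quantizer groups, R residual levels; einops is needed only for the comparison itself):

```python
import torch
from einops import rearrange

B, T, G, R = 2, 5, 2, 3

# _embed direction: "b t (g r) -> g b t r" vs. view + permute.
x = torch.randn(B, T, G * R)
via_einops = rearrange(x, "b t (g r) -> g b t r", g=G, r=R)
via_view = x.view(x.size(0), x.size(1), G, R).permute(2, 0, 1, 3)
assert torch.equal(via_einops, via_view)  # same values, shape (G, B, T, R)

# forward direction: "g b t r -> b t (g r)" vs. permute + contiguous + view.
ind = torch.randn(G, B, T, R)
flat_einops = rearrange(ind, "g b t r -> b t (g r)")
flat_view = ind.permute(1, 2, 0, 3).contiguous().view(ind.size(1), ind.size(2), -1)
assert torch.equal(flat_einops, flat_view)
```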
@@ -140,21 +154,35 @@ def __init__(
         )
         self.conv_out = nn.Conv1d(hidden, odim, kernel_size=1, bias=False)
 
-    def forward(self, input, conditioning=None):
+    def forward(self, input: torch.Tensor, conditioning=None) -> torch.Tensor:
         # B, T, C
-        x = input.transpose(1, 2)
-        x = self.conv_in(x)
+        x = input.transpose_(1, 2)
+        y = self.conv_in(x)
+        del x
         for f in self.decoder_block:
-            x = f(x, conditioning)
+            y = f(y, conditioning)
 
-        x = self.conv_out(x)
-        return x.transpose(1, 2)
+        x = self.conv_out(y)
+        del y
+        return x.transpose_(1, 2)
 
 
 class DVAE(nn.Module):
-    def __init__(self, decoder_config, vq_config, dim=512):
+    def __init__(
+        self,
+        decoder_config,
+        vq_config,
+        dim=512,
+        coef: Optional[str] = None,
+    ):
         super().__init__()
-        self.register_buffer("coef", torch.randn(1, 100, 1))
+        if coef is None:
+            coef = torch.rand(100)
+        else:
+            coef = torch.from_numpy(
+                np.copy(np.frombuffer(b14.decode_from_string(coef), dtype=np.float32))
+            )
+        self.register_buffer("coef", coef.unsqueeze(0).unsqueeze_(2))
 
         self.decoder = DVAEDecoder(**decoder_config)
         self.out_conv = nn.Conv1d(dim, 100, 3, 1, 1, bias=False)
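
With the new coef plumbing, the DVAE's mel-scaling coefficients become portable as plain text: __init__ now accepts a base16384-encoded string instead of always drawing a random buffer, and __repr__ (added in the next hunk) produces that string. A round-trip sketch using the same pybase16384 and numpy calls as the diff (variable names are mine):

```python
import numpy as np
import pybase16384 as b14
import torch

coef = torch.rand(100)  # same default as the new __init__ when coef is None
encoded = b14.encode_to_string(coef.numpy().astype(np.float32).tobytes())
print(type(encoded))    # <class 'str'>, which is what __repr__ returns

# np.frombuffer yields a read-only array, hence the np.copy before from_numpy,
# exactly as in the diff.
decoded = torch.from_numpy(
    np.copy(np.frombuffer(b14.decode_from_string(encoded), dtype=np.float32))
)
assert torch.equal(coef, decoded)                # lossless round trip
print(decoded.unsqueeze(0).unsqueeze_(2).shape)  # torch.Size([1, 100, 1]), the buffer shape
```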
@@ -163,24 +191,31 @@ def __init__(self, decoder_config, vq_config, dim=512):
         else:
             self.vq_layer = None
 
-    def forward(self, inp):
+    def __repr__(self) -> str:
+        return b14.encode_to_string(
+            self.coef.cpu().numpy().astype(np.float32).tobytes()
+        )
 
-        if self.vq_layer is not None:
-            vq_feats = self.vq_layer._embed(inp)
-        else:
-            vq_feats = inp.detach().clone()
+    def forward(self, inp: torch.Tensor) -> torch.Tensor:
+        with torch.no_grad():
 
-        vq_feats = (
-            vq_feats.view(
-                (vq_feats.size(0), 2, vq_feats.size(1) // 2, vq_feats.size(2)),
-            )
-            .permute(0, 2, 3, 1)
-            .flatten(2)
-        )
+            if self.vq_layer is not None:
+                vq_feats = self.vq_layer._embed(inp)
+            else:
+                vq_feats = inp.detach().clone()
+
+            vq_feats = (
+                vq_feats.view(
+                    (vq_feats.size(0), 2, vq_feats.size(1) // 2, vq_feats.size(2)),
+                )
+                .permute(0, 2, 3, 1)
+                .flatten(2)
+            )
 
-        vq_feats = vq_feats.transpose(1, 2)
-        dec_out = self.decoder(input=vq_feats)
-        dec_out = self.out_conv(dec_out.transpose(1, 2))
-        mel = dec_out * self.coef
+            dec_out = self.out_conv(
+                self.decoder(
+                    input=vq_feats.transpose_(1, 2),
+                ).transpose_(1, 2),
+            )
 
-        return mel
+        return torch.mul(dec_out, self.coef, out=dec_out)
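
The final line, torch.mul(dec_out, self.coef, out=dec_out), like the torch.div(..., out=e_mean) swap in GFSQ.forward, writes the result back into an existing buffer instead of allocating a fresh tensor; that is fine here because dec_out is produced under torch.no_grad() and carries no autograd history. A tiny demonstration with assumed shapes:

```python
import torch

dec_out = torch.randn(1, 100, 50)  # (B, n_mels, T); shapes assumed for illustration
coef = torch.rand(1, 100, 1)       # broadcasts across the time axis

storage = dec_out.data_ptr()
mel = torch.mul(dec_out, coef, out=dec_out)  # multiply in place; result aliases dec_out
assert mel.data_ptr() == storage             # no fresh allocation
print(mel.shape)                             # torch.Size([1, 100, 50])
```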
3 changes: 2 additions & 1 deletion requirements.dev.txt
@@ -158,4 +158,5 @@ websockets==11.0.3
 Werkzeug==3.0.3
 zhon==2.0.2
 ftfy==6.2.0
-pyrubberband==0.3.0
+pyrubberband==0.3.0
+pybase16384==0.3.7
3 changes: 2 additions & 1 deletion requirements.docker.txt
@@ -27,4 +27,5 @@ cn2an
 python-box
 ftfy
 librosa
-pyrubberband
+pyrubberband
+pybase16384
3 changes: 2 additions & 1 deletion requirements.txt
@@ -26,4 +26,5 @@ cn2an
 python-box
 ftfy
 librosa
-pyrubberband
+pyrubberband
+pybase16384
