Update Fun-ASR/model.py
Fun-ASR/model.py  CHANGED  (+109 -42)

@@ -15,6 +15,7 @@ from funasr.register import tables
 from funasr.train_utils.device_funcs import force_gatherable, to_device
 from funasr.utils.datadir_writer import DatadirWriter
 from funasr.utils.load_utils import extract_fbank, load_audio_text_image_video
+from transformers import AutoConfig, AutoModelForCausalLM

 dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}

@@ -37,13 +38,21 @@ class FunASRNano(nn.Module):

         # audio encoder
         hub = audio_encoder_conf.get("hub", None)
-        self.audio_encoder_activation_checkpoint = audio_encoder_conf.get("activation_checkpoint", False)
+        self.audio_encoder_activation_checkpoint = audio_encoder_conf.get(
+            "activation_checkpoint", False
+        )
         if hub == "ms":
             model = AutoModel(model=audio_encoder, model_revision="master")
             audio_encoder_output_size = (
-                model.model.encoder_output_size if hasattr(model.model, "encoder_output_size") else -1
+                model.model.encoder_output_size
+                if hasattr(model.model, "encoder_output_size")
+                else -1
+            )
+            audio_encoder = (
+                model.model.model.encoder
+                if hasattr(model.model, "model")
+                else model.model.encoder
             )
-            audio_encoder = model.model.model.encoder if hasattr(model.model, "model") else model.model.encoder
         else:
             encoder_class = tables.encoder_classes.get(audio_encoder)
             audio_encoder = encoder_class(input_size=input_size, **audio_encoder_conf)
@@ -61,16 +70,9 @@ class FunASRNano(nn.Module):
         init_param_path = llm_conf.get("init_param_path", None)
         llm_dim = None

-        from transformers import AutoModelForCausalLM
-
         llm_load_kwargs = llm_conf.get("load_kwargs", {})
-        model = AutoModelForCausalLM.from_pretrained(
-            init_param_path,
-            load_in_8bit=None,
-            device_map=None,
-            use_cache=None,
-            **llm_load_kwargs,
-        )
+        config = AutoConfig.from_pretrained(init_param_path)
+        model = AutoModelForCausalLM.from_config(config, **llm_load_kwargs)

         freeze = llm_conf.get("freeze", True)
         if freeze:
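
The hunk above swaps eager weight loading for config-only instantiation: AutoModelForCausalLM.from_config builds the architecture from config.json without reading any weight tensors, so a full checkpoint can be applied later in one pass. A minimal sketch of the two paths, assuming an arbitrary Hugging Face causal-LM repo (the repo id is only an example):

    from transformers import AutoConfig, AutoModelForCausalLM

    # Config-only: allocates the model with randomly initialized weights;
    # nothing beyond config.json is fetched.
    config = AutoConfig.from_pretrained("Qwen/Qwen2-0.5B")
    model = AutoModelForCausalLM.from_config(config)

    # The weight-loading path the old code used fetches and loads all tensors:
    # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B")
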
@@ -110,13 +112,10 @@ class FunASRNano(nn.Module):
         adaptor_class = tables.adaptor_classes.get(audio_adaptor)
         if audio_encoder_output_size > 0:
             audio_adaptor_conf["encoder_dim"] = audio_encoder_output_size
-        audio_adaptor_conf["llm_dim"] = llm_dim if llm_dim is not None else audio_adaptor_conf["llm_dim"]
+        audio_adaptor_conf["llm_dim"] = (
+            llm_dim if llm_dim is not None else audio_adaptor_conf["llm_dim"]
+        )
         audio_adaptor = adaptor_class(**audio_adaptor_conf)
-        init_param_path = audio_adaptor_conf.get("init_param_path", None)
-        if init_param_path is not None:
-            src_state = torch.load(init_param_path, map_location="cpu")
-            flag = audio_adaptor.load_state_dict(src_state, strict=False)
-            logging.info(f"Loading audio_adaptor ckpt: {init_param_path}, status: {flag}")
         freeze = audio_adaptor_conf.get("freeze", False)
         if freeze:
             for name, param in audio_adaptor.named_parameters():
@@ -153,12 +152,16 @@ class FunASRNano(nn.Module):
         if self.audio_encoder_activation_checkpoint:
             from torch.utils.checkpoint import checkpoint

-            encoder_out, encoder_out_lens = checkpoint(self.encode, speech, speech_lengths, use_reentrant=False)
+            encoder_out, encoder_out_lens = checkpoint(
+                self.encode, speech, speech_lengths, use_reentrant=False
+            )
         else:
             encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)

         # audio_adaptor
-        encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
+        encoder_out, encoder_out_lens = self.audio_adaptor(
+            encoder_out, encoder_out_lens
+        )

         batch_size, token_num, dims = inputs_embeds.shape
         fake_token_len = kwargs.get("fake_token_len")
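
For reference, torch.utils.checkpoint trades compute for memory: activations inside the wrapped call are dropped during the forward pass and recomputed on backward. A self-contained sketch of the non-reentrant form used above, with a stand-in module rather than the Fun-ASR encoder:

    import torch
    from torch.utils.checkpoint import checkpoint

    encoder = torch.nn.Sequential(torch.nn.Linear(80, 256), torch.nn.ReLU())
    speech = torch.randn(4, 100, 80, requires_grad=True)

    # use_reentrant=False is the recommended variant; it tracks gradients
    # correctly even when some inputs do not require them.
    out = checkpoint(encoder, speech, use_reentrant=False)
    out.sum().backward()  # intermediate activations are recomputed here
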
@@ -197,7 +200,9 @@ class FunASRNano(nn.Module):
         stats["batch_size_speech"] = batch_size_speech
         stats["batch_size_x_frames"] = frames * batch_size_speech
         stats["batch_size_real_frames"] = speech_lengths.sum().item()
-        stats["padding_frames"] = stats["batch_size_x_frames"] - stats["batch_size_real_frames"]
+        stats["padding_frames"] = (
+            stats["batch_size_x_frames"] - stats["batch_size_real_frames"]
+        )

         with torch.cuda.amp.autocast(
             enabled=True if self.llm_dtype != "fp32" else False,
@@ -214,7 +219,9 @@ class FunASRNano(nn.Module):

         with torch.no_grad():
             preds = torch.argmax(model_outputs.logits, -1)
-            acc_att = compute_accuracy(preds[:, :-1], labels_ids[:, 1:], ignore_label=-100)
+            acc_att = compute_accuracy(
+                preds[:, :-1], labels_ids[:, 1:], ignore_label=-100
+            )
             stats["acc"] = acc_att

         stats["loss"] = torch.clone(loss.detach())
@@ -222,7 +229,9 @@ class FunASRNano(nn.Module):

         stats["batch_size_x_tokens"] = token_num * batch_size
         stats["batch_size_real_tokens"] = attention_mask.sum().item()
-        stats["padding_tokens"] = stats["batch_size_x_tokens"] - stats["batch_size_real_tokens"]
+        stats["padding_tokens"] = (
+            stats["batch_size_x_tokens"] - stats["batch_size_real_tokens"]
+        )

         dialog_turns = (fbank_beg > 0).sum(-1)
         dialog_turns_max = torch.max(dialog_turns).int().item()
@@ -244,7 +253,9 @@ class FunASRNano(nn.Module):
     def encode(self, speech, speech_lengths):
         # audio encoder
         if self.feat_permute:
-            encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
+            encoder_out, encoder_out_lens = self.audio_encoder(
+                speech.permute(0, 2, 1), speech_lengths
+            )
         else:
             encoder_out, encoder_out_lens = self.audio_encoder(speech, speech_lengths)

@@ -275,7 +286,9 @@ class FunASRNano(nn.Module):

         return contents

-    def data_load_speech(self, contents: dict, tokenizer, frontend, meta_data={}, **kwargs):
+    def data_load_speech(
+        self, contents: dict, tokenizer, frontend, meta_data={}, **kwargs
+    ):
         system = contents["system"]
         user = contents["user"]
         assistant = contents["assistant"]
@@ -296,7 +309,9 @@ class FunASRNano(nn.Module):
             [],
         )
         input_source_ids = []
-        for i, (system_prompt, user_prompt, target_out) in enumerate(zip(system, user, assistant)):
+        for i, (system_prompt, user_prompt, target_out) in enumerate(
+            zip(system, user, assistant)
+        ):
             if i >= kwargs.get("multiturn_num_max", 5):
                 break
             if len(input_ids) > kwargs.get("max_token_length", 1500):
@@ -332,18 +347,24 @@ class FunASRNano(nn.Module):
                     source_ids += sub_token
                     fbank_mask_i += [0] * len(sub_token)
                 else:
-                    sub_str = sub_str.replace("<|startofspeech|>", "").replace("<|endofspeech|>", "")
+                    sub_str = sub_str.replace("<|startofspeech|>", "").replace(
+                        "<|endofspeech|>", ""
+                    )
                     if sub_str.startswith("!"):
                         sub_str = sub_str[1:]
                         if sub_str.startswith("!"):  # !!: audio sample point
                             sub_str = audio
                     try:
                         time1 = time.perf_counter()
-                        data_src = load_audio_text_image_video(sub_str, fs=frontend.fs, **kwargs)
+                        data_src = load_audio_text_image_video(
+                            sub_str, fs=frontend.fs, **kwargs
+                        )
                         time2 = time.perf_counter()
                         meta_data["load_data"] = f"{time2 - time1:0.3f}"
                     except Exception as e:
-                        logging.error(f"Loading wav failed! {str(e)}, {traceback.format_exc()}")
+                        logging.error(
+                            f"Loading wav failed! {str(e)}, {traceback.format_exc()}"
+                        )

                     speech, speech_lengths = extract_fbank(
                         data_src,
@@ -355,7 +376,10 @@ class FunASRNano(nn.Module):
                     time3 = time.perf_counter()
                     meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
                     meta_data["batch_data_time"] = (
-                        speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
+                        speech_lengths.sum().item()
+                        * frontend.frame_shift
+                        * frontend.lfr_n
+                        / 1000
                     )

                     if self.feat_permute:
@@ -382,7 +406,9 @@ class FunASRNano(nn.Module):
                     fbank.append(speech[0, :, :])
                     fbank_lens.append(speech_lengths)

-        input_ids = torch.tensor(input_ids, dtype=torch.int64)  # [: self.max_token_length]
+        input_ids = torch.tensor(
+            input_ids, dtype=torch.int64
+        )  # [: self.max_token_length]
         attention_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
         labels = torch.tensor(labels, dtype=torch.int64)  # [: self.max_token_length]

@@ -393,8 +419,12 @@ class FunASRNano(nn.Module):
         target_ids = torch.tensor(target_ids, dtype=torch.int64)

         if len(fbank) > 0:
-            speech = torch.nn.utils.rnn.pad_sequence(fbank, batch_first=True, padding_value=0.0)
-            speech_lengths = torch.nn.utils.rnn.pad_sequence(fbank_lens, batch_first=True, padding_value=-1)
+            speech = torch.nn.utils.rnn.pad_sequence(
+                fbank, batch_first=True, padding_value=0.0
+            )
+            speech_lengths = torch.nn.utils.rnn.pad_sequence(
+                fbank_lens, batch_first=True, padding_value=-1
+            )
         else:
             speech = []
             speech_lengths = []
@@ -428,7 +458,9 @@ class FunASRNano(nn.Module):
             raise NotImplementedError("batch decoding is not implemented")

         contents = self.data_template(data_in[0])
-        output = self.data_load_speech(contents, tokenizer, frontend, meta_data=meta_data, **kwargs)
+        output = self.data_load_speech(
+            contents, tokenizer, frontend, meta_data=meta_data, **kwargs
+        )
         batch = to_device(output, kwargs["device"])

         # audio encoder
@@ -449,7 +481,9 @@ class FunASRNano(nn.Module):
         encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)

         # audio_adaptor
-        encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
+        encoder_out, encoder_out_lens = self.audio_adaptor(
+            encoder_out, encoder_out_lens
+        )
         meta_data["audio_adaptor_out"] = encoder_out
         meta_data["audio_adaptor_out_lens"] = encoder_out_lens

@@ -509,13 +543,36 @@ class FunASRNano(nn.Module):
         frontend=None,
         **kwargs,
     ):
+        hotwords = kwargs.get("hotwords", [])
+        if len(hotwords) > 0:
+            hotwords = ", ".join(hotwords)
+            prompt = f"请结合上下文信息，更加准确地完成语音转写任务。如果没有相关信息，我们会留空。\n\n\n**上下文信息：**\n\n\n"
+            prompt += f"热词列表：[{hotwords}]\n"
+        else:
+            prompt = ""
+        language = kwargs.get("language", "auto")
+        if language not in ("auto", "zh", "en", "ja"):
+            language = "auto"
+        if language == "auto":
+            prompt += "语音转写"
+        else:
+            LANGUAGE_MAP = {"zh": "中文", "en": "英文", "ja": "日文"}
+            prompt += f"语音转写成{LANGUAGE_MAP[language]}"
+        itn = kwargs.get("itn", True)
+        if not itn:
+            prompt += "，不进行文本规整"
+        prompt += "："
+
         new_data_in = []
         for data in data_in:
             if isinstance(data, str):
                 new_data_in.append(
                     [
                         {"role": "system", "content": "You are a helpful assistant."},
-                        {"role": "user", "content": f"<|startofspeech|>!{data}<|endofspeech|>"},
+                        {
+                            "role": "user",
+                            "content": f"{prompt}<|startofspeech|>!{data}<|endofspeech|>",
+                        },
                         {"role": "assistant", "content": "null"},
                     ]
                 )
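
To make the new prompt assembly concrete, the following standalone function restates the added block's logic (a paraphrase for illustration, not the model's own method). The Chinese prompt roughly asks the model to use the context/hotword list to transcribe more accurately; hotwords, language, and itn are the kwargs the hunk reads:

    def build_asr_prompt(hotwords=(), language="auto", itn=True):
        language_map = {"zh": "中文", "en": "英文", "ja": "日文"}
        if hotwords:
            prompt = (
                "请结合上下文信息，更加准确地完成语音转写任务。"
                "如果没有相关信息，我们会留空。\n\n\n**上下文信息：**\n\n\n"
            )
            prompt += f"热词列表：[{', '.join(hotwords)}]\n"
        else:
            prompt = ""
        if language in language_map:  # anything else falls back to "auto"
            prompt += f"语音转写成{language_map[language]}"
        else:
            prompt += "语音转写"
        if not itn:
            prompt += "，不进行文本规整"
        return prompt + "："

    print(build_asr_prompt())  # no hotwords, auto language -> "语音转写："
    print(build_asr_prompt(["FunASR"], language="en", itn=False))
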
@@ -523,7 +580,11 @@ class FunASRNano(nn.Module):
                 new_data_in.append(
                     [
                         {"role": "system", "content": "You are a helpful assistant."},
-                        {"role": "user", "content": "<|startofspeech|>!!<|endofspeech|>", "audio": data},
+                        {
+                            "role": "user",
+                            "content": f"{prompt}<|startofspeech|>!!<|endofspeech|>",
+                            "audio": data,
+                        },
                         {"role": "assistant", "content": "null"},
                     ]
                 )
@@ -533,7 +594,9 @@ class FunASRNano(nn.Module):
         key = []
         for _ in data_in:
             chars = string.ascii_letters + string.digits
-            key.append("rand_key_" + "".join(random.choice(chars) for _ in range(13)))
+            key.append(
+                "rand_key_" + "".join(random.choice(chars) for _ in range(13))
+            )

         return self.inference_llm(
             data_in,
@@ -561,7 +624,9 @@ class FunASRNano(nn.Module):
         llm_dtype = "fp16" if kwargs.get("fp16", False) else llm_dtype
         llm_dtype = "bf16" if kwargs.get("bf16", False) else llm_dtype

-        with torch.cuda.amp.autocast(enabled=True if llm_dtype != "fp32" else False, dtype=dtype_map[llm_dtype]):
+        with torch.cuda.amp.autocast(
+            enabled=True if llm_dtype != "fp32" else False, dtype=dtype_map[llm_dtype]
+        ):
             label = contents["assistant"][-1]
             self.llm = self.llm.to(dtype_map[llm_dtype])
             inputs_embeds = inputs_embeds.to(dtype_map[llm_dtype])
@@ -608,7 +673,7 @@ class FunASRNano(nn.Module):
             response_clean = re.sub(r"[^\w\s\u3000\u4e00-\u9fff]+", "", response)
             result_i = {
                 "key": key[0],
-                "text": response,
+                "text": re.sub(r'\s+', ' ', response.replace("/sil", " ")),
                 "text_tn": response_clean,
                 "label": label,
             }
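
The text field now strips /sil markers and collapses whitespace runs before returning the transcript, while text_tn keeps the separate punctuation-removal cleanup. A quick illustration with a made-up response string:

    import re

    response = "hello /sil world   /sil"
    text = re.sub(r"\s+", " ", response.replace("/sil", " "))
    print(repr(text))  # 'hello world '
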
@@ -627,6 +692,8 @@ class FunASRNano(nn.Module):
 def from_pretrained(model: str = None, **kwargs):
     from funasr import AutoModel

-    model, kwargs = AutoModel.build_model(model=model, trust_remote_code=True, **kwargs)
+    model, kwargs = AutoModel.build_model(
+        model=model, trust_remote_code=True, **kwargs
+    )

-    return model, kwargs
+    return model, kwargs
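
Taken together, the touched entry points can be exercised roughly as follows. This is a sketch against the funasr AutoModel convention; the local checkpoint directory and wav path are hypothetical, and the hotwords/language/itn kwargs are the ones introduced by this commit:

    from funasr import AutoModel

    # trust_remote_code pulls in Fun-ASR/model.py so FunASRNano registers.
    model = AutoModel(model="./Fun-ASR", trust_remote_code=True)

    res = model.generate(
        input="./example.wav",  # a plain path is wrapped into the chat template above
        language="zh",          # restricted to auto/zh/en/ja by the new prompt code
        itn=True,
        hotwords=["FunASR", "Paraformer"],
    )
    print(res[0]["text"])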
|