initial commit

commit 3633aa99e5
Author: George Kasparyants
Date:   2024-04-24 06:57:30 +04:00
29 changed files with 2555 additions and 0 deletions


@@ -0,0 +1,164 @@
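# Headless Blender script: loads the "MFA_body" mesh, reads its shape keys, and renders
# 10 PNG frames with every shape key set to a random value (a random-pose test render).
# Scripts of this form are launched by render_video() in pieinfer.py as
#   blender -b <blend> -P <script> -- <root_dir> <wav_name>
# so sys.argv[-1] is the clip name and sys.argv[-2] is the root directory.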
import os
import random
import sys

import bpy
import numpy as np
# ARKit-style blendshape names (52 entries); kept for reference but not used below,
# since model_bsList is re-read from the object's shape keys.
model_bsList_old = ["browDownLeft",
"browDownRight",
"browInnerUp",
"browOuterUpLeft",
"browOuterUpRight",
"cheekPuff",
"cheekSquintLeft",
"cheekSquintRight",
"eyeBlinkLeft",
"eyeBlinkRight",
"eyeLookDownLeft",
"eyeLookDownRight",
"eyeLookInLeft",
"eyeLookInRight",
"eyeLookOutLeft",
"eyeLookOutRight",
"eyeLookUpLeft",
"eyeLookUpRight",
"eyeSquintLeft",
"eyeSquintRight",
"eyeWideLeft",
"eyeWideRight",
"jawForward",
"jawLeft",
"jawOpen",
"jawRight",
"mouthClose",
"mouthDimpleLeft",
"mouthDimpleRight",
"mouthFrownLeft",
"mouthFrownRight",
"mouthFunnel",
"mouthLeft",
"mouthLowerDownLeft",
"mouthLowerDownRight",
"mouthPressLeft",
"mouthPressRight",
"mouthPucker",
"mouthRight",
"mouthRollLower",
"mouthRollUpper",
"mouthShrugLower",
"mouthShrugUpper",
"mouthSmileLeft",
"mouthSmileRight",
"mouthStretchLeft",
"mouthStretchRight",
"mouthUpperUpLeft",
"mouthUpperUpRight",
"noseSneerLeft",
"noseSneerRight",
"tongueOut"]
filename = str(sys.argv[-1])
root_dir = str(sys.argv[-2])
object_name = "MFA_body"
obj = bpy.data.objects[object_name]
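# Fixed headless render setup: Workbench engine with MATCAP shading and FXAA,
# 512x768 PNG frames at 30 fps, rendered through the '0Camera' camera scaled 2x.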
bpy.context.scene.render.engine = 'BLENDER_WORKBENCH'
bpy.context.scene.display.shading.light = 'MATCAP'
bpy.context.scene.display.render_aa = 'FXAA'
bpy.context.scene.render.resolution_x = int(512)
bpy.context.scene.render.resolution_y = int(768)
bpy.context.scene.render.fps = 30
bpy.context.scene.render.image_settings.file_format = 'PNG'
cam = bpy.data.objects['0Camera']
cam.scale = [2, 2, 2]
bpy.context.scene.camera = cam
"""
model_bsList = ['Basis',
'0',
'X_postrig',
'X_neck',
'X_head',
'X_eyesfix',
'X_breast',
'X_nails',
'X_pus_conf.1',
'X_pus_assym', 'X_jadafication',
'X_facetweak', 'X_eyeshape',
'A_nipple_in', 'A_nailsmax',
'A_pregnant', 'PAD_breathe',
'PAD_swallow', 'Head',
'cr_neck1', 'cr_neck2',
'cr_neck3.R', 'cr_neck3.L',
'cr_neck4.L', 'cr_neck4.R', 'cr_jaw1', 'cr_jaw2', 'sqz_jaw3', 'cr_brows_dwn', 'cr_brows_up',
'cr_eye_lookdown', 'cr_eye_open',
'cr_eye_look.L', 'cr_eye_look.R', 'cr_mouthmax.L', 'cr_mouthmax.R', 'cr_cheekin.L', 'cr_cheekin.R', 'Body', 'cr_spine',
'cr_spine2', 'cr_spine3', 'cr_spine2.L',
'cr_spine2.R', 'cr_spine4.L', 'cr_spine4.R',
'cr_spine5.L', 'cr_spine5.R', 'cr_lowerspine.bcw',
'cr_lowerspine.fwd', 'size_breastXL.L', 'size_breastXL.R',
'size_breastXS.L', 'size_breastXS.R', 'size_oreola.L',
'size_oreola.R', 'Legs', 'cr_hipout.L', 'cr_hipout.R',
'cr_hipin.L', 'cr_hipin.R', 'cr_pussyflattern',
'cr_hip0.L', 'cr_hip0.R', 'cr_hip1.L', 'cr_hip1.R',
'cr_hip45.L', 'cr_hip45.R', 'sqz_hip1max.L',
'sqz_hip1max.R', 'sqz_hip1vol.L', 'sqz_hip1vol.R',
'sqz_hip1squeeze.L', 'sqz_hip1squeeze.R', 'cr_hip2.L',
'cr_hip2.R', 'sqz_hip2.L', 'sqz_hip2.R', 'cr_hip3.L',
'cr_hip3.R', 'sqz_buttrest.L', 'sqz_buttrest.R',
'cr_knee45.L', 'cr_knee45.R', 'cr_knee.L', 'cr_knee.R',
'sqz_knee.L', 'sqz_knee.R', 'sqz_stance.L', 'sqz_stance.R',
'cr_buttheart.L', 'cr_buttheart.R', 'rest_buttcheek.L',
'rest_buttcheek.R', 'rest_knee.L', 'rest_knee.R', 'rest_knee_fat.L',
'rest_knee_fat.R', 'rest_hip.L', 'rest_hip.R', 'vol_butt.L',
'vol_butt.R', 'Feet', 'cr_feet1.L', 'cr_feet1.R', 'cr_feet2.L',
'cr_feet2.R', 'cr_feet3.L', 'cr_feet3.R', 'cr_toe1.L', 'cr_toe1.R',
'cr_toe2.L', 'cr_toe2.R', 'Arms', 'cr_arm-up.L', 'cr_arm-up.R',
'cr_arm-fwd.L', 'cr_arm-fwd.R', 'cr_arm-dwn.L', 'cr_arm-dwn.R',
'sqz_arm-fwd.L', 'sqz_arm-fwd.R', 'sqz_armpit.L', 'sqz_armpit.R',
'sqz_arm-bcw.L', 'sqz_arm-bcw.R', 'sqz_arm-bcw_max.L',
'sqz_arm-bcw_max.R', 'cr_arm-trc.L', 'cr_arm-trc.R',
'D_cr_elbow.L', 'U_cr_elbow.L', 'D_cr_elbow.R', 'U_cr_elbow.R',
'D_sqz_elbowMax.L', 'U_sqz_elbowMax.L', 'D_sqz_elbowMax.R',
'U_sqz_elbowMax.R', 'cr_armrest.L', 'cr_armrest.R',
'cr_shoulder_fwd.L', 'cr_shoulder_fwd.R', 'cr_shoulder_bcw.L',
'cr_shoulder_bcw.R', 'cr_shoulder_dwn.L', 'cr_shoulder_dwn.R',
'cr_shoulder_up.L', 'cr_shoulder_up.R', 'rest_elbow.L', 'rest_elbow.R',
'Hands', 'cr_hand1.L', 'cr_hand1.R',
'cr_hand2.L', 'cr_hand2.R', 'cr_handtwistU.L', 'cr_handtwistU.R',
'cr_handtwistD.L',
'cr_handtwistD.R',
'cr_thumb.01.L', 'cr_thumb.01.R',
'cr_f_index.01.L', 'cr_f_index.01.R', 'cr_f_index.02.L',
'cr_f_index.02.R',
'cr_f_middle.01.L', 'cr_f_middle.01.R', 'cr_f_middle.02.L',
'cr_f_middle.02.R', 'cr_f_ring.01.L', 'cr_f_ring.01.R',
'cr_f_ring.02.L', 'cr_f_ring.02.R', 'cr_f_pinky.01.L',
'cr_f_pinky.01.R', 'cr_f_pinky.02.L', 'cr_f_pinky.02.R', 'EM',
'em_eye_close.L', 'em_eye_close.R', 'em_eye_half.L', 'em_eye_half.R',
'em_smile_open', 'em_smile_close', 'em_kiss', 'em_disg', 'em_blow',
'em_surprise', 'em_sad', 'em_frown', 'PH', 'ph_+', 'ph_bpm',
'ph_fv', 'ph_ou',
'ph_e', 'ph_r', 'ph_ch', 'ph_th', 'ph_a']"""
model_bsList = list(obj.data.shape_keys.key_blocks.keys())
# print(obj.data.shape_keys.key_blocks.keys())
output_dir = root_dir + filename
blendshape_path = root_dir + filename + '.npy'
result = []  # unused
bs = np.load(blendshape_path)  # not used by the random-pose loop below
# Render 10 frames, each with every shape key set to a random value.
for i in range(10):
    for kp_name in model_bsList:
        obj.data.shape_keys.key_blocks[kp_name].value = random.random()
    bpy.context.scene.render.filepath = os.path.join(output_dir, '{}.png'.format(i))
    bpy.ops.render.render(write_still=True)

miapia_own/__init__.py Normal file (empty)

miapia_own/a.py Normal file

@@ -0,0 +1,57 @@
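# Same Blender setup as the script above, but instead of random poses this renders one
# frame per row of the loaded blendshape array, sweeping only the 'cr_eye_open' shape
# key from 0 towards 1 across the clip.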
import bpy
import os
import numpy as np
import sys
filename = str(sys.argv[-1])
root_dir = str(sys.argv[-2])
object_name = "MFA_body"
obj = bpy.data.objects[object_name]
bpy.context.scene.render.engine = 'BLENDER_WORKBENCH'
bpy.context.scene.display.shading.light = 'MATCAP'
bpy.context.scene.display.render_aa = 'FXAA'
bpy.context.scene.render.resolution_x = int(512)
bpy.context.scene.render.resolution_y = int(768)
bpy.context.scene.render.fps = 30
bpy.context.scene.render.image_settings.file_format = 'PNG'
cam = bpy.data.objects['0Camera']
cam.scale = [2, 2, 2]
bpy.context.scene.camera = cam
model_bsList = ['Basis',
'0',
'X_postrig',
'X_neck',
'X_head',
'X_eyesfix',
'X_breast',
'X_nails',
'X_pus_conf.1',
'X_pus_assym', 'X_jadafication',
'X_facetweak', 'X_eyeshape',
'A_nipple_in', 'A_nailsmax',
'A_pregnant', 'PAD_breathe',
'PAD_swallow', 'Head',
'cr_neck1', 'cr_neck2',
'cr_neck3.R', 'cr_neck3.L',
'cr_neck4.L', 'cr_neck4.R', 'cr_jaw1', 'cr_jaw2', 'sqz_jaw3', 'cr_brows_dwn', 'cr_brows_up',
'cr_eye_lookdown', 'cr_eye_open',
'cr_eye_look.L', 'cr_eye_look.R', 'cr_mouthmax.L', 'cr_mouthmax.R', 'cr_cheekin.L', 'cr_cheekin.R', 'Body', 'cr_spine', 'cr_spine2', 'cr_spine3', 'cr_spine2.L', 'cr_spine2.R', 'cr_spine4.L', 'cr_spine4.R', 'cr_spine5.L', 'cr_spine5.R', 'cr_lowerspine.bcw', 'cr_lowerspine.fwd', 'size_breastXL.L', 'size_breastXL.R', 'size_breastXS.L', 'size_breastXS.R', 'size_oreola.L', 'size_oreola.R', 'Legs', 'cr_hipout.L', 'cr_hipout.R', 'cr_hipin.L', 'cr_hipin.R', 'cr_pussyflattern', 'cr_hip0.L', 'cr_hip0.R', 'cr_hip1.L', 'cr_hip1.R', 'cr_hip45.L', 'cr_hip45.R', 'sqz_hip1max.L', 'sqz_hip1max.R', 'sqz_hip1vol.L', 'sqz_hip1vol.R', 'sqz_hip1squeeze.L', 'sqz_hip1squeeze.R', 'cr_hip2.L', 'cr_hip2.R', 'sqz_hip2.L', 'sqz_hip2.R', 'cr_hip3.L', 'cr_hip3.R', 'sqz_buttrest.L', 'sqz_buttrest.R', 'cr_knee45.L', 'cr_knee45.R', 'cr_knee.L', 'cr_knee.R', 'sqz_knee.L', 'sqz_knee.R', 'sqz_stance.L', 'sqz_stance.R', 'cr_buttheart.L', 'cr_buttheart.R', 'rest_buttcheek.L', 'rest_buttcheek.R', 'rest_knee.L', 'rest_knee.R', 'rest_knee_fat.L', 'rest_knee_fat.R', 'rest_hip.L', 'rest_hip.R', 'vol_butt.L', 'vol_butt.R', 'Feet', 'cr_feet1.L', 'cr_feet1.R', 'cr_feet2.L', 'cr_feet2.R', 'cr_feet3.L', 'cr_feet3.R', 'cr_toe1.L', 'cr_toe1.R', 'cr_toe2.L', 'cr_toe2.R', 'Arms', 'cr_arm-up.L', 'cr_arm-up.R', 'cr_arm-fwd.L', 'cr_arm-fwd.R', 'cr_arm-dwn.L', 'cr_arm-dwn.R', 'sqz_arm-fwd.L', 'sqz_arm-fwd.R', 'sqz_armpit.L', 'sqz_armpit.R', 'sqz_arm-bcw.L', 'sqz_arm-bcw.R', 'sqz_arm-bcw_max.L', 'sqz_arm-bcw_max.R', 'cr_arm-trc.L', 'cr_arm-trc.R', 'D_cr_elbow.L', 'U_cr_elbow.L', 'D_cr_elbow.R', 'U_cr_elbow.R', 'D_sqz_elbowMax.L', 'U_sqz_elbowMax.L', 'D_sqz_elbowMax.R', 'U_sqz_elbowMax.R', 'cr_armrest.L', 'cr_armrest.R', 'cr_shoulder_fwd.L', 'cr_shoulder_fwd.R', 'cr_shoulder_bcw.L', 'cr_shoulder_bcw.R', 'cr_shoulder_dwn.L', 'cr_shoulder_dwn.R', 'cr_shoulder_up.L', 'cr_shoulder_up.R', 'rest_elbow.L', 'rest_elbow.R', 'Hands', 'cr_hand1.L', 'cr_hand1.R', 'cr_hand2.L', 'cr_hand2.R', 'cr_handtwistU.L', 'cr_handtwistU.R', 'cr_handtwistD.L', 'cr_handtwistD.R', 'cr_thumb.01.L', 'cr_thumb.01.R', 'cr_f_index.01.L', 'cr_f_index.01.R', 'cr_f_index.02.L', 'cr_f_index.02.R', 'cr_f_middle.01.L', 'cr_f_middle.01.R', 'cr_f_middle.02.L', 'cr_f_middle.02.R', 'cr_f_ring.01.L', 'cr_f_ring.01.R', 'cr_f_ring.02.L', 'cr_f_ring.02.R', 'cr_f_pinky.01.L', 'cr_f_pinky.01.R', 'cr_f_pinky.02.L', 'cr_f_pinky.02.R', 'EM', 'em_eye_close.L', 'em_eye_close.R', 'em_eye_half.L', 'em_eye_half.R', 'em_smile_open', 'em_smile_close', 'em_kiss', 'em_disg', 'em_blow', 'em_surprise', 'em_sad', 'em_frown', 'PH', 'ph_+', 'ph_bpm', 'ph_fv', 'ph_ou', 'ph_e', 'ph_r', 'ph_ch', 'ph_th', 'ph_a']
# print(obj.data.shape_keys.key_blocks.keys())
output_dir = root_dir + filename
blendshape_path = root_dir + filename + '.npy'
result = []  # unused
bs = np.load(blendshape_path)
# One frame per blendshape row, sweeping 'cr_eye_open' from 0 towards 1.
for i in range(bs.shape[0]):
    obj.data.shape_keys.key_blocks['cr_eye_open'].value = i / bs.shape[0]
    bpy.context.scene.render.filepath = os.path.join(output_dir, '{}.png'.format(i))
    bpy.ops.render.render(write_still=True)

miapia_own/aihandler.py Normal file

@@ -0,0 +1,36 @@
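# Thin HTTP client for the fast-pia chat-completion endpoint: sends the user text
# (plus a fixed "wheel-of-life" profile) and returns the reply text and emotion label
# taken from the first completion in the response.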
import requests
class AIHandler(object):
    def __init__(self):
        pass

    def __call__(self, text):
        resp = requests.post("https://fast-pia.avemio.technology/chat-completion",
                             json={
                                 "session-id": "chatcmpl",
                                 "user-location": "Zweibrücken",
                                 "wheel-of-life": [
                                     {
                                         "personal_growth": 10,
                                         "health_exercise": 5,
                                         "familiy_friends": 5,
                                         "romance_relationship": 5,
                                         "career_work": 5,
                                         "finances": 5,
                                         "recreation_fun": 5,
                                         "living_situation": 5}
                                 ],
                                 "messages": [
                                     {
                                         "role": "user",
                                         "content": text
                                     }
                                 ]
                             })
        resp = resp.json()
        return {
            "text": resp[0]['text'],
            "emotion": resp[0]['emotion']
        }

miapia_own/main.py Normal file

@@ -0,0 +1,243 @@
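# piedemo/Flask-SocketIO demo app wiring the full pipeline:
# chat message -> AIHandler (LLM reply + emotion) -> XTTS speech synthesis ->
# EmoTalk blendshape inference (PieInfer) -> headless Blender frame rendering
# (render_video) -> ffmpeg mux (construct_video) -> video shown back in the chat UI.
# Conversation history is kept per session in Redis.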
import sys
import pandas as pd
import argparse
import base64
from flask import send_file, Response
from flask_socketio import emit
from piedemo.fields.ajax_group import AjaxChatField, AjaxGroup
from piedemo.fields.grid import VStack, HStack, SpaceField
from piedemo.fields.inputs.hidden import InputHiddenField
from piedemo.fields.outputs.colored_text import ptext, OutputColoredTextField
from piedemo.fields.outputs.json import OutputJSONField
from piedemo.fields.outputs.progress import ProgressField
from piedemo.fields.outputs.video import OutputVideoField
from piedemo.web import Web
import os
import io
from piedemo.page import Page
from piedemo.hub.svgpil import SVGImage
from piedemo.fields.outputs.table import OutputTableField
from piedemo.fields.inputs.int_list import InputIntListField
from piedemo.fields.navigation import Navigation
from piedemo.fields.inputs.chat import ChatField
import librosa
import uuid
import numpy as np
import redis
from scipy.signal import savgol_filter
import torch
import random
import subprocess
import shlex
from tqdm import tqdm
from aihandler import AIHandler
from pieinfer import PieInfer, render_video, construct_video
from TTS.api import TTS
# Get device
device = "cuda" if torch.cuda.is_available() else "cpu"
def get_asset(fname):
    return SVGImage.open(os.path.join(os.path.dirname(__file__),
                                      "assets",
                                      fname)).svg_content
class MainPage(Page):
    def __init__(self, model_name: str):
        super(MainPage, self).__init__()
        self.infer = PieInfer()
        self.tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to(device)
        self.r = redis.Redis(host='localhost', port=6379, decode_responses=True)
        self.aihandler = AIHandler()
        self.fields = Navigation(AjaxGroup("ChatGroup", VStack([
            HStack([
                AjaxChatField("Chat",
                              self.register_ajax(f"/refresh_{model_name}",
                                                 self.message_sent),
                              deps_names=["sid",
                                          "session_id",
                                          "Chat",
                                          "Chat__piedemo__file"],
                              use_socketio_support=True,
                              nopie=True,
                              style={
                                  "height": "100%"
                              }),
                OutputColoredTextField("video",
                                       nopie=True,
                                       use_socketio_support=True),
            ], xs=[8, 4]),
            ProgressField("progress",
                          nopie=True,
                          use_socketio_support=True),
            InputHiddenField("session_id", None),
        ]), no_return=True), no_submit=True, page_title="MIA PIA", page_style={
        })
        self.fields.add_link("SIMPLE",
                             "/simple",
                             active=model_name == "render")
        self.fields.add_link("MIA PIA",
                             "/nice",
                             active=model_name != "render")
        self.model_name = model_name
    def get_content(self, **kwargs):
        fields = self.fields.copy()
        fields.child_loc["Chat"].set_default_options(["Hello! What is your name?", "Say one word and stop."])
        """
        fields.child_loc["Chat"].set_avatars({
            "self": get_asset("avatar.svg"),
            "ChatGPT": get_asset("dog.svg"),
        })
        """
        session_id = str(uuid.uuid4())
        return self.fill(fields, {
            "video": f"""
            """,
            "session_id": session_id,
        })
    def message_sent(self, **data):
        sid = data['sid']
        self.emit(self.fields.child_loc["Chat"].clear_input(),
                  to=sid)
        self.emit(self.fields.child_loc["video"].update(f"""
        """))
        data = self.parse(self.fields, data)
        session_id = data['session_id']
        messages_map = self.r.hgetall(f'user-session:{session_id}')
        messages = [self.fields.child_loc["Chat"].format_message("self" if i % 2 == 0 else "ChatGPT",
                                                                 messages_map[f"message_{i}"])
                    for i in range(len(messages_map))]
        print("history: ", messages)
        text = data['Chat']['text']
        self.emit(self.fields.child_loc["Chat"].update(messages + [
            self.fields.child_loc["Chat"].format_message("self", text),
            self.fields.child_loc["Chat"].format_message("ChatGPT", "Generating text..."),
        ]), to=sid)
        output = self.aihandler(text)
        output_text = output['text']
        output_emotion = output['emotion']
        messages_map[f"message_{len(messages)}"] = text
        messages_map[f"message_{len(messages) + 1}"] = output_text
        self.r.hset(f'user-session:{session_id}', mapping=messages_map)
        self.emit(self.fields.child_loc["Chat"].update(messages + [
            self.fields.child_loc["Chat"].format_message("self", text),
            self.fields.child_loc["Chat"].format_message("ChatGPT", "Generating audio..."),
        ]), to=sid)
        self.tts.tts_to_file(text=output_text,
                             speaker_wav="/home/ubuntu/repo/of_couse_here.wav",
                             language="en",
                             emotion=output_emotion,
                             file_path=f"./audio/{session_id}.wav")
        speech_array, sampling_rate = librosa.load(f"./audio/{session_id}.wav",
                                                   sr=16000)
        output = self.infer(speech_array, sampling_rate)
        np.save(os.path.join("./audio", "{}.npy".format(session_id)),
                output)
        self.emit(self.fields.child_loc["Chat"].update(messages + [
            self.fields.child_loc["Chat"].format_message("self", text),
            self.fields.child_loc["Chat"].format_message("ChatGPT", "Rendering..."),
        ]), to=sid)
        n = output.shape[0]
        for i, fname in enumerate(tqdm(render_video(f"{session_id}",
                                                    model_name=self.model_name),
                                       total=n)):
            print("Got frame: ", fname, file=sys.stderr)
            self.emit(self.fields.child_loc["progress"].update(100 * i // n),
                      to=sid)
        construct_video(session_id)
        self.emit(self.fields.child_loc["video"].update(f"""
        <video controls="1" autoplay="1" name="media" style="border-radius: 12px; height: 80%">
            <source src="/api/video/{session_id}" type="video/mp4">
        </video>
        """), to=sid)
        '''self.emit(self.fields.child_loc["video"].update(f"""
        <img name="media" style="border-radius: 12px; height: 80%" src="/api/video/stream/{session_id}"></img>
        """))'''
        self.emit(self.fields.child_loc["Chat"].update(messages + [
            self.fields.child_loc["Chat"].format_message("self", text),
            self.fields.child_loc["Chat"].format_message("ChatGPT", output_text),
        ]), to=sid)
web = Web({
    "": "simple",
    "simple": MainPage("render"),
    "nice": MainPage("FemAdv_b350_V2_050523"),
}, use_socketio_support=True)
host = '0.0.0.0'
port = 8011
debug = False
app = web.get_app()
@app.route("/api/video/<session_id>", methods=["GET"])
def get_video(session_id):
    return send_file("./audio/{}.mp4".format(session_id))
def gen(session_id):
    # NOTE: render_video() in pieinfer.py also expects a model_name argument; this
    # MJPEG streaming path (used only by /api/video/stream) would need it to run.
    for image_path in render_video(f"{session_id}"):
        with open(image_path, 'rb') as f:
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + f.read() + b'\r\n')
    construct_video(session_id)
@app.route("/api/video/stream/<session_id>", methods=["GET"])
def get_video_async(session_id):
    return Response(gen(session_id),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
io = web.get_socketio(app)


@io.on("io_set_text")
def io_set_text(data):
    sid = None
    if "text" not in data:
        emit("io_error", {"message": "Text not found"},
             to=sid)
    # Demo handler: animate a few random coefficients, then push a canned audio clip.
    encode_string = base64.b64encode(open("../feeling_good.wav", "rb").read()).decode()  # decode so the f-string below doesn't embed b'...'
    for i in range(10):
        j = random.randint(0, 2)
        emit("io_set_coef", [{
            "index": j,
            "value": i / 10,
        }], to=sid)
    emit("io_push_audio_blob", {
        "dataURL": f"base64,{encode_string}"
    }, to=sid)
    emit("io_finish", {}, to=sid)


io.run(app,
       host=host, port=port, debug=debug,
       allow_unsafe_werkzeug=True)

miapia_own/pieinfer.py Normal file

@@ -0,0 +1,153 @@
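# EmoTalk inference and rendering helpers:
#   test()            - run the EmoTalk model on an audio array and return per-frame
#                       blendshape coefficients (smoothed, with injected eye blinks)
#   render_video()    - drive headless Blender to render one PNG per frame, yielding
#                       each saved frame path as Blender reports it
#   construct_video() - mux the rendered frames with the source audio via ffmpeg
#   PieInfer          - thin wrapper that loads the pretrained EmoTalk checkpoint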
import librosa
import numpy as np
import argparse
from parse import parse
from scipy.signal import savgol_filter
import torch
from model import EmoTalk
import random
import os, subprocess
import shlex
from munch import Munch
@torch.no_grad()
def test(model, speech_array, sampling_rate):
    args = Munch(
        bs_dim=52,
        feature_dim=832,
        period=30,
        device="cuda",
        model_path="./pretrain_model/EmoTalk.pth",
        max_seq_len=5000,
        num_workers=0,
        batch_size=1,
        post_processing=True,
        blender_path="./blender/blender")
    # Pre-baked 7-frame blink curves, written into channels 8 and 9
    # (eyeBlinkLeft/eyeBlinkRight in the 52-dim ARKit-style output).
    eye1 = np.array([0.36537236, 0.950235724, 0.95593375, 0.916715622, 0.367256105, 0.119113259, 0.025357503])
    eye2 = np.array([0.234776169, 0.909951985, 0.944758058, 0.777862132, 0.191071674, 0.235437036, 0.089163929])
    eye3 = np.array([0.870040774, 0.949833691, 0.949418545, 0.695911646, 0.191071674, 0.072576277, 0.007108896])
    eye4 = np.array([0.000307991, 0.556701422, 0.952656746, 0.942345619, 0.425857186, 0.148335218, 0.017659493])
    # speech_array, sampling_rate = librosa.load(os.path.join(wav_path), sr=16000)
    audio = torch.FloatTensor(speech_array).unsqueeze(0).to(args.device)
    level = torch.tensor([1]).to(args.device)
    person = torch.tensor([0]).to(args.device)
    prediction = model.predict(audio, level, person)
    prediction = prediction.squeeze().detach().cpu().numpy()
    if args.post_processing:
        # Smooth each blendshape channel, then scatter random blinks over the clip.
        output = np.zeros((prediction.shape[0], prediction.shape[1]))
        for i in range(prediction.shape[1]):
            output[:, i] = savgol_filter(prediction[:, i], 5, 2)
        output[:, 8] = 0
        output[:, 9] = 0
        i = random.randint(0, 60)
        while i < output.shape[0] - 7:
            eye_num = random.randint(1, 4)
            if eye_num == 1:
                output[i:i + 7, 8] = eye1
                output[i:i + 7, 9] = eye1
            elif eye_num == 2:
                output[i:i + 7, 8] = eye2
                output[i:i + 7, 9] = eye2
            elif eye_num == 3:
                output[i:i + 7, 8] = eye3
                output[i:i + 7, 9] = eye3
            else:
                output[i:i + 7, 8] = eye4
                output[i:i + 7, 9] = eye4
            time1 = random.randint(60, 180)
            i = i + time1
        return output
    else:
        return prediction
def render_video(wav_name, model_name):
    args = Munch(
        bs_dim=52,
        feature_dim=832,
        period=30,
        device="cuda",
        model_path="./pretrain_model/EmoTalk.pth",
        max_seq_len=5000,
        num_workers=0,
        batch_size=1,
        post_processing=True,
        blender_path="./blender/blender")
    # wav_name = args.wav_path.split('/')[-1].split('.')[0]
    image_path = os.path.join("./audio", wav_name)
    os.makedirs(image_path, exist_ok=True)
    blender_path = args.blender_path
    python_path = f"./{model_name}.py"
    blend_path = f"./{model_name}.blend"
    print(python_path, blend_path)
    # python_path = "./render.py"
    # blend_path = "./render.blend"
    cmd = '{} -t 64 -b {} -P {} -- "{}" "{}" '.format(blender_path,
                                                      blend_path,
                                                      python_path,
                                                      "./audio/",
                                                      wav_name)
    cmd = shlex.split(cmd)
    p = subprocess.Popen(cmd,
                         shell=False,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    # Stream Blender's stdout and yield each saved frame path as it appears.
    while p.poll() is None:
        line = p.stdout.readline().decode('utf-8')
        line = line.strip()
        if line and line.startswith('Saved: '):
            fname = parse("Saved: '{}'", line).fixed[0]
            yield fname
        else:
            print(line)
    if p.returncode == 0:
        print('Subprogram success')
    else:
        print('Subprogram failed')
def construct_video(wav_name):
    image_path = os.path.join("./audio", wav_name)
    os.makedirs(image_path, exist_ok=True)
    image_temp = image_path + "/%d.png"
    output_path = os.path.join("./audio", wav_name + ".mp4")
    cmd = 'ffmpeg -r 30 -i "{}" -i "{}" -pix_fmt yuv420p -s 512x768 "{}" -y'.format(image_temp,
                                                                                    f"./audio/{wav_name}.wav",
                                                                                    output_path)
    subprocess.call(cmd, shell=True)
    cmd = 'rm -rf "{}"'.format(image_path)
    subprocess.call(cmd, shell=True)
class PieInfer(object):
    def __init__(self):
        args = Munch(
            bs_dim=52,
            feature_dim=832,
            period=30,
            device="cuda",
            model_path="./pretrain_model/EmoTalk.pth",
            max_seq_len=5000,
            num_workers=0,
            batch_size=1,
            post_processing=True,
            blender_path="./blender/blender")
        # """
        model = EmoTalk(args)
        model.load_state_dict(torch.load(args.model_path, map_location=torch.device(args.device)), strict=False)
        model = model.to(args.device)
        model.eval()
        # """
        # model = None
        self.model = model

    def __call__(self,
                 speech_array,
                 sampling_rate):
        return test(self.model, speech_array, sampling_rate)