Разработка систем AI-анимации фотографий
AI-анимация фото добавляет движение в статичные изображения: «оживление» портретов, анимация пейзажей (вода, листья), создание cinemagraph-эффекта. Применяется в маркетинге, memorial apps, соцсетях.
AnimateDiff — анимация из одного изображения
from diffusers import AnimateDiffImg2VideoPipeline, DDIMScheduler, MotionAdapter
from diffusers.utils import export_to_gif, export_to_video
import torch
from PIL import Image
import io
class PhotoAnimator:
    """Animate a still photo into a short clip with AnimateDiff.

    Loads a Realistic Vision SD1.5 base model plus the AnimateDiff motion
    adapter once at construction; ``animate_photo`` then turns input photo
    bytes into MP4 or GIF bytes.
    """

    def __init__(self):
        # Motion adapter supplies the temporal (motion) layers for the
        # SD 1.5 base checkpoint.
        adapter = MotionAdapter.from_pretrained(
            "guoyww/animatediff-motion-adapter-v1-5-2"
        )
        self.pipe = AnimateDiffImg2VideoPipeline.from_pretrained(
            "SG161222/Realistic_Vision_V5.1_noVAE",
            motion_adapter=adapter,
        )
        # DDIM scheduler reconfigured with linear betas and "linspace"
        # timestep spacing, as used by AnimateDiff examples.
        self.pipe.scheduler = DDIMScheduler.from_config(
            self.pipe.scheduler.config,
            beta_schedule="linear",
            clip_sample=False,
            timestep_spacing="linspace",
            steps_offset=1,
        )
        self.pipe.to("cuda")

    def animate_photo(
        self,
        photo_bytes: bytes,
        motion_prompt: str,
        negative_prompt: str = "deformed, ugly, static, no motion",
        num_frames: int = 16,
        guidance_scale: float = 7.5,
        output_format: str = "mp4",  # "mp4" or "gif"
    ) -> bytes:
        """Animate a photo and return the encoded video/GIF bytes.

        Args:
            photo_bytes: Source image as raw bytes (any PIL-readable format).
            motion_prompt: Text describing the desired motion.
            negative_prompt: Text describing artifacts to avoid.
            num_frames: Number of frames to generate.
            guidance_scale: Classifier-free guidance strength.
            output_format: "mp4" (default) or "gif".

        Returns:
            Encoded animation as bytes in the requested format.
        """
        import os
        import tempfile

        image = Image.open(io.BytesIO(photo_bytes)).convert("RGB")
        image = image.resize((512, 512))  # AnimateDiff base resolution
        frames = self.pipe(
            image=image,
            prompt=motion_prompt,
            negative_prompt=negative_prompt,
            num_frames=num_frames,
            guidance_scale=guidance_scale,
            num_inference_steps=25,
            # Fixed CPU-seeded generator for reproducible output.
            generator=torch.Generator("cpu").manual_seed(42),
        ).frames[0]

        # The diffusers export helpers write to a filesystem path, so go
        # through a temp file.  Fix vs. original: NamedTemporaryFile with
        # delete=False left the file orphaned on disk forever, and
        # open(...).read() leaked the file handle — clean both up here.
        fd, tmp_path = tempfile.mkstemp(suffix=f".{output_format}")
        os.close(fd)  # export helpers reopen the path themselves
        try:
            if output_format == "gif":
                export_to_gif(frames, tmp_path)
            else:
                export_to_video(frames, tmp_path, fps=8)
            with open(tmp_path, "rb") as fh:
                return fh.read()
        finally:
            os.remove(tmp_path)
Промпты для типичных анимаций
# Ready-made motion prompts for common animation scenarios, keyed by a short
# preset name.  A value is intended to be passed as the motion prompt when
# animating a photo (e.g. "portrait_breathe" for subtle living-portrait
# motion, "landscape_wind" for nature scenes).
ANIMATION_PRESETS = {
"portrait_breathe": "person breathing naturally, slight head movement, eye blinking, realistic",
"landscape_wind": "gentle wind blowing through trees, grass swaying, clouds moving slowly",
"water_ripple": "water surface rippling, reflections moving, gentle waves",
"fire_flicker": "fire flickering, flames dancing, warm light pulsing",
"falling_leaves": "autumn leaves falling gently, light breeze, natural motion",
"rain_window": "rain drops on glass, water trails, reflective surface",
"candle_flame": "candle flame flickering, wax melting slightly, warm ambiance",
"portrait_smile": "person smiling gently, natural expression, subtle movement",
}
Live Portrait — анимация лица по управляющему видео
# LivePortrait — SOTA для face reenactment
# github.com/KwaiVGI/LivePortrait
class LivePortraitAnimator:
    """Face reenactment via the LivePortrait CLI.

    Shells out to ``LivePortrait/inference.py`` (github.com/KwaiVGI/LivePortrait),
    which must be available relative to the working directory.
    """

    def animate_face_from_driving_video(
        self,
        source_portrait: bytes,
        driving_video: bytes,
        relative_motion: bool = True,  # True = transfer relative motion
    ) -> bytes:
        """Animate a source portrait using motion from a driving video.

        Args:
            source_portrait: Portrait image bytes (JPEG expected by the CLI).
            driving_video: Driving video bytes (MP4).
            relative_motion: Pass ``--relative`` (motion deltas) instead of
                ``--absolute`` (exact pose copy) to the CLI.

        Returns:
            Output MP4 bytes.

        Raises:
            subprocess.CalledProcessError: If the inference script fails.
        """
        import subprocess
        import tempfile
        from pathlib import Path

        # Temp directory (and everything in it) is removed on exit.
        with tempfile.TemporaryDirectory() as tmpdir:
            workdir = Path(tmpdir)
            src_path = workdir / "source.jpg"
            drv_path = workdir / "driving.mp4"
            out_path = workdir / "output.mp4"
            src_path.write_bytes(source_portrait)
            drv_path.write_bytes(driving_video)
            subprocess.run(
                [
                    "python", "LivePortrait/inference.py",
                    "--source_image", str(src_path),
                    "--driving_video", str(drv_path),
                    "--output", str(out_path),
                    "--relative" if relative_motion else "--absolute",
                ],
                check=True,  # surface CLI failures instead of returning junk
            )
            # read_bytes() closes the handle; the original open(...).read()
            # leaked an open file object.
            return out_path.read_bytes()
Cinemagraph (частичная анимация)
def create_cinemagraph(
image: bytes,
motion_mask: bytes,  # white = animate, black = keep static
animation_prompt: str
) -> bytes:
"""Animate only part of an image (water, fire, hair) as a cinemagraph."""
# Approach: AnimateDiff with masking —
#   1. generate an animated version of the full frame,
#   2. composite it with the original frame using the mask.
# NOTE(review): animate_photo is defined above as a PhotoAnimator *method*,
# not a module-level function — as written this raises NameError; an
# animator instance needs to be created or injected here. TODO confirm.
animated = animate_photo(image, animation_prompt)
# NOTE(review): composite_with_mask is not defined anywhere in this file —
# presumably provided elsewhere; verify before use.
return composite_with_mask(animated, image, motion_mask)
Сроки: базовый сервис AnimateDiff анимации фото — 1–2 недели. LivePortrait face reenactment + веб-интерфейс — 2–3 недели.







