Blur Bake

This commit is contained in:
Keisuke Hirata 2026-02-16 13:51:25 +09:00
parent fc2dc0a478
commit 67178e0f52
10 changed files with 826 additions and 285 deletions

View File

@ -16,7 +16,7 @@ bl_info = {
def register():
"""Register all extension components."""
import bpy
from bpy.props import FloatProperty
from bpy.props import FloatProperty, IntProperty, EnumProperty, StringProperty
from . import operators
from . import panels
@ -49,6 +49,32 @@ def register():
step=0.1,
)
bpy.types.Scene.facemask_cache_dir = StringProperty(
name="Cache Directory",
description="Optional cache root directory (empty = default .mask_cache)",
default="",
subtype='DIR_PATH',
)
bpy.types.Scene.facemask_bake_blur_size = IntProperty(
name="Bake Blur Size",
description="Gaussian blur size (pixels) used for bake",
default=50,
min=1,
max=501,
)
bpy.types.Scene.facemask_bake_format = EnumProperty(
name="Bake Format",
description="Output format for baked blur video",
items=[
("MP4", "MP4", "Export as .mp4"),
("AVI", "AVI", "Export as .avi"),
("MOV", "MOV", "Export as .mov"),
],
default="MP4",
)
operators.register()
panels.register()
@ -66,6 +92,9 @@ def unregister():
del bpy.types.Scene.facemask_conf_threshold
del bpy.types.Scene.facemask_iou_threshold
del bpy.types.Scene.facemask_mask_scale
del bpy.types.Scene.facemask_cache_dir
del bpy.types.Scene.facemask_bake_blur_size
del bpy.types.Scene.facemask_bake_format
if __name__ == "__main__":

View File

@ -1,4 +1,5 @@
"""Core module exports."""
from .async_bake_generator import AsyncBakeGenerator, get_bake_generator
from .async_generator import AsyncMaskGenerator, get_generator
from .compositor_setup import create_mask_blur_node_tree, get_or_create_blur_node_tree

View File

@ -0,0 +1,161 @@
"""
Async blur bake generator using Thread + Queue + Timer pattern.
This module mirrors AsyncMaskGenerator behavior for bake-and-swap workflow,
so Blender UI remains responsive during server-side bake processing.
"""
import threading
import queue
from typing import Optional, Callable
# Will be imported when running inside Blender
bpy = None
class AsyncBakeGenerator:
    """Asynchronous bake generator for non-blocking blur bake tasks."""

    def __init__(self):
        # Queues decouple the worker thread from Blender's main thread:
        # the worker only puts, the timer callback only gets.
        self.result_queue: queue.Queue = queue.Queue()
        self.progress_queue: queue.Queue = queue.Queue()
        self.worker_thread: Optional[threading.Thread] = None
        # Flag checked by both the worker loop and the timer callback;
        # clearing it is how cancel() asks the worker to stop.
        self.is_running: bool = False
        self.total_frames: int = 0
        self.current_frame: int = 0
        self._on_complete: Optional[Callable] = None
        self._on_progress: Optional[Callable] = None

    def start(
        self,
        video_path: str,
        mask_path: str,
        output_path: str,
        blur_size: int,
        fmt: str,
        on_complete: Optional[Callable] = None,
        on_progress: Optional[Callable] = None,
    ):
        """Start asynchronous bake request and progress polling.

        Args:
            video_path: Source video file path.
            mask_path: Mask video file path.
            output_path: Destination path for the baked result.
            blur_size: Gaussian blur size in pixels.
            fmt: Output format string passed to the server.
            on_complete: Called on the main thread as (status, data) when
                the bake finishes, fails, or is cancelled.
            on_progress: Called on the main thread as (current, total)
                for each progress update.

        Raises:
            RuntimeError: If a bake is already in progress.
        """
        # bpy is imported lazily so this module can be imported outside
        # Blender (e.g. for tooling) without failing at import time.
        global bpy
        import bpy as _bpy
        bpy = _bpy
        if self.is_running:
            raise RuntimeError("Blur bake already in progress")
        self.is_running = True
        self.total_frames = 0
        self.current_frame = 0
        self._on_complete = on_complete
        self._on_progress = on_progress
        # Daemon thread: Blender may exit even if the bake is running.
        self.worker_thread = threading.Thread(
            target=self._worker,
            args=(video_path, mask_path, output_path, blur_size, fmt),
            daemon=True,
        )
        self.worker_thread.start()
        # Poll the queues from the main thread via Blender's timer API so
        # callbacks run where it is safe to touch bpy data.
        bpy.app.timers.register(
            self._check_progress,
            first_interval=0.1,
        )

    def cancel(self):
        """Cancel the current bake processing."""
        # Clearing the flag makes the worker loop exit; the worker then
        # requests a server-side cancel on its way out (see _worker).
        self.is_running = False
        if self.worker_thread and self.worker_thread.is_alive():
            # Bounded join: the worker sleeps up to 0.5s between polls.
            self.worker_thread.join(timeout=2.0)

    def _worker(
        self,
        video_path: str,
        mask_path: str,
        output_path: str,
        blur_size: int,
        fmt: str,
    ):
        # Runs on the background thread: submits the bake to the server
        # and polls task status until it terminates or is cancelled.
        import time
        from .inference_client import get_client
        task_id = None
        try:
            client = get_client()
            task_id = client.bake_blur(
                video_path=video_path,
                mask_path=mask_path,
                output_path=output_path,
                blur_size=blur_size,
                fmt=fmt,
            )
            while self.is_running:
                status = client.get_task_status(task_id)
                state = status.get("status")
                total = status.get("total", 0)
                if total > 0:
                    self.total_frames = total
                progress = status.get("progress", 0)
                if progress >= 0:
                    self.progress_queue.put(("progress", progress))
                if state == "completed":
                    result_path = status.get("result_path", output_path)
                    self.result_queue.put(("done", result_path))
                    return
                if state == "failed":
                    error_msg = status.get("message", "Unknown server error")
                    self.result_queue.put(("error", error_msg))
                    return
                if state == "cancelled":
                    self.result_queue.put(("cancelled", None))
                    return
                time.sleep(0.5)
            # Local cancel path
            if task_id:
                client.cancel_task(task_id)
            self.result_queue.put(("cancelled", None))
        except Exception as e:
            # Surface any failure (network, server boot, etc.) to the UI
            # instead of dying silently on the background thread.
            self.result_queue.put(("error", str(e)))

    def _check_progress(self) -> Optional[float]:
        # Timer callback on the main thread. Returning a float re-arms the
        # timer after that many seconds; returning None unregisters it.
        while not self.progress_queue.empty():
            try:
                msg_type, data = self.progress_queue.get_nowait()
                if msg_type == "progress":
                    self.current_frame = data
                    if self._on_progress:
                        self._on_progress(self.current_frame, self.total_frames)
            except queue.Empty:
                # empty()/get_nowait() can race with the worker thread.
                break
        if not self.result_queue.empty():
            try:
                msg_type, data = self.result_queue.get_nowait()
                self.is_running = False
                if self._on_complete:
                    self._on_complete(msg_type, data)
                return None
            except queue.Empty:
                pass
        if self.is_running:
            return 0.1
        return None
# Module-level singleton instance, created lazily on first access.
_bake_generator: Optional[AsyncBakeGenerator] = None


def get_bake_generator() -> AsyncBakeGenerator:
    """Return the shared AsyncBakeGenerator, creating it on first use."""
    global _bake_generator
    generator = _bake_generator
    if generator is None:
        generator = AsyncBakeGenerator()
        _bake_generator = generator
    return generator

View File

@ -9,6 +9,7 @@ import json
import os
import signal
import subprocess
import sys
import threading
import time
import urllib.error
@ -96,9 +97,16 @@ class InferenceClient:
self.log_file = open(self.log_file_path, "w", buffering=1) # Line buffered
print(f"[FaceMask] Server log: {self.log_file_path}")
# Start process with 'python' command (will use venv if PATH is set correctly)
# Start server with explicit Python executable when available.
python_executable = "python"
venv_python = os.path.join(venv_bin, "python")
if os.path.isfile(venv_python):
python_executable = venv_python
else:
python_executable = sys.executable
self.server_process = subprocess.Popen(
["python", "-u", server_script], # -u for unbuffered output
[python_executable, "-u", server_script], # -u for unbuffered output
cwd=root_dir,
text=True,
env=server_env,
@ -241,6 +249,45 @@ class InferenceClient:
except urllib.error.HTTPError:
return {"status": "unknown"}
def bake_blur(
    self,
    video_path: str,
    mask_path: str,
    output_path: str,
    blur_size: int,
    fmt: str,
) -> str:
    """
    Request blur bake for a source video + mask video.

    Args:
        video_path: Absolute path of the source video.
        mask_path: Absolute path of the mask video.
        output_path: Destination path for the baked result.
        blur_size: Gaussian blur size in pixels.
        fmt: Output format identifier (e.g. "mp4", "avi", "mov").

    Returns:
        task_id (str)

    Raises:
        RuntimeError: If the server rejects the request.
    """
    # Lazily boot the inference server on first use.
    if not self.is_server_running():
        self.start_server()
    data = {
        "video_path": video_path,
        "mask_path": mask_path,
        "output_path": output_path,
        "blur_size": blur_size,
        "format": fmt,
    }
    req = urllib.request.Request(
        f"{self.SERVER_URL}/bake_blur",
        data=json.dumps(data).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    try:
        with urllib.request.urlopen(req) as response:
            result = json.loads(response.read().decode("utf-8"))
        return result["id"]
    except urllib.error.HTTPError as e:
        # Chain the original HTTPError so both the server's error body and
        # the underlying traceback are preserved for debugging.
        raise RuntimeError(f"Server error: {e.read().decode('utf-8')}") from e
def cancel_task(self, task_id: str):
"""Cancel a task."""
try:

View File

@ -47,6 +47,37 @@ def get_server_status() -> Dict:
return result
def get_cache_root() -> str:
    """
    Resolve cache root directory from scene setting or defaults.

    Priority:
        1) Scene setting: facemask_cache_dir (if non-empty)
        2) Saved blend file directory + .mask_cache
        3) Temp directory + blender_mask_cache
    """
    import bpy

    # 1) An explicit, non-empty user setting always wins.
    current_scene = getattr(bpy.context, "scene", None)
    configured = ""
    if current_scene is not None:
        configured = (getattr(current_scene, "facemask_cache_dir", "") or "").strip()
    if configured:
        return bpy.path.abspath(configured)

    # 2) Saved .blend: keep the cache next to the project file.
    blend_path = bpy.data.filepath
    if blend_path:
        return os.path.join(os.path.dirname(blend_path), ".mask_cache")

    # 3) Unsaved project: fall back to the system temp directory.
    return os.path.join(tempfile.gettempdir(), "blender_mask_cache")
def get_cache_dir_for_strip(strip_name: str) -> str:
    """Return the per-strip cache directory under the resolved cache root."""
    root = get_cache_root()
    return os.path.join(root, strip_name)
def get_cache_info(strip_name: Optional[str] = None) -> Tuple[str, int, int]:
"""
Get cache directory information.
@ -59,22 +90,10 @@ def get_cache_info(strip_name: Optional[str] = None) -> Tuple[str, int, int]:
"""
import bpy
blend_file = bpy.data.filepath
if strip_name:
# Get cache for specific strip
if blend_file:
project_dir = os.path.dirname(blend_file)
cache_path = os.path.join(project_dir, ".mask_cache", strip_name)
else:
cache_path = os.path.join(tempfile.gettempdir(), "blender_mask_cache", strip_name)
cache_path = get_cache_dir_for_strip(strip_name)
else:
# Get cache root
if blend_file:
project_dir = os.path.dirname(blend_file)
cache_path = os.path.join(project_dir, ".mask_cache")
else:
cache_path = os.path.join(tempfile.gettempdir(), "blender_mask_cache")
cache_path = get_cache_root()
# Calculate size and count
total_size = 0
@ -83,13 +102,12 @@ def get_cache_info(strip_name: Optional[str] = None) -> Tuple[str, int, int]:
if os.path.exists(cache_path):
for root, dirs, files in os.walk(cache_path):
for file in files:
if file.endswith('.png'): # Only count mask images
file_path = os.path.join(root, file)
try:
total_size += os.path.getsize(file_path)
file_count += 1
except OSError:
pass
file_path = os.path.join(root, file)
try:
total_size += os.path.getsize(file_path)
file_count += 1
except OSError:
pass
return cache_path, total_size, file_count

View File

@ -1,242 +1,257 @@
"""
Apply Blur Operator for masked face blur in VSE.
Bake-and-swap blur operators for VSE.
Provides operators to apply blur effects using mask strips
generated by the face detection operators.
This module bakes masked blur into a regular video file using the inference
server, then swaps the active strip's source filepath to the baked result.
"""
import os
import bpy
from bpy.props import FloatProperty, IntProperty, StringProperty
from bpy.props import IntProperty
from bpy.types import Operator
from ..core.async_bake_generator import get_bake_generator
from ..core.async_generator import get_generator as get_mask_generator
class SEQUENCER_OT_apply_mask_blur(Operator):
"""Apply blur effect using mask strip."""
bl_idname = "sequencer.apply_mask_blur"
bl_label = "Apply Mask Blur"
bl_description = "Apply blur effect to video using mask strip"
bl_options = {'REGISTER', 'UNDO'}
KEY_ORIGINAL = "facemask_original_filepath"
KEY_BAKED = "facemask_baked_filepath"
KEY_MODE = "facemask_source_mode"
KEY_FORMAT = "facemask_bake_format"
KEY_BLUR_SIZE = "facemask_bake_blur_size"
blur_size: IntProperty(
name="Blur Size",
description="Size of the blur effect in pixels",
default=50,
min=1,
max=500,
)
# Maps the bake-format enum value (Scene.facemask_bake_format) to the
# file extension used for the baked output video.
FORMAT_EXT = {
    "MP4": "mp4",
    "AVI": "avi",
    "MOV": "mov",
}
def _find_mask_strip(seq_editor, strip_name: str):
return seq_editor.strips.get(f"{strip_name}_mask")
def _resolve_mask_path(mask_strip) -> str:
if mask_strip.type == "MOVIE":
return bpy.path.abspath(mask_strip.filepath)
return ""
def _output_path(video_strip, mask_path: str, fmt: str) -> str:
    """Build the baked-video output path, placed next to the mask video."""
    # Replace path separators in the strip name so it is filename-safe.
    sanitized = video_strip.name.replace("/", "_").replace("\\", "_")
    extension = FORMAT_EXT.get(fmt, "mp4")
    directory = os.path.dirname(mask_path)
    return os.path.join(directory, f"{sanitized}_blurred.{extension}")
def _reload_movie_strip(strip):
if hasattr(strip, "reload"):
try:
strip.reload()
except Exception:
pass
def _set_strip_source(strip, filepath: str):
strip.filepath = filepath
_reload_movie_strip(strip)
class SEQUENCER_OT_bake_and_swap_blur_source(Operator):
"""Bake masked blur and replace active strip source with baked video."""
bl_idname = "sequencer.bake_and_swap_blur_source"
bl_label = "Bake & Swap Source"
bl_description = "Bake masked blur to video and swap active strip source"
bl_options = {"REGISTER", "UNDO"}
@classmethod
def poll(cls, context):
"""Check if operator can run."""
if not context.scene.sequence_editor:
return False
seq_editor = context.scene.sequence_editor
strip = seq_editor.active_strip
if not strip:
# Prevent overlapping heavy tasks
if get_mask_generator().is_running:
return False
if strip.type not in {'MOVIE', 'IMAGE'}:
if get_bake_generator().is_running:
return False
# Check if corresponding mask strip exists
mask_name = f"{strip.name}_mask"
return mask_name in seq_editor.strips
strip = context.scene.sequence_editor.active_strip
return bool(strip and strip.type == "MOVIE")
def execute(self, context):
seq_editor = context.scene.sequence_editor
scene = context.scene
video_strip = seq_editor.active_strip
# Auto-detect mask strip
mask_name = f"{video_strip.name}_mask"
mask_strip = seq_editor.strips.get(mask_name)
mask_strip = _find_mask_strip(seq_editor, video_strip.name)
if not mask_strip:
self.report({'ERROR'}, f"Mask strip not found: {mask_name}")
return {'CANCELLED'}
self.report({"ERROR"}, f"Mask strip not found: {video_strip.name}_mask")
return {"CANCELLED"}
video_path = bpy.path.abspath(video_strip.filepath)
mask_path = _resolve_mask_path(mask_strip)
if not os.path.exists(video_path):
self.report({"ERROR"}, f"Source video not found: {video_path}")
return {"CANCELLED"}
if not mask_path or not os.path.exists(mask_path):
self.report({"ERROR"}, f"Mask video not found: {mask_path}")
return {"CANCELLED"}
bake_format = scene.facemask_bake_format
output_path = _output_path(video_strip, mask_path, bake_format)
blur_size = int(scene.facemask_bake_blur_size)
# Reuse baked cache when parameters match and file still exists.
cached_baked_path = video_strip.get(KEY_BAKED)
cached_format = video_strip.get(KEY_FORMAT)
cached_blur_size = video_strip.get(KEY_BLUR_SIZE)
try:
cached_blur_size_int = int(cached_blur_size)
except (TypeError, ValueError):
cached_blur_size_int = None
if (
cached_baked_path
and os.path.exists(cached_baked_path)
and cached_format == bake_format
and cached_blur_size_int == blur_size
):
if video_strip.get(KEY_MODE) != "baked":
video_strip[KEY_MODE] = "baked"
_set_strip_source(video_strip, cached_baked_path)
self.report({"INFO"}, "Using cached baked blur")
return {"FINISHED"}
bake_generator = get_bake_generator()
wm = context.window_manager
def on_complete(status, data):
strip = context.scene.sequence_editor.strips.get(video_strip.name)
if not strip:
print(f"[FaceMask] Bake complete but strip no longer exists: {video_strip.name}")
return
if status == "done":
result_path = data or output_path
original_path = strip.get(KEY_ORIGINAL)
current_mode = strip.get(KEY_MODE, "original")
if not original_path or current_mode != "baked":
strip[KEY_ORIGINAL] = video_path
strip[KEY_BAKED] = result_path
strip[KEY_MODE] = "baked"
strip[KEY_FORMAT] = bake_format
strip[KEY_BLUR_SIZE] = blur_size
_set_strip_source(strip, result_path)
print(f"[FaceMask] Bake completed and source swapped: {result_path}")
elif status == "error":
print(f"[FaceMask] Bake failed: {data}")
elif status == "cancelled":
print("[FaceMask] Bake cancelled")
for area in context.screen.areas:
if area.type == "SEQUENCE_EDITOR":
area.tag_redraw()
def on_progress(current, total):
wm.bake_progress = current
wm.bake_total = max(total, 1)
for area in context.screen.areas:
if area.type == "SEQUENCE_EDITOR":
area.tag_redraw()
wm.bake_progress = 0
wm.bake_total = 1
try:
# Use Mask Modifier approach (Blender 5.0 compatible)
self._apply_with_mask_modifier(context, video_strip, mask_strip)
bake_generator.start(
video_path=video_path,
mask_path=mask_path,
output_path=output_path,
blur_size=blur_size,
fmt=bake_format.lower(),
on_complete=on_complete,
on_progress=on_progress,
)
except Exception as e:
self.report({'ERROR'}, f"Failed to apply blur: {e}")
return {'CANCELLED'}
self.report({"ERROR"}, f"Failed to start bake: {e}")
return {"CANCELLED"}
return {'FINISHED'}
self.report({"INFO"}, "Started blur bake in background")
return {"FINISHED"}
def _apply_with_mask_modifier(self, context, video_strip: "bpy.types.Strip", mask_strip: "bpy.types.Strip"):
"""
Apply blur using Mask Modifier, grouped in a Meta Strip.
Workflow:
1. Duplicate the video strip
2. Create Gaussian Blur effect on the duplicate
3. Add Mask modifier to the blur effect (references mask strip)
4. Group all into a Meta Strip
class SEQUENCER_OT_restore_original_source(Operator):
"""Restore active strip source filepath to original video."""
The blur effect with mask will automatically composite over the original
video due to VSE's channel layering system.
"""
seq_editor = context.scene.sequence_editor
bl_idname = "sequencer.restore_original_source"
bl_label = "Restore Original Source"
bl_description = "Restore active strip to original source filepath"
bl_options = {"REGISTER", "UNDO"}
# Find available channels
used_channels = {s.channel for s in seq_editor.strips}
duplicate_channel = video_strip.channel + 1
while duplicate_channel in used_channels:
duplicate_channel += 1
@classmethod
def poll(cls, context):
if not context.scene.sequence_editor:
return False
if get_bake_generator().is_running:
return False
strip = context.scene.sequence_editor.active_strip
if not strip or strip.type != "MOVIE":
return False
return bool(strip.get(KEY_ORIGINAL))
blur_channel = duplicate_channel + 1
while blur_channel in used_channels:
blur_channel += 1
def execute(self, context):
strip = context.scene.sequence_editor.active_strip
original_path = strip.get(KEY_ORIGINAL)
if not original_path:
self.report({"ERROR"}, "Original source path is not stored")
return {"CANCELLED"}
if not os.path.exists(original_path):
self.report({"ERROR"}, f"Original source not found: {original_path}")
return {"CANCELLED"}
# Step 1: Duplicate the video strip
if video_strip.type == 'MOVIE':
video_copy = seq_editor.strips.new_movie(
name=f"{video_strip.name}_copy",
filepath=bpy.path.abspath(video_strip.filepath),
channel=duplicate_channel,
frame_start=video_strip.frame_final_start,
)
elif video_strip.type == 'IMAGE':
# For image sequences, duplicate differently
video_copy = seq_editor.strips.new_image(
name=f"{video_strip.name}_copy",
filepath=bpy.path.abspath(video_strip.elements[0].filename) if video_strip.elements else "",
channel=duplicate_channel,
frame_start=video_strip.frame_final_start,
)
# Copy all elements
for elem in video_strip.elements[1:]:
video_copy.elements.append(elem.filename)
_set_strip_source(strip, original_path)
strip[KEY_MODE] = "original"
self.report({"INFO"}, "Restored original source")
return {"FINISHED"}
class SEQUENCER_OT_apply_mask_blur(Operator):
"""Compatibility alias: run bake-and-swap blur workflow."""
bl_idname = "sequencer.apply_mask_blur"
bl_label = "Apply Mask Blur"
bl_description = "Compatibility alias for Bake & Swap Source"
bl_options = {"REGISTER", "UNDO"}
@classmethod
def poll(cls, context):
return SEQUENCER_OT_bake_and_swap_blur_source.poll(context)
def execute(self, context):
return bpy.ops.sequencer.bake_and_swap_blur_source("EXEC_DEFAULT")
class SEQUENCER_OT_cancel_bake_blur(Operator):
"""Cancel ongoing blur bake."""
bl_idname = "sequencer.cancel_bake_blur"
bl_label = "Cancel Blur Bake"
bl_description = "Cancel current blur bake process"
bl_options = {"REGISTER"}
def execute(self, context):
bake_generator = get_bake_generator()
if bake_generator.is_running:
bake_generator.cancel()
self.report({"INFO"}, "Blur bake cancelled")
else:
raise ValueError(f"Unsupported strip type: {video_strip.type}")
# Match strip length
strip_length = video_strip.frame_final_end - video_strip.frame_final_start
video_copy.frame_final_end = video_copy.frame_final_start + strip_length
# Step 2: Create Gaussian Blur effect on the duplicate
blur_effect = seq_editor.strips.new_effect(
name=f"{video_strip.name}_blur",
type='GAUSSIAN_BLUR',
channel=blur_channel,
frame_start=video_strip.frame_final_start,
length=strip_length,
input1=video_copy,
)
# Set blur size (Blender 5.0 API)
if hasattr(blur_effect, 'size_x'):
blur_effect.size_x = self.blur_size
blur_effect.size_y = self.blur_size
elif hasattr(blur_effect, 'size'):
blur_effect.size = self.blur_size
# Step 3: Add Mask modifier to the blur effect
mask_mod = blur_effect.modifiers.new(
name="FaceMask",
type='MASK'
)
# Set mask input (Blender 5.0 API)
if hasattr(mask_mod, 'input_mask_strip'):
mask_mod.input_mask_strip = mask_strip
elif hasattr(mask_mod, 'input_mask_id'):
mask_mod.input_mask_type = 'STRIP'
mask_mod.input_mask_id = mask_strip
# Hide the mask strip (but keep it active for the modifier)
mask_strip.mute = True
# Step 4: Create Meta Strip to group everything
# Deselect all first
for strip in seq_editor.strips:
strip.select = False
# Select the strips to group
video_copy.select = True
blur_effect.select = True
mask_strip.select = True
# Set active strip for context
seq_editor.active_strip = blur_effect
# Create meta strip using operator
bpy.ops.sequencer.meta_make()
# Find the newly created meta strip (it will be selected)
meta_strip = None
for strip in seq_editor.strips:
if strip.select and strip.type == 'META':
meta_strip = strip
break
if meta_strip:
meta_strip.name = f"{video_strip.name}_blurred_meta"
self.report({'INFO'}, f"Applied blur with Mask Modifier (grouped in Meta Strip)")
else:
self.report({'INFO'}, f"Applied blur with Mask Modifier (blur on channel {blur_channel})")
def _apply_with_meta_strip(self, context, video_strip: "bpy.types.Strip", mask_strip: "bpy.types.Strip"):
"""
Fallback method using Meta Strip and effects.
This is less elegant but works on all Blender versions.
"""
seq_editor = context.scene.sequence_editor
# Find available channels
base_channel = video_strip.channel
blur_channel = base_channel + 1
effect_channel = blur_channel + 1
# Ensure mask is in correct position
mask_strip.channel = blur_channel
mask_strip.frame_start = video_strip.frame_final_start
# Create Gaussian Blur effect on the video strip
# First, we need to duplicate the video for the blurred version
video_copy = seq_editor.strips.new_movie(
name=f"{video_strip.name}_blur",
filepath=bpy.path.abspath(video_strip.filepath) if hasattr(video_strip, 'filepath') else "",
channel=blur_channel,
frame_start=video_strip.frame_final_start,
) if video_strip.type == 'MOVIE' else None
if video_copy:
# Calculate length (Blender 5.0 uses length instead of frame_end)
strip_length = video_strip.frame_final_end - video_strip.frame_final_start
# Apply Gaussian blur effect (Blender 5.0 API)
blur_effect = seq_editor.strips.new_effect(
name=f"{video_strip.name}_gaussian",
type='GAUSSIAN_BLUR',
channel=effect_channel,
frame_start=video_strip.frame_final_start,
length=strip_length,
input1=video_copy,
)
# Set blur size (Blender 5.0 uses size property, not size_x/size_y)
if hasattr(blur_effect, 'size_x'):
blur_effect.size_x = self.blur_size
blur_effect.size_y = self.blur_size
elif hasattr(blur_effect, 'size'):
blur_effect.size = self.blur_size
# Create Alpha Over to combine original with blurred (using mask)
# Note: Full implementation would require compositing
# This is a simplified version
self.report({'INFO'}, "Created blur effect (full compositing in development)")
else:
# For image sequences, different approach needed
self.report({'WARNING'}, "Image sequence blur not yet fully implemented")
self.report({"WARNING"}, "No blur bake in progress")
return {"FINISHED"}
# Registration
classes = [
    SEQUENCER_OT_bake_and_swap_blur_source,
    SEQUENCER_OT_restore_original_source,
    SEQUENCER_OT_cancel_bake_blur,
    SEQUENCER_OT_apply_mask_blur,  # compatibility alias for the old operator id
]
@ -244,8 +259,12 @@ classes = [
def register():
    """Register operator classes and the bake progress properties."""
    for cls in classes:
        bpy.utils.register_class(cls)
    # WindowManager properties feed the bake progress bar in the panel.
    bpy.types.WindowManager.bake_progress = IntProperty(default=0)
    bpy.types.WindowManager.bake_total = IntProperty(default=0)
def unregister():
    """Remove bake progress properties and unregister operator classes."""
    del bpy.types.WindowManager.bake_progress
    del bpy.types.WindowManager.bake_total
    # Unregister in reverse order of registration.
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)

View File

@ -10,6 +10,8 @@ import bpy
from bpy.types import Operator
from bpy.props import BoolProperty
from ..core.utils import get_cache_root, get_cache_dir_for_strip
class SEQUENCER_OT_clear_mask_cache(Operator):
"""Clear mask cache directories."""
@ -26,21 +28,12 @@ class SEQUENCER_OT_clear_mask_cache(Operator):
)
def execute(self, context):
import tempfile
blend_file = bpy.data.filepath
total_size = 0
cleared_count = 0
if self.all_strips:
# Clear all cache directories
if blend_file:
# Project cache
project_dir = os.path.dirname(blend_file)
cache_root = os.path.join(project_dir, ".mask_cache")
else:
# Temp cache
cache_root = os.path.join(tempfile.gettempdir(), "blender_mask_cache")
cache_root = get_cache_root()
if os.path.exists(cache_root):
# Calculate size before deletion
@ -72,11 +65,7 @@ class SEQUENCER_OT_clear_mask_cache(Operator):
return {'CANCELLED'}
strip = seq_editor.active_strip
if blend_file:
project_dir = os.path.dirname(blend_file)
cache_dir = os.path.join(project_dir, ".mask_cache", strip.name)
else:
cache_dir = os.path.join(tempfile.gettempdir(), "blender_mask_cache", strip.name)
cache_dir = get_cache_dir_for_strip(strip.name)
if os.path.exists(cache_dir):
# Calculate size

View File

@ -11,6 +11,7 @@ from bpy.props import IntProperty
from bpy.types import Operator
from ..core.async_generator import get_generator
from ..core.utils import get_cache_dir_for_strip
class SEQUENCER_OT_generate_face_mask(Operator):
@ -125,19 +126,7 @@ class SEQUENCER_OT_generate_face_mask(Operator):
def _get_cache_dir(self, context, strip) -> str:
"""Get or create cache directory for mask images."""
import tempfile
# Use temp directory with project-specific subdirectory
# This avoids issues with extension_path_user package name resolution
blend_file = bpy.data.filepath
if blend_file:
# Use blend file directory if saved
project_dir = os.path.dirname(blend_file)
cache_dir = os.path.join(project_dir, ".mask_cache", strip.name)
else:
# Use temp directory for unsaved projects
cache_dir = os.path.join(tempfile.gettempdir(), "blender_mask_cache", strip.name)
cache_dir = get_cache_dir_for_strip(strip.name)
os.makedirs(cache_dir, exist_ok=True)
return cache_dir
@ -157,16 +146,22 @@ class SEQUENCER_OT_generate_face_mask(Operator):
# Check for MP4 video (new format)
mask_video = os.path.join(cache_dir, "mask.mp4")
if os.path.exists(mask_video):
# Verify video has expected number of frames
import cv2
cap = cv2.VideoCapture(mask_video)
if cap.isOpened():
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# Prefer frame-count verification when cv2 is available, but do not
# hard-fail on Blender Python environments without cv2.
try:
import cv2
cap = cv2.VideoCapture(mask_video)
if cap.isOpened():
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cap.release()
# Accept cache if at least 90% of frames exist
return frame_count >= expected_frames * 0.9
cap.release()
# Accept cache if at least 90% of frames exist
return frame_count >= expected_frames * 0.9
cap.release()
return False
return False
except Exception:
# Fallback: treat existing MP4 cache as valid when cv2 is unavailable.
return True
# Fallback: check for PNG sequence (backward compatibility)
mask_files = [f for f in os.listdir(cache_dir)

View File

@ -8,6 +8,7 @@ for controlling mask generation and blur application.
import bpy
from bpy.types import Panel
from ..core.async_bake_generator import get_bake_generator
from ..core.async_generator import get_generator
from ..core.utils import get_server_status, get_cache_info, format_size
@ -29,20 +30,18 @@ class SEQUENCER_PT_face_mask(Panel):
# Note: Blender 5.0 uses 'strips' instead of 'sequences'
generator = get_generator()
bake_generator = get_bake_generator()
# Always show parameters and status
self._draw_parameters(layout, scene)
self._draw_server_status(layout)
self._draw_cache_info(layout, seq_editor)
layout.separator()
# Show progress if generating
# Show progress if generating masks
if generator.is_running:
self._draw_progress(layout, wm, generator)
return
# Show progress if baking blur
if bake_generator.is_running:
self._draw_bake_progress(layout, wm, bake_generator)
return
# Show controls if strip selected
# Show primary controls first (top priority in UI)
if seq_editor and seq_editor.active_strip:
strip = seq_editor.active_strip
@ -54,6 +53,13 @@ class SEQUENCER_PT_face_mask(Panel):
else:
layout.label(text="No strip selected")
layout.separator()
# Secondary sections
self._draw_parameters(layout, scene)
self._draw_server_status(layout)
self._draw_cache_info(layout, context, seq_editor)
def _draw_parameters(self, layout, scene):
"""Draw detection parameters."""
box = layout.box()
@ -94,7 +100,7 @@ class SEQUENCER_PT_face_mask(Panel):
row.label(text="GPU:", icon='ERROR')
row.label(text="Not Available")
def _draw_cache_info(self, layout, seq_editor):
def _draw_cache_info(self, layout, context, seq_editor):
"""Draw cache information and clear button."""
box = layout.box()
box.label(text="Cache", icon='FILE_CACHE')
@ -115,6 +121,9 @@ class SEQUENCER_PT_face_mask(Panel):
row.label(text="Files:")
row.label(text=str(file_count))
# Cache directory setting
box.prop(context.scene, "facemask_cache_dir")
# Clear cache buttons
row = box.row(align=True)
if seq_editor and seq_editor.active_strip:
@ -151,6 +160,23 @@ class SEQUENCER_PT_face_mask(Panel):
icon='CANCEL',
)
def _draw_bake_progress(self, layout, wm, generator):
    """Draw progress bar during blur bake.

    Args:
        layout: Parent UI layout to draw into.
        wm: Window manager holding the bake_progress / bake_total counters.
        generator: Bake generator instance (unused here; NOTE(review):
            presumably kept for signature parity with _draw_progress).
    """
    box = layout.box()
    box.label(text="Baking Blur...", icon='RENDER_ANIMATION')
    # Guard against division by zero before the first progress update.
    progress = wm.bake_progress / max(wm.bake_total, 1)
    box.progress(
        factor=progress,
        text=f"Frame {wm.bake_progress} / {wm.bake_total}",
    )
    box.operator(
        "sequencer.cancel_bake_blur",
        text="Cancel",
        icon='CANCEL',
    )
def _draw_generation_controls(self, layout, context, strip):
"""Draw mask generation controls."""
box = layout.box()
@ -179,7 +205,7 @@ class SEQUENCER_PT_face_mask(Panel):
def _draw_blur_controls(self, layout, context, strip):
"""Draw blur application controls."""
box = layout.box()
box.label(text="Blur Application", icon='MATFLUID')
box.label(text="Blur Bake", icon='MATFLUID')
# Check for mask strip
seq_editor = context.scene.sequence_editor
@ -190,11 +216,28 @@ class SEQUENCER_PT_face_mask(Panel):
box.label(text="Generate a mask first", icon='INFO')
return
# Apply blur button
op = box.operator(
"sequencer.apply_mask_blur",
text="Apply Mask Blur",
icon='PROP_CON',
# Bake parameters
col = box.column(align=True)
col.prop(context.scene, "facemask_bake_blur_size")
col.prop(context.scene, "facemask_bake_format")
# Source status
source_mode = strip.get("facemask_source_mode", "original")
if source_mode == "baked":
box.label(text="Source: Baked", icon='CHECKMARK')
else:
box.label(text="Source: Original", icon='FILE_MOVIE')
# Bake and restore buttons
box.operator(
"sequencer.bake_and_swap_blur_source",
text="Bake & Swap Source",
icon='RENDER_STILL',
)
box.operator(
"sequencer.restore_original_source",
text="Restore Original Source",
icon='LOOP_BACK',
)

View File

@ -83,6 +83,45 @@ class GenerateRequest(BaseModel):
iou_threshold: float = 0.45
mask_scale: float = 1.5
class BakeRequest(BaseModel):
    """Request body for the /bake_blur endpoint."""
    video_path: str  # source video to blur
    mask_path: str  # mask video selecting the regions to blur
    output_path: str  # where the baked video is written
    blur_size: int = 50  # Gaussian blur size in pixels
    format: str = "mp4"  # output container format key
def _build_video_writer(
    output_path: str,
    fmt: str,
    fps: float,
    width: int,
    height: int,
) -> cv2.VideoWriter:
    """Create a cv2.VideoWriter for *fmt*, trying codecs in preference order.

    Raises:
        RuntimeError: If no candidate codec can open a writer.
    """
    candidates_by_format = {
        "mp4": ["avc1", "mp4v"],
        "mov": ["avc1", "mp4v"],
        "avi": ["MJPG", "XVID"],
    }
    candidates = candidates_by_format.get(fmt.lower(), ["mp4v"])
    for codec in candidates:
        fourcc = cv2.VideoWriter_fourcc(*codec)
        candidate = cv2.VideoWriter(
            output_path,
            fourcc,
            fps,
            (width, height),
            isColor=True,
        )
        if candidate.isOpened():
            print(f"[FaceMask] Using output codec: {codec}")
            return candidate
        # Codec unsupported by this OpenCV build; release and try the next.
        candidate.release()
    raise RuntimeError(f"Failed to create video writer for format='{fmt}'")
def process_video_task(task_id: str, req: GenerateRequest):
"""Background task to process video with async MP4 output."""
writer = None
@ -162,6 +201,72 @@ def process_video_task(task_id: str, req: GenerateRequest):
# Batch processing configuration
BATCH_SIZE = 5 # Optimal batch size for 4K video (72.9% improvement)
frame_buffer = []
TEMPORAL_SIDE_WEIGHT = 0.7
TEMPORAL_CENTER_WEIGHT = 1.0
# Temporal blending state (streaming, low-memory)
prev_mask = None
curr_mask = None
wrote_first_frame = False
def _scale_mask(mask: np.ndarray, weight: float) -> np.ndarray:
"""Scale mask intensity for temporal blending."""
if weight == 1.0:
return mask
return cv2.convertScaleAbs(mask, alpha=weight, beta=0)
def _blend_edge(base: np.ndarray, neighbor: np.ndarray) -> np.ndarray:
    """Blend a boundary frame's mask with its single temporal neighbor.

    Used for the first and last frames, which only have one-sided
    temporal context: the base frame keeps full weight while the
    neighbor contributes at the reduced side weight, combined per-pixel
    with a max so masked regions never shrink.
    """
    weighted_base = _scale_mask(base, TEMPORAL_CENTER_WEIGHT)
    weighted_neighbor = _scale_mask(neighbor, TEMPORAL_SIDE_WEIGHT)
    return cv2.max(weighted_base, weighted_neighbor)
def _blend_middle(prev: np.ndarray, cur: np.ndarray, nxt: np.ndarray) -> np.ndarray:
    """Blend a middle frame's mask with both temporal neighbors.

    The current frame contributes at full weight; the previous and next
    frames contribute at the reduced side weight. The per-pixel max keeps
    the union of all three (scaled) masks, smoothing mask flicker across
    consecutive frames.
    """
    side_context = cv2.max(
        _scale_mask(prev, TEMPORAL_SIDE_WEIGHT),
        _scale_mask(nxt, TEMPORAL_SIDE_WEIGHT),
    )
    return cv2.max(_scale_mask(cur, TEMPORAL_CENTER_WEIGHT), side_context)
def push_mask_temporal(raw_mask: np.ndarray):
    """Push mask and emit blended output in frame order.

    Maintains a streaming three-frame window over the closure state
    (prev_mask, curr_mask) so only two masks are held in memory; each
    emitted frame is blended with its temporal neighbors before being
    queued for the writer thread.
    """
    nonlocal prev_mask, curr_mask, wrote_first_frame
    # First pushed mask: just prime the window — nothing can be emitted
    # until a neighbor exists.
    if prev_mask is None:
        prev_mask = raw_mask
        return
    # Second pushed mask: window now holds two frames; frame 1 still
    # waits for frame 3 before its one-sided edge blend is emitted.
    if curr_mask is None:
        curr_mask = raw_mask
        return
    # Third and later pushes: emit the very first frame exactly once,
    # with one-sided context (it has no predecessor).
    if not wrote_first_frame:
        write_queue.put(_blend_edge(prev_mask, curr_mask))
        wrote_first_frame = True
    # Emit blended current frame using prev/current/next
    write_queue.put(_blend_middle(prev_mask, curr_mask, raw_mask))
    # Slide temporal window
    prev_mask = curr_mask
    curr_mask = raw_mask
def flush_temporal_tail():
    """Flush remaining masks after all frames are processed.

    push_mask_temporal() always lags the input by up to two frames, so
    this must run once at end-of-stream to emit whatever is still held
    in the temporal window.
    """
    # Nothing was ever pushed — nothing to flush.
    if prev_mask is None:
        return
    # Single-frame case
    if curr_mask is None:
        write_queue.put(_scale_mask(prev_mask, TEMPORAL_CENTER_WEIGHT))
        return
    # Two-frame case
    # (frame 1 was never emitted by push_mask_temporal, so emit it here)
    if not wrote_first_frame:
        write_queue.put(_blend_edge(prev_mask, curr_mask))
    # Always emit last frame with one-sided blend
    write_queue.put(_blend_edge(curr_mask, prev_mask))
def process_batch():
"""Process accumulated batch of frames."""
@ -182,8 +287,8 @@ def process_video_task(task_id: str, req: GenerateRequest):
mask_scale=req.mask_scale
)
# Async write to queue
write_queue.put(mask)
# Temporal blend before async write
push_mask_temporal(mask)
# Clear buffer
frame_buffer.clear()
@ -231,6 +336,7 @@ def process_video_task(task_id: str, req: GenerateRequest):
# Process remaining frames in buffer
if frame_buffer:
process_batch()
flush_temporal_tail()
# Cleanup
writer_running.clear()
@ -258,6 +364,128 @@ def process_video_task(task_id: str, req: GenerateRequest):
if task_id in cancel_events:
del cancel_events[task_id]
def process_bake_task(task_id: str, req: BakeRequest):
    """Background task to bake blur into a regular video file.

    Reads the source and mask videos frame by frame, Gaussian-blurs each
    source frame, alpha-composites blurred over original using the mask
    as per-pixel weight, and writes the result with _build_video_writer.
    Progress, completion, failure and cancellation are all reported by
    mutating the module-level ``tasks[task_id]`` record; the matching
    ``cancel_events`` entry is polled each frame and removed on exit.
    """
    src_cap = None
    mask_cap = None
    writer = None
    try:
        tasks[task_id].status = TaskStatus.PROCESSING
        cancel_event = cancel_events.get(task_id)
        # Validate inputs up front so failures carry a precise message.
        if not os.path.exists(req.video_path):
            tasks[task_id].status = TaskStatus.FAILED
            tasks[task_id].message = f"Video not found: {req.video_path}"
            return
        if not os.path.exists(req.mask_path):
            tasks[task_id].status = TaskStatus.FAILED
            tasks[task_id].message = f"Mask video not found: {req.mask_path}"
            return
        src_cap = cv2.VideoCapture(req.video_path)
        mask_cap = cv2.VideoCapture(req.mask_path)
        if not src_cap.isOpened():
            tasks[task_id].status = TaskStatus.FAILED
            tasks[task_id].message = "Failed to open source video"
            return
        if not mask_cap.isOpened():
            tasks[task_id].status = TaskStatus.FAILED
            tasks[task_id].message = "Failed to open mask video"
            return
        # Source properties; fps falls back to 30 when the container
        # reports 0/NaN-ish (``or`` treats 0.0 as missing).
        src_fps = src_cap.get(cv2.CAP_PROP_FPS) or 30.0
        src_width = int(src_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        src_height = int(src_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        src_frames = int(src_cap.get(cv2.CAP_PROP_FRAME_COUNT))
        mask_frames = int(mask_cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if src_width <= 0 or src_height <= 0:
            tasks[task_id].status = TaskStatus.FAILED
            tasks[task_id].message = "Invalid source video dimensions"
            return
        # Process only the overlap when the two videos disagree in length.
        total = min(src_frames, mask_frames) if src_frames > 0 and mask_frames > 0 else 0
        if total <= 0:
            tasks[task_id].status = TaskStatus.FAILED
            tasks[task_id].message = "Source/mask frame count is zero"
            return
        tasks[task_id].total = total
        output_dir = os.path.dirname(req.output_path)
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)
        writer = _build_video_writer(req.output_path, req.format, src_fps, src_width, src_height)
        # Kernel size must be odd and >= 1
        blur_size = max(1, int(req.blur_size))
        if blur_size % 2 == 0:
            blur_size += 1
        print(f"[FaceMask] Starting blur bake: {req.video_path} + {req.mask_path} -> {req.output_path}")
        if src_frames != mask_frames:
            print(
                f"[FaceMask] Warning: frame count mismatch "
                f"(src={src_frames}, mask={mask_frames}), processing {total} frames"
            )
        for idx in range(total):
            # Cooperative cancellation: checked once per frame.
            if cancel_event and cancel_event.is_set():
                tasks[task_id].status = TaskStatus.CANCELLED
                tasks[task_id].message = "Cancelled by user"
                break
            src_ok, src_frame = src_cap.read()
            mask_ok, mask_frame = mask_cap.read()
            # NOTE(review): a mid-stream read failure breaks out here but the
            # task is still reported COMPLETED below — confirm this best-effort
            # behavior is intended for truncated inputs.
            if not src_ok or not mask_ok:
                break
            # Normalize the mask to single-channel at source resolution.
            if mask_frame.ndim == 3:
                mask_gray = cv2.cvtColor(mask_frame, cv2.COLOR_BGR2GRAY)
            else:
                mask_gray = mask_frame
            if mask_gray.shape[0] != src_height or mask_gray.shape[1] != src_width:
                mask_gray = cv2.resize(
                    mask_gray,
                    (src_width, src_height),
                    interpolation=cv2.INTER_LINEAR,
                )
            # Per-pixel alpha composite: mask 255 -> fully blurred,
            # mask 0 -> original pixel, in float32 to avoid clipping.
            blurred = cv2.GaussianBlur(src_frame, (blur_size, blur_size), 0)
            alpha = (mask_gray.astype(np.float32) / 255.0)[..., np.newaxis]
            composed = (src_frame.astype(np.float32) * (1.0 - alpha)) + (
                blurred.astype(np.float32) * alpha
            )
            writer.write(np.clip(composed, 0, 255).astype(np.uint8))
            tasks[task_id].progress = idx + 1
        # Only mark COMPLETED if no break path changed the status
        # (cancellation sets CANCELLED above).
        if tasks[task_id].status == TaskStatus.PROCESSING:
            tasks[task_id].status = TaskStatus.COMPLETED
            tasks[task_id].result_path = req.output_path
            tasks[task_id].message = "Blur bake completed"
            print(f"[FaceMask] Bake completed: {req.output_path}")
    except Exception as e:
        tasks[task_id].status = TaskStatus.FAILED
        tasks[task_id].message = str(e)
        print(f"Error in bake task {task_id}: {e}")
        traceback.print_exc()
    finally:
        # Release native handles and drop the cancel event regardless of
        # how the task ended.
        if src_cap:
            src_cap.release()
        if mask_cap:
            mask_cap.release()
        if writer:
            writer.release()
        if task_id in cancel_events:
            del cancel_events[task_id]
def check_gpu_available() -> dict:
"""
Check if GPU is available for inference.
@ -418,6 +646,17 @@ def generate_mask_endpoint(req: GenerateRequest, background_tasks: BackgroundTas
background_tasks.add_task(process_video_task, task_id, req)
return task
@app.post("/bake_blur", response_model=Task)
def bake_blur_endpoint(req: BakeRequest, background_tasks: BackgroundTasks):
    """Queue a blur-bake job and return its tracking Task immediately."""
    new_id = str(uuid.uuid4())
    pending = Task(id=new_id, status=TaskStatus.PENDING)
    # Register the task record and its cancellation flag before scheduling
    # the worker, so a client polling right away always finds both.
    tasks[new_id] = pending
    cancel_events[new_id] = threading.Event()
    background_tasks.add_task(process_bake_task, new_id, req)
    return pending
@app.get("/tasks/{task_id}", response_model=Task)
def get_task(task_id: str):
if task_id not in tasks: