Compare commits

..

No commits in common. "dc41327ceadcbf90b65a5e9d8d2e971174caadf7" and "0fdff5423e16343b547628a2301d43a77c566523" have entirely different histories.

10 changed files with 102 additions and 795 deletions

View File

@ -1,5 +1,5 @@
"""Core module exports.""" """Core module exports."""
from .async_bake_generator import AsyncBakeGenerator as AsyncBakeGenerator, get_bake_generator as get_bake_generator from .async_bake_generator import AsyncBakeGenerator, get_bake_generator
from .async_generator import AsyncMaskGenerator as AsyncMaskGenerator, get_generator as get_generator from .async_generator import AsyncMaskGenerator, get_generator
from .compositor_setup import create_mask_blur_node_tree as create_mask_blur_node_tree, get_or_create_blur_node_tree as get_or_create_blur_node_tree from .compositor_setup import create_mask_blur_node_tree, get_or_create_blur_node_tree

View File

@ -64,95 +64,12 @@ class AsyncBakeGenerator:
first_interval=0.1, first_interval=0.1,
) )
def start_images(
self,
image_dir: str,
filenames: list,
output_dir: str,
detections_path: str,
blur_size: int,
display_scale: float,
on_complete=None,
on_progress=None,
):
"""画像シーケンスのぼかしBakeを非同期で開始する。"""
global bpy
import bpy as _bpy
bpy = _bpy
if self.is_running:
raise RuntimeError("Blur bake already in progress")
self.is_running = True
self.total_frames = len(filenames)
self.current_frame = 0
self._on_complete = on_complete
self._on_progress = on_progress
self.worker_thread = threading.Thread(
target=self._worker_images,
args=(image_dir, filenames, output_dir, detections_path, blur_size, display_scale),
daemon=True,
)
self.worker_thread.start()
bpy.app.timers.register(self._check_progress, first_interval=0.1)
def cancel(self): def cancel(self):
"""Cancel the current bake processing.""" """Cancel the current bake processing."""
self.is_running = False self.is_running = False
if self.worker_thread and self.worker_thread.is_alive(): if self.worker_thread and self.worker_thread.is_alive():
self.worker_thread.join(timeout=2.0) self.worker_thread.join(timeout=2.0)
def _worker_images(
self,
image_dir: str,
filenames: list,
output_dir: str,
detections_path: str,
blur_size: int,
display_scale: float,
):
import time
from .inference_client import get_client
task_id = None
try:
client = get_client()
task_id = client.bake_image_blur(
image_dir=image_dir,
filenames=filenames,
output_dir=output_dir,
detections_path=detections_path,
blur_size=blur_size,
display_scale=display_scale,
)
while self.is_running:
status = client.get_task_status(task_id)
state = status.get("status")
total = status.get("total", 0)
if total > 0:
self.total_frames = total
progress = status.get("progress", 0)
if progress >= 0:
self.progress_queue.put(("progress", progress))
if state == "completed":
result_path = status.get("result_path", output_dir)
self.result_queue.put(("done", result_path))
return
if state == "failed":
self.result_queue.put(("error", status.get("message", "Unknown error")))
return
if state == "cancelled":
self.result_queue.put(("cancelled", None))
return
time.sleep(0.5)
if task_id:
client.cancel_task(task_id)
self.result_queue.put(("cancelled", None))
except Exception as e:
self.result_queue.put(("error", str(e)))
def _worker( def _worker(
self, self,
video_path: str, video_path: str,

View File

@ -104,105 +104,12 @@ class AsyncMaskGenerator:
first_interval=0.1, first_interval=0.1,
) )
def start_images(
self,
image_dir: str,
filenames: list,
output_dir: str,
start_index: int,
end_index: int,
conf_threshold: float = 0.5,
iou_threshold: float = 0.45,
on_complete=None,
on_progress=None,
):
"""画像シーケンスの顔検出を非同期で開始する。"""
global bpy
import bpy as _bpy
bpy = _bpy
if self.is_running:
raise RuntimeError("Mask generation already in progress")
self.is_running = True
self.total_frames = end_index - start_index + 1
self.current_frame = 0
self._on_complete = on_complete
self._on_progress = on_progress
os.makedirs(output_dir, exist_ok=True)
self.worker_thread = threading.Thread(
target=self._worker_images,
args=(image_dir, filenames, output_dir, start_index, end_index,
conf_threshold, iou_threshold),
daemon=True,
)
self.worker_thread.start()
bpy.app.timers.register(self._check_progress, first_interval=0.1)
def cancel(self): def cancel(self):
"""Cancel the current processing.""" """Cancel the current processing."""
self.is_running = False self.is_running = False
if self.worker_thread and self.worker_thread.is_alive(): if self.worker_thread and self.worker_thread.is_alive():
self.worker_thread.join(timeout=2.0) self.worker_thread.join(timeout=2.0)
def _worker_images(
self,
image_dir: str,
filenames: list,
output_dir: str,
start_index: int,
end_index: int,
conf_threshold: float,
iou_threshold: float,
):
import time
from .inference_client import get_client
try:
client = get_client()
task_id = client.generate_mask_images(
image_dir=image_dir,
filenames=filenames,
output_dir=output_dir,
start_index=start_index,
end_index=end_index,
conf_threshold=conf_threshold,
iou_threshold=iou_threshold,
)
while self.is_running:
status = client.get_task_status(task_id)
state = status.get("status")
total = status.get("total", 0)
if total > 0:
self.total_frames = total
if state == "completed":
progress = status.get("progress", self.total_frames)
if progress >= 0:
self.progress_queue.put(("progress", progress))
result_path = status.get(
"result_path",
os.path.join(output_dir, "detections.msgpack"),
)
self.result_queue.put(("done", result_path))
return
elif state == "failed":
self.result_queue.put(("error", status.get("message", "Unknown error")))
return
elif state == "cancelled":
self.result_queue.put(("cancelled", None))
return
progress = status.get("progress", 0)
if progress >= 0:
self.progress_queue.put(("progress", progress))
time.sleep(0.5)
client.cancel_task(task_id)
self.result_queue.put(("cancelled", None))
except Exception as e:
self.result_queue.put(("error", str(e)))
def _worker( def _worker(
self, self,
video_path: str, video_path: str,

View File

@ -8,7 +8,7 @@ only to masked regions of a video strip.
def create_mask_blur_node_tree( def create_mask_blur_node_tree(
name: str = "FaceMaskBlur", name: str = "FaceMaskBlur",
blur_size: int = 50, blur_size: int = 50,
) -> "bpy.types.NodeTree": # noqa: F821 ) -> "bpy.types.NodeTree":
""" """
Create a compositing node tree for mask-based blur. Create a compositing node tree for mask-based blur.
@ -107,10 +107,10 @@ def create_mask_blur_node_tree(
def setup_strip_compositor_modifier( def setup_strip_compositor_modifier(
strip: "bpy.types.Strip", # noqa: F821 strip: "bpy.types.Strip",
mask_strip: "bpy.types.Strip", # noqa: F821 mask_strip: "bpy.types.Strip",
node_tree: "bpy.types.NodeTree", # noqa: F821 node_tree: "bpy.types.NodeTree",
) -> "bpy.types.SequenceModifier": # noqa: F821 ) -> "bpy.types.SequenceModifier":
""" """
Add a Compositor modifier to a strip using the mask-blur node tree. Add a Compositor modifier to a strip using the mask-blur node tree.
@ -148,7 +148,7 @@ def setup_strip_compositor_modifier(
return modifier return modifier
def get_or_create_blur_node_tree(blur_size: int = 50) -> "bpy.types.NodeTree": # noqa: F821 def get_or_create_blur_node_tree(blur_size: int = 50) -> "bpy.types.NodeTree":
""" """
Get existing or create new blur node tree with specified blur size. Get existing or create new blur node tree with specified blur size.

View File

@ -305,76 +305,6 @@ class InferenceClient:
except urllib.error.HTTPError as e: except urllib.error.HTTPError as e:
raise RuntimeError(f"Server error: {e.read().decode('utf-8')}") raise RuntimeError(f"Server error: {e.read().decode('utf-8')}")
def generate_mask_images(
self,
image_dir: str,
filenames: list,
output_dir: str,
start_index: int,
end_index: int,
conf_threshold: float,
iou_threshold: float,
) -> str:
"""画像シーケンスの顔検出タスクを開始して task_id を返す。"""
if not self.is_server_running():
self.start_server()
data = {
"image_dir": image_dir,
"filenames": filenames,
"output_dir": output_dir,
"start_index": start_index,
"end_index": end_index,
"conf_threshold": conf_threshold,
"iou_threshold": iou_threshold,
}
req = urllib.request.Request(
f"{self.SERVER_URL}/generate_images",
data=json.dumps(data).encode("utf-8"),
headers={"Content-Type": "application/json"},
method="POST",
)
try:
with urllib.request.urlopen(req) as response:
result = json.loads(response.read().decode("utf-8"))
return result["id"]
except urllib.error.HTTPError as e:
raise RuntimeError(f"Server error: {e.read().decode('utf-8')}")
def bake_image_blur(
self,
image_dir: str,
filenames: list,
output_dir: str,
detections_path: str,
blur_size: int,
display_scale: float,
) -> str:
"""画像シーケンスのぼかしBakeタスクを開始して task_id を返す。"""
if not self.is_server_running():
self.start_server()
data = {
"image_dir": image_dir,
"filenames": filenames,
"output_dir": output_dir,
"detections_path": detections_path,
"blur_size": blur_size,
"display_scale": display_scale,
}
req = urllib.request.Request(
f"{self.SERVER_URL}/bake_image_blur",
data=json.dumps(data).encode("utf-8"),
headers={"Content-Type": "application/json"},
method="POST",
)
try:
with urllib.request.urlopen(req) as response:
result = json.loads(response.read().decode("utf-8"))
return result["id"]
except urllib.error.HTTPError as e:
raise RuntimeError(f"Server error: {e.read().decode('utf-8')}")
def cancel_task(self, task_id: str): def cancel_task(self, task_id: str):
"""Cancel a task.""" """Cancel a task."""
try: try:

View File

@ -37,12 +37,6 @@ def _output_path(video_strip, detections_path: str, fmt: str) -> str:
return os.path.join(out_dir, f"{safe_name}_blurred.{ext}") return os.path.join(out_dir, f"{safe_name}_blurred.{ext}")
def _output_dir_for_images(strip, detections_path: str) -> str:
out_dir = os.path.dirname(detections_path)
safe_name = strip.name.replace("/", "_").replace("\\", "_")
return os.path.join(out_dir, f"{safe_name}_blurred")
def _reload_movie_strip(strip): def _reload_movie_strip(strip):
if hasattr(strip, "reload"): if hasattr(strip, "reload"):
try: try:
@ -51,12 +45,9 @@ def _reload_movie_strip(strip):
pass pass
def _set_strip_source(strip, path: str): def _set_strip_source(strip, filepath: str):
if strip.type == "IMAGE": strip.filepath = filepath
strip.directory = path _reload_movie_strip(strip)
else:
strip.filepath = path
_reload_movie_strip(strip)
def _start_bake_impl(operator, context, force: bool = False, strip=None, on_complete_extra=None): def _start_bake_impl(operator, context, force: bool = False, strip=None, on_complete_extra=None):
@ -65,42 +56,29 @@ def _start_bake_impl(operator, context, force: bool = False, strip=None, on_comp
strip: 処理対象のstripNone の場合は active_strip を使用 strip: 処理対象のstripNone の場合は active_strip を使用
on_complete_extra: 非同期Bake完了時に追加で呼ばれるコールバック (status, data) on_complete_extra: 非同期Bake完了時に追加で呼ばれるコールバック (status, data)
キャッシュヒット即時完了の場合は呼ばれない キャッシュヒット即時完了の場合は呼ばれない
MOVIE / IMAGE 両対応
""" """
seq_editor = context.scene.sequence_editor seq_editor = context.scene.sequence_editor
scene = context.scene scene = context.scene
video_strip = strip if strip is not None else seq_editor.active_strip video_strip = strip if strip is not None else seq_editor.active_strip
is_image = video_strip.type == "IMAGE"
video_path = bpy.path.abspath(video_strip.filepath)
detections_path = get_detections_path_for_strip(video_strip.name) detections_path = get_detections_path_for_strip(video_strip.name)
if not os.path.exists(video_path):
operator.report({"ERROR"}, f"Source video not found: {video_path}")
return {"CANCELLED"}
if not os.path.exists(detections_path): if not os.path.exists(detections_path):
operator.report({"ERROR"}, f"Detection cache not found: {detections_path}") operator.report({"ERROR"}, f"Detection cache not found: {detections_path}")
return {"CANCELLED"} return {"CANCELLED"}
bake_format = scene.facemask_bake_format
output_path = _output_path(video_strip, detections_path, bake_format)
blur_size = int(scene.facemask_bake_blur_size) blur_size = int(scene.facemask_bake_blur_size)
display_scale = float(scene.facemask_bake_display_scale) display_scale = float(scene.facemask_bake_display_scale)
if is_image:
image_dir = bpy.path.abspath(video_strip.directory)
filenames = [elem.filename for elem in video_strip.elements]
if not os.path.isdir(image_dir):
operator.report({"ERROR"}, f"Image directory not found: {image_dir}")
return {"CANCELLED"}
output_dir = _output_dir_for_images(video_strip, detections_path)
original_source = image_dir
bake_format = None # IMAGE strips don't use format
else:
video_path = bpy.path.abspath(video_strip.filepath)
if not os.path.exists(video_path):
operator.report({"ERROR"}, f"Source video not found: {video_path}")
return {"CANCELLED"}
bake_format = scene.facemask_bake_format
output_path = _output_path(video_strip, detections_path, bake_format)
original_source = video_path
if not force: if not force:
# パラメータが一致するキャッシュがあればswapのみ # パラメータが一致するキャッシュがあればswapのみ
cached_baked_path = video_strip.get(KEY_BAKED) cached_baked_path = video_strip.get(KEY_BAKED)
cached_format = video_strip.get(KEY_FORMAT)
cached_blur_size = video_strip.get(KEY_BLUR_SIZE) cached_blur_size = video_strip.get(KEY_BLUR_SIZE)
cached_display_scale = video_strip.get(KEY_DISPLAY_SCALE) cached_display_scale = video_strip.get(KEY_DISPLAY_SCALE)
try: try:
@ -111,16 +89,13 @@ def _start_bake_impl(operator, context, force: bool = False, strip=None, on_comp
cached_display_scale_f = float(cached_display_scale) cached_display_scale_f = float(cached_display_scale)
except (TypeError, ValueError): except (TypeError, ValueError):
cached_display_scale_f = None cached_display_scale_f = None
if (
cache_exists = ( cached_baked_path
cached_baked_path and os.path.exists(cached_baked_path) and os.path.exists(cached_baked_path)
and cached_format == bake_format
and cached_blur_size_int == blur_size and cached_blur_size_int == blur_size
and cached_display_scale_f == display_scale and cached_display_scale_f == display_scale
) ):
if not is_image:
cache_exists = cache_exists and video_strip.get(KEY_FORMAT) == bake_format
if cache_exists:
if video_strip.get(KEY_MODE) != "baked": if video_strip.get(KEY_MODE) != "baked":
video_strip[KEY_MODE] = "baked" video_strip[KEY_MODE] = "baked"
_set_strip_source(video_strip, cached_baked_path) _set_strip_source(video_strip, cached_baked_path)
@ -137,18 +112,18 @@ def _start_bake_impl(operator, context, force: bool = False, strip=None, on_comp
return return
if status == "done": if status == "done":
result = data or (output_dir if is_image else output_path) result_path = data or output_path
original_path = strip.get(KEY_ORIGINAL)
current_mode = strip.get(KEY_MODE, "original") current_mode = strip.get(KEY_MODE, "original")
if not strip.get(KEY_ORIGINAL) or current_mode != "baked": if not original_path or current_mode != "baked":
strip[KEY_ORIGINAL] = original_source strip[KEY_ORIGINAL] = video_path
strip[KEY_BAKED] = result strip[KEY_BAKED] = result_path
strip[KEY_MODE] = "baked" strip[KEY_MODE] = "baked"
strip[KEY_FORMAT] = bake_format
strip[KEY_BLUR_SIZE] = blur_size strip[KEY_BLUR_SIZE] = blur_size
strip[KEY_DISPLAY_SCALE] = display_scale strip[KEY_DISPLAY_SCALE] = display_scale
if not is_image: _set_strip_source(strip, result_path)
strip[KEY_FORMAT] = bake_format print(f"[FaceMask] Bake completed and source swapped: {result_path}")
_set_strip_source(strip, result)
print(f"[FaceMask] Bake completed and source swapped: {result}")
elif status == "error": elif status == "error":
print(f"[FaceMask] Bake failed: {data}") print(f"[FaceMask] Bake failed: {data}")
elif status == "cancelled": elif status == "cancelled":
@ -172,28 +147,16 @@ def _start_bake_impl(operator, context, force: bool = False, strip=None, on_comp
wm.bake_total = 1 wm.bake_total = 1
try: try:
if is_image: bake_generator.start(
bake_generator.start_images( video_path=video_path,
image_dir=image_dir, detections_path=detections_path,
filenames=filenames, output_path=output_path,
output_dir=output_dir, blur_size=blur_size,
detections_path=detections_path, display_scale=display_scale,
blur_size=blur_size, fmt=bake_format.lower(),
display_scale=display_scale, on_complete=on_complete,
on_complete=on_complete, on_progress=on_progress,
on_progress=on_progress, )
)
else:
bake_generator.start(
video_path=video_path,
detections_path=detections_path,
output_path=output_path,
blur_size=blur_size,
display_scale=display_scale,
fmt=bake_format.lower(),
on_complete=on_complete,
on_progress=on_progress,
)
except Exception as e: except Exception as e:
operator.report({"ERROR"}, f"Failed to start bake: {e}") operator.report({"ERROR"}, f"Failed to start bake: {e}")
return {"CANCELLED"} return {"CANCELLED"}
@ -219,7 +182,7 @@ class SEQUENCER_OT_bake_and_swap_blur_source(Operator):
if get_bake_generator().is_running: if get_bake_generator().is_running:
return False return False
strip = context.scene.sequence_editor.active_strip strip = context.scene.sequence_editor.active_strip
return bool(strip and strip.type in {"MOVIE", "IMAGE"}) return bool(strip and strip.type == "MOVIE")
def execute(self, context): def execute(self, context):
return _start_bake_impl(self, context, force=False) return _start_bake_impl(self, context, force=False)
@ -242,7 +205,7 @@ class SEQUENCER_OT_force_rebake_blur(Operator):
if get_bake_generator().is_running: if get_bake_generator().is_running:
return False return False
strip = context.scene.sequence_editor.active_strip strip = context.scene.sequence_editor.active_strip
return bool(strip and strip.type in {"MOVIE", "IMAGE"}) return bool(strip and strip.type == "MOVIE")
def execute(self, context): def execute(self, context):
return _start_bake_impl(self, context, force=True) return _start_bake_impl(self, context, force=True)
@ -263,7 +226,7 @@ class SEQUENCER_OT_swap_to_baked_blur(Operator):
if get_bake_generator().is_running: if get_bake_generator().is_running:
return False return False
strip = context.scene.sequence_editor.active_strip strip = context.scene.sequence_editor.active_strip
if not strip or strip.type not in {"MOVIE", "IMAGE"}: if not strip or strip.type != "MOVIE":
return False return False
baked_path = strip.get(KEY_BAKED) baked_path = strip.get(KEY_BAKED)
return bool(baked_path and os.path.exists(baked_path)) return bool(baked_path and os.path.exists(baked_path))
@ -292,7 +255,7 @@ class SEQUENCER_OT_restore_original_source(Operator):
if get_bake_generator().is_running: if get_bake_generator().is_running:
return False return False
strip = context.scene.sequence_editor.active_strip strip = context.scene.sequence_editor.active_strip
if not strip or strip.type not in {"MOVIE", "IMAGE"}: if not strip or strip.type != "MOVIE":
return False return False
if strip.get(KEY_MODE, "original") == "original": if strip.get(KEY_MODE, "original") == "original":
return False return False

View File

@ -15,11 +15,11 @@ from .apply_blur import KEY_ORIGINAL, KEY_MODE, _set_strip_source
class SEQUENCER_OT_batch_bake_selected(Operator): class SEQUENCER_OT_batch_bake_selected(Operator):
"""Generate detection cache and bake blur for all selected MOVIE/IMAGE strips.""" """Generate detection cache and bake blur for all selected MOVIE strips."""
bl_idname = "sequencer.batch_bake_selected" bl_idname = "sequencer.batch_bake_selected"
bl_label = "Batch Bake Selected" bl_label = "Batch Bake Selected"
bl_description = "Generate detection cache and bake blur for all selected MOVIE/IMAGE strips" bl_description = "Generate detection cache and bake blur for all selected MOVIE strips"
bl_options = {"REGISTER"} bl_options = {"REGISTER"}
@classmethod @classmethod
@ -33,14 +33,14 @@ class SEQUENCER_OT_batch_bake_selected(Operator):
if get_bake_generator().is_running: if get_bake_generator().is_running:
return False return False
seq_editor = context.scene.sequence_editor seq_editor = context.scene.sequence_editor
return any(s.select and s.type in {"MOVIE", "IMAGE"} for s in seq_editor.strips) return any(s.select and s.type == "MOVIE" for s in seq_editor.strips)
def execute(self, context): def execute(self, context):
seq_editor = context.scene.sequence_editor seq_editor = context.scene.sequence_editor
strips = [s for s in seq_editor.strips if s.select and s.type in {"MOVIE", "IMAGE"}] strips = [s for s in seq_editor.strips if s.select and s.type == "MOVIE"]
if not strips: if not strips:
self.report({"WARNING"}, "No MOVIE or IMAGE strips selected") self.report({"WARNING"}, "No MOVIE strips selected")
return {"CANCELLED"} return {"CANCELLED"}
batch = get_batch_processor() batch = get_batch_processor()
@ -64,11 +64,11 @@ class SEQUENCER_OT_batch_bake_selected(Operator):
class SEQUENCER_OT_batch_regenerate_cache(Operator): class SEQUENCER_OT_batch_regenerate_cache(Operator):
"""Regenerate detection cache for all selected MOVIE/IMAGE strips (ignore existing cache).""" """Regenerate detection cache for all selected MOVIE strips (ignore existing cache)."""
bl_idname = "sequencer.batch_regenerate_cache" bl_idname = "sequencer.batch_regenerate_cache"
bl_label = "Batch Regenerate Cache" bl_label = "Batch Regenerate Cache"
bl_description = "Regenerate detection cache for all selected MOVIE/IMAGE strips" bl_description = "Regenerate detection cache for all selected MOVIE strips"
bl_options = {"REGISTER"} bl_options = {"REGISTER"}
@classmethod @classmethod
@ -82,14 +82,14 @@ class SEQUENCER_OT_batch_regenerate_cache(Operator):
if get_bake_generator().is_running: if get_bake_generator().is_running:
return False return False
seq_editor = context.scene.sequence_editor seq_editor = context.scene.sequence_editor
return any(s.select and s.type in {"MOVIE", "IMAGE"} for s in seq_editor.strips) return any(s.select and s.type == "MOVIE" for s in seq_editor.strips)
def execute(self, context): def execute(self, context):
seq_editor = context.scene.sequence_editor seq_editor = context.scene.sequence_editor
strips = [s for s in seq_editor.strips if s.select and s.type in {"MOVIE", "IMAGE"}] strips = [s for s in seq_editor.strips if s.select and s.type == "MOVIE"]
if not strips: if not strips:
self.report({"WARNING"}, "No MOVIE or IMAGE strips selected") self.report({"WARNING"}, "No MOVIE strips selected")
return {"CANCELLED"} return {"CANCELLED"}
batch = get_batch_processor() batch = get_batch_processor()
@ -109,11 +109,11 @@ class SEQUENCER_OT_batch_regenerate_cache(Operator):
class SEQUENCER_OT_batch_restore_original(Operator): class SEQUENCER_OT_batch_restore_original(Operator):
"""Restore original source for all selected MOVIE/IMAGE strips.""" """Restore original source for all selected MOVIE strips."""
bl_idname = "sequencer.batch_restore_original" bl_idname = "sequencer.batch_restore_original"
bl_label = "Batch Restore Original" bl_label = "Batch Restore Original"
bl_description = "Restore original source filepath for all selected MOVIE/IMAGE strips" bl_description = "Restore original source filepath for all selected MOVIE strips"
bl_options = {"REGISTER", "UNDO"} bl_options = {"REGISTER", "UNDO"}
@classmethod @classmethod
@ -123,11 +123,11 @@ class SEQUENCER_OT_batch_restore_original(Operator):
if get_batch_processor().is_running: if get_batch_processor().is_running:
return False return False
seq_editor = context.scene.sequence_editor seq_editor = context.scene.sequence_editor
return any(s.select and s.type in {"MOVIE", "IMAGE"} for s in seq_editor.strips) return any(s.select and s.type == "MOVIE" for s in seq_editor.strips)
def execute(self, context): def execute(self, context):
seq_editor = context.scene.sequence_editor seq_editor = context.scene.sequence_editor
strips = [s for s in seq_editor.strips if s.select and s.type in {"MOVIE", "IMAGE"}] strips = [s for s in seq_editor.strips if s.select and s.type == "MOVIE"]
restored = 0 restored = 0
skipped = 0 skipped = 0

View File

@ -34,28 +34,11 @@ def compute_strip_frame_range(strip, scene, client) -> tuple:
return start_frame, end_frame, source_fps return start_frame, end_frame, source_fps
def get_image_strip_files(strip) -> tuple:
"""IMAGE strip の (abs_image_dir, filenames_list) を返す。"""
image_dir = bpy.path.abspath(strip.directory)
filenames = [elem.filename for elem in strip.elements]
return image_dir, filenames
def compute_image_strip_range(strip) -> tuple:
"""IMAGE strip のアクティブ範囲 (start_index, end_index) を返す。"""
total_elements = len(strip.elements)
start_idx = max(0, int(strip.frame_offset_start))
end_idx = start_idx + int(strip.frame_final_duration) - 1
start_idx = min(start_idx, total_elements - 1)
end_idx = max(start_idx, min(end_idx, total_elements - 1))
return start_idx, end_idx
def start_mask_gen_for_strip(context, strip, on_complete, on_progress): def start_mask_gen_for_strip(context, strip, on_complete, on_progress):
"""Strip のマスク生成を開始する共通処理MOVIE / IMAGE 両対応) """Strip のマスク生成を開始する共通処理。
generator.is_running 等のエラー時は例外を送出する generator.is_running 等のエラー時は例外を送出する
wm.mask_progress / mask_total を初期化してから generator.start*() を呼ぶ wm.mask_progress / mask_total を初期化してから generator.start() を呼ぶ
""" """
scene = context.scene scene = context.scene
wm = context.window_manager wm = context.window_manager
@ -64,42 +47,26 @@ def start_mask_gen_for_strip(context, strip, on_complete, on_progress):
if generator.is_running: if generator.is_running:
raise RuntimeError("Mask generation already in progress") raise RuntimeError("Mask generation already in progress")
client = get_client()
start_frame, end_frame, source_fps = compute_strip_frame_range(strip, scene, client)
output_dir = get_cache_dir_for_strip(strip.name) output_dir = get_cache_dir_for_strip(strip.name)
os.makedirs(output_dir, exist_ok=True) os.makedirs(output_dir, exist_ok=True)
wm.mask_progress = 0
if strip.type == "IMAGE": wm.mask_progress = 0
image_dir, filenames = get_image_strip_files(strip) wm.mask_total = end_frame - start_frame + 1
if not filenames:
raise ValueError("Image strip has no elements") generator.start(
start_idx, end_idx = compute_image_strip_range(strip) video_path=bpy.path.abspath(strip.filepath),
wm.mask_total = end_idx - start_idx + 1 output_dir=output_dir,
generator.start_images( start_frame=start_frame,
image_dir=image_dir, end_frame=end_frame,
filenames=filenames, fps=source_fps,
output_dir=output_dir, conf_threshold=scene.facemask_conf_threshold,
start_index=start_idx, iou_threshold=scene.facemask_iou_threshold,
end_index=end_idx, on_complete=on_complete,
conf_threshold=scene.facemask_conf_threshold, on_progress=on_progress,
iou_threshold=scene.facemask_iou_threshold, )
on_complete=on_complete,
on_progress=on_progress,
)
else:
client = get_client()
start_frame, end_frame, source_fps = compute_strip_frame_range(strip, scene, client)
wm.mask_total = end_frame - start_frame + 1
generator.start(
video_path=bpy.path.abspath(strip.filepath),
output_dir=output_dir,
start_frame=start_frame,
end_frame=end_frame,
fps=source_fps,
conf_threshold=scene.facemask_conf_threshold,
iou_threshold=scene.facemask_iou_threshold,
on_complete=on_complete,
on_progress=on_progress,
)
class SEQUENCER_OT_generate_face_mask(Operator): class SEQUENCER_OT_generate_face_mask(Operator):

View File

@ -232,10 +232,10 @@ class SEQUENCER_PT_face_mask(Panel):
) )
def _draw_batch_controls(self, layout, context, seq_editor): def _draw_batch_controls(self, layout, context, seq_editor):
"""Draw batch bake button when multiple MOVIE/IMAGE strips are selected.""" """Draw batch bake button when multiple MOVIE strips are selected."""
if not seq_editor: if not seq_editor:
return return
selected_movies = [s for s in seq_editor.strips if s.select and s.type in {"MOVIE", "IMAGE"}] selected_movies = [s for s in seq_editor.strips if s.select and s.type == "MOVIE"]
if not selected_movies: if not selected_movies:
return return
count = len(selected_movies) count = len(selected_movies)
@ -303,8 +303,7 @@ class SEQUENCER_PT_face_mask(Panel):
col = box.column(align=True) col = box.column(align=True)
col.prop(context.scene, "facemask_bake_blur_size") col.prop(context.scene, "facemask_bake_blur_size")
col.prop(context.scene, "facemask_bake_display_scale") col.prop(context.scene, "facemask_bake_display_scale")
if strip.type == "MOVIE": col.prop(context.scene, "facemask_bake_format")
col.prop(context.scene, "facemask_bake_format")
box.separator() box.separator()

View File

@ -31,62 +31,28 @@ def fix_library_path():
# Fix library path BEFORE any other imports # Fix library path BEFORE any other imports
fix_library_path() fix_library_path()
import queue # noqa: E402 import queue
import threading # noqa: E402 import threading
import uuid # noqa: E402 import uuid
import traceback # noqa: E402 import traceback
import subprocess # noqa: E402 import subprocess
from typing import Dict, Optional, List # noqa: E402 from typing import Dict, Optional, List
from pathlib import Path # noqa: E402 from pathlib import Path
from fastapi import FastAPI, HTTPException, BackgroundTasks # noqa: E402 from fastapi import FastAPI, HTTPException, BackgroundTasks
from pydantic import BaseModel # noqa: E402 from pydantic import BaseModel
import uvicorn # noqa: E402 import uvicorn
import cv2 # noqa: E402 import cv2
import numpy as np # noqa: E402 import numpy as np
import msgpack # noqa: E402 import msgpack
# Add project root to path for imports if needed # Add project root to path for imports if needed
sys.path.append(str(Path(__file__).parent.parent)) sys.path.append(str(Path(__file__).parent.parent))
from server.detector import get_detector # noqa: E402 from server.detector import get_detector
app = FastAPI(title="Face Mask Inference Server") app = FastAPI(title="Face Mask Inference Server")
def _get_r_frame_rate(video_path: str) -> tuple:
"""ffprobe でコンテナ宣言の r_frame_rate を取得する。
Returns:
(fps_float, fps_str): fps_str "120/1" のような分数文字列
取得失敗時は (0.0, "")
"""
try:
result = subprocess.run(
[
"ffprobe", "-v", "error",
"-select_streams", "v:0",
"-show_entries", "stream=r_frame_rate",
"-of", "default=noprint_wrappers=1:nokey=1",
video_path,
],
capture_output=True,
text=True,
timeout=10,
)
if result.returncode == 0:
rate_str = result.stdout.strip()
if "/" in rate_str:
num, den = rate_str.split("/")
fps_float = float(num) / float(den)
else:
fps_float = float(rate_str)
rate_str = str(fps_float)
return fps_float, rate_str
except Exception:
pass
return 0.0, ""
# GPU status cache # GPU status cache
_gpu_status_cache = None _gpu_status_cache = None
@ -132,25 +98,6 @@ class BakeRequest(BaseModel):
format: str = "mp4" format: str = "mp4"
class GenerateImagesRequest(BaseModel):
image_dir: str
filenames: List[str]
output_dir: str
start_index: int = 0
end_index: int = -1
conf_threshold: float = 0.5
iou_threshold: float = 0.45
class BakeImagesRequest(BaseModel):
image_dir: str
filenames: List[str]
output_dir: str
detections_path: str
blur_size: int = 50
display_scale: float = 1.0
class _FFmpegPipeWriter: class _FFmpegPipeWriter:
"""Write BGR frames to ffmpeg stdin.""" """Write BGR frames to ffmpeg stdin."""
@ -195,33 +142,8 @@ def _build_ffmpeg_vaapi_writer(
fps: float, fps: float,
width: int, width: int,
height: int, height: int,
out_fps_str: str = "",
) -> _FFmpegPipeWriter: ) -> _FFmpegPipeWriter:
"""Create ffmpeg h264_vaapi writer with QP=24 (balanced quality/speed). """Create ffmpeg h264_vaapi writer with QP=24 (balanced quality/speed)."""
fps: ソース動画の avg_frame_raterawパイプの入力レート
out_fps_str: 出力コンテナに宣言する r_frame_rate"120/1"
ソースと異なる場合は fps フィルタでフレームを補完する
"""
# ソースの avg_fps と出力の r_fps が有意に異なる場合のみ fps フィルタを挿入
needs_fps_filter = bool(out_fps_str)
if needs_fps_filter:
try:
if "/" in out_fps_str:
num, den = out_fps_str.split("/")
out_fps_float = float(num) / float(den)
else:
out_fps_float = float(out_fps_str)
needs_fps_filter = abs(out_fps_float - fps) > 0.01
except ValueError:
needs_fps_filter = False
if needs_fps_filter:
vf = f"format=nv12,fps={out_fps_str},hwupload"
print(f"[FaceMask] fps filter: {fps:.3f} -> {out_fps_str}")
else:
vf = "format=nv12,hwupload"
cmd = [ cmd = [
"ffmpeg", "ffmpeg",
"-hide_banner", "-hide_banner",
@ -242,7 +164,7 @@ def _build_ffmpeg_vaapi_writer(
"-", "-",
"-an", "-an",
"-vf", "-vf",
vf, "format=nv12,hwupload",
"-c:v", "-c:v",
"h264_vaapi", "h264_vaapi",
"-qp", "-qp",
@ -258,14 +180,13 @@ def _build_video_writer(
fps: float, fps: float,
width: int, width: int,
height: int, height: int,
out_fps_str: str = "",
) -> object: ) -> object:
"""Create writer with VAAPI preference and OpenCV fallback.""" """Create writer with VAAPI preference and OpenCV fallback."""
format_key = fmt.lower() format_key = fmt.lower()
if format_key in {"mp4", "mov"}: if format_key in {"mp4", "mov"}:
try: try:
writer = _build_ffmpeg_vaapi_writer(output_path, fps, width, height, out_fps_str) writer = _build_ffmpeg_vaapi_writer(output_path, fps, width, height)
print("[FaceMask] Using output encoder: ffmpeg h264_vaapi (-qp 24)") print("[FaceMask] Using output encoder: ffmpeg h264_vaapi (-qp 24)")
return writer return writer
except Exception as e: except Exception as e:
@ -321,267 +242,6 @@ def _scale_bbox(
return [x1, y1, out_w, out_h] return [x1, y1, out_w, out_h]
def _apply_face_blur_inplace(
    frame: np.ndarray,
    frame_boxes: list,
    src_width: int,
    src_height: int,
    blur_size: int,
    display_scale: float,
    blur_margin: int,
) -> None:
    """Gaussian-blur each detected face region of ``frame`` in place.

    Each entry of ``frame_boxes`` is ``[x, y, w, h, ...]`` in pixel
    coordinates.  Every box is rescaled around its center by
    ``display_scale``, expanded by ``blur_margin``, blurred at half
    resolution for speed, then composited back through an elliptical
    mask so only the face area is replaced.
    """
    if not frame_boxes:
        return
    for box in frame_boxes:
        # Skip malformed entries; a valid box carries at least x, y, w, h.
        if not isinstance(box, list) or len(box) < 4:
            continue
        bx, by, bw, bh = int(box[0]), int(box[1]), int(box[2]), int(box[3])
        if bw <= 0 or bh <= 0:
            continue
        # Rescale the box around its center by display_scale.
        center_x = bx + bw / 2
        center_y = by + bh / 2
        scaled_w = max(1, int(bw * display_scale))
        scaled_h = max(1, int(bh * display_scale))
        scaled_x = int(center_x - scaled_w / 2)
        scaled_y = int(center_y - scaled_h / 2)
        # Expand by blur_margin and clamp the ROI to the frame bounds.
        x1 = max(0, scaled_x - blur_margin)
        y1 = max(0, scaled_y - blur_margin)
        x2 = min(src_width, scaled_x + scaled_w + blur_margin)
        y2 = min(src_height, scaled_y + scaled_h + blur_margin)
        roi_w = x2 - x1
        roi_h = y2 - y1
        if roi_w <= 0 or roi_h <= 0:
            continue
        roi = frame[y1:y2, x1:x2]
        # Blur at half resolution: visually equivalent, roughly 4x cheaper.
        half_w = max(1, roi_w // 2)
        half_h = max(1, roi_h // 2)
        shrunk = cv2.resize(roi, (half_w, half_h), interpolation=cv2.INTER_LINEAR)
        kernel = max(3, (blur_size // 2) | 1)  # GaussianBlur needs an odd kernel
        shrunk_blurred = cv2.GaussianBlur(shrunk, (kernel, kernel), 0)
        blurred = cv2.resize(shrunk_blurred, (roi_w, roi_h), interpolation=cv2.INTER_LINEAR)
        # Composite through an ellipse so only the face itself is replaced.
        mask = np.zeros((roi_h, roi_w), dtype=np.uint8)
        ellipse_center = (int(center_x) - x1, int(center_y) - y1)
        ellipse_axes = (max(1, scaled_w // 2), max(1, scaled_h // 2))
        cv2.ellipse(mask, ellipse_center, ellipse_axes, 0, 0, 360, 255, -1)
        merged = roi.copy()
        cv2.copyTo(blurred, mask, merged)
        frame[y1:y2, x1:x2] = merged
def process_images_task(task_id: str, req: GenerateImagesRequest):
    """Detect faces across an image sequence and save a msgpack bbox cache.

    Runs as a background task: reads ``req.filenames[start..end]`` from
    ``req.image_dir``, batches frames through the face detector, clamps
    each box to the image bounds, and writes per-frame detections to
    ``<output_dir>/detections.msgpack``.  Progress and cancellation are
    reported through the module-level ``tasks`` / ``cancel_events`` maps.

    Fix: when an image fails to load, the pending batch is flushed
    BEFORE the empty placeholder entry is appended; previously the
    placeholder could be inserted ahead of still-buffered frames,
    shifting all subsequent detections out of frame order.
    """
    try:
        tasks[task_id].status = TaskStatus.PROCESSING
        cancel_event = cancel_events.get(task_id)

        if not os.path.exists(req.image_dir):
            tasks[task_id].status = TaskStatus.FAILED
            tasks[task_id].message = f"Image directory not found: {req.image_dir}"
            return
        if not req.filenames:
            tasks[task_id].status = TaskStatus.FAILED
            tasks[task_id].message = "No filenames provided"
            return

        detector = get_detector(
            conf_threshold=req.conf_threshold,
            iou_threshold=req.iou_threshold,
        )
        _ = detector.model  # touch the model so lazy loading happens before the loop

        total_files = len(req.filenames)
        start_idx = max(0, req.start_index)
        end_idx = req.end_index if req.end_index >= 0 else total_files - 1
        end_idx = min(end_idx, total_files - 1)
        if start_idx > end_idx:
            tasks[task_id].status = TaskStatus.FAILED
            tasks[task_id].message = "Invalid index range"
            return

        indices = list(range(start_idx, end_idx + 1))
        tasks[task_id].total = len(indices)

        os.makedirs(req.output_dir, exist_ok=True)
        output_msgpack_path = os.path.join(req.output_dir, "detections.msgpack")

        # Image dimensions are taken from the first file in range.
        first_path = os.path.join(req.image_dir, req.filenames[start_idx])
        first_img = cv2.imread(first_path)
        if first_img is None:
            tasks[task_id].status = TaskStatus.FAILED
            tasks[task_id].message = f"Cannot read image: {first_path}"
            return
        height, width = first_img.shape[:2]

        frame_buffer: List[np.ndarray] = []
        frame_detections: List[List[List[float]]] = []
        batch_size = 5
        current_count = 0

        def process_batch():
            """Run detection on buffered frames and append clamped boxes."""
            nonlocal current_count
            if not frame_buffer:
                return
            batch_det = detector.detect_batch(frame_buffer)
            for detections in batch_det:
                packed: List[List[float]] = []
                for x, y, w, h, conf in detections:
                    bx, by, bw, bh = int(x), int(y), int(w), int(h)
                    # Clamp each box to the image bounds; drop collapsed boxes.
                    bx = max(0, bx)
                    by = max(0, by)
                    bw = min(width - bx, bw)
                    bh = min(height - by, bh)
                    if bw <= 0 or bh <= 0:
                        continue
                    packed.append([bx, by, bw, bh, float(conf)])
                frame_detections.append(packed)
                current_count += 1
                tasks[task_id].progress = current_count
            frame_buffer.clear()

        print(
            f"[FaceMask] Starting image detection: {req.image_dir} "
            f"({len(indices)} images) -> {output_msgpack_path}"
        )

        for file_idx in indices:
            if cancel_event and cancel_event.is_set():
                tasks[task_id].status = TaskStatus.CANCELLED
                tasks[task_id].message = "Cancelled by user"
                break
            img_path = os.path.join(req.image_dir, req.filenames[file_idx])
            frame = cv2.imread(img_path)
            if frame is None:
                # Flush buffered frames first so the empty entry for this
                # unreadable image lands at the correct frame position.
                process_batch()
                frame_detections.append([])
                current_count += 1
                tasks[task_id].progress = current_count
                continue
            frame_buffer.append(frame)
            if len(frame_buffer) >= batch_size:
                process_batch()
        if frame_buffer:
            process_batch()

        if tasks[task_id].status == TaskStatus.PROCESSING:
            payload = {
                "version": 1,
                "image_dir": req.image_dir,
                "filenames": req.filenames,
                "start_frame": start_idx,
                "end_frame": start_idx + len(frame_detections) - 1,
                "width": width,
                "height": height,
                "fps": 0.0,  # image sequences carry no intrinsic fps
                "mask_scale": 1.0,
                "frames": frame_detections,
            }
            with open(output_msgpack_path, "wb") as f:
                f.write(msgpack.packb(payload, use_bin_type=True))
            tasks[task_id].status = TaskStatus.COMPLETED
            tasks[task_id].result_path = output_msgpack_path
            tasks[task_id].message = "Image detection cache completed"
            print(f"[FaceMask] Image detection done: {output_msgpack_path}")
    except Exception as e:
        tasks[task_id].status = TaskStatus.FAILED
        tasks[task_id].message = str(e)
        traceback.print_exc()
    finally:
        cancel_events.pop(task_id, None)
def process_bake_images_task(task_id: str, req: BakeImagesRequest):
    """Apply the cached face blur to an image sequence and write copies.

    Loads per-frame boxes from ``req.detections_path`` (msgpack produced
    by the detection task), blurs each face region of every source image
    in place, and writes the result under the same filename into
    ``req.output_dir``.  Progress and cancellation are reported through
    the module-level ``tasks`` / ``cancel_events`` maps.

    Fix: ``cv2.imwrite`` failures (it returns False instead of raising)
    are now surfaced as a FAILED task instead of being silently ignored,
    which previously let the task report COMPLETED with missing files.
    """
    try:
        tasks[task_id].status = TaskStatus.PROCESSING
        cancel_event = cancel_events.get(task_id)

        if not os.path.exists(req.image_dir):
            tasks[task_id].status = TaskStatus.FAILED
            tasks[task_id].message = f"Image directory not found: {req.image_dir}"
            return
        if not os.path.exists(req.detections_path):
            tasks[task_id].status = TaskStatus.FAILED
            tasks[task_id].message = f"Detections file not found: {req.detections_path}"
            return

        with open(req.detections_path, "rb") as f:
            payload = msgpack.unpackb(f.read(), raw=False)
        frames_detections = payload.get("frames")
        if not isinstance(frames_detections, list):
            tasks[task_id].status = TaskStatus.FAILED
            tasks[task_id].message = "Invalid detections format: 'frames' is missing"
            return
        det_start_frame = int(payload.get("start_frame", 0))

        # GaussianBlur requires an odd kernel size.
        blur_size = max(1, int(req.blur_size))
        if blur_size % 2 == 0:
            blur_size += 1
        display_scale = max(0.1, float(req.display_scale))
        blur_margin = blur_size // 2

        os.makedirs(req.output_dir, exist_ok=True)
        total = len(req.filenames)
        tasks[task_id].total = total
        print(
            f"[FaceMask] Starting image bake: {req.image_dir} "
            f"({total} images) -> {req.output_dir}"
        )

        for i, filename in enumerate(req.filenames):
            if cancel_event and cancel_event.is_set():
                tasks[task_id].status = TaskStatus.CANCELLED
                tasks[task_id].message = "Cancelled by user"
                return
            src_path = os.path.join(req.image_dir, filename)
            frame = cv2.imread(src_path)
            if frame is None:
                # Unreadable source image: skip it but keep progress moving.
                tasks[task_id].progress = i + 1
                continue
            h, w = frame.shape[:2]
            det_idx = i - det_start_frame
            frame_boxes = (
                frames_detections[det_idx]
                if 0 <= det_idx < len(frames_detections)
                else []
            )
            _apply_face_blur_inplace(frame, frame_boxes, w, h, blur_size, display_scale, blur_margin)
            out_path = os.path.join(req.output_dir, filename)
            # imwrite signals failure via its return value, not an exception.
            if not cv2.imwrite(out_path, frame):
                tasks[task_id].status = TaskStatus.FAILED
                tasks[task_id].message = f"Failed to write image: {out_path}"
                return
            tasks[task_id].progress = i + 1

        if tasks[task_id].status == TaskStatus.PROCESSING:
            tasks[task_id].status = TaskStatus.COMPLETED
            tasks[task_id].result_path = req.output_dir
            tasks[task_id].message = "Image blur bake completed"
            print(f"[FaceMask] Image bake completed: {req.output_dir}")
    except Exception as e:
        tasks[task_id].status = TaskStatus.FAILED
        tasks[task_id].message = str(e)
        traceback.print_exc()
    finally:
        cancel_events.pop(task_id, None)
def process_video_task(task_id: str, req: GenerateRequest): def process_video_task(task_id: str, req: GenerateRequest):
"""Background task to detect faces and save bbox cache as msgpack.""" """Background task to detect faces and save bbox cache as msgpack."""
cap = None cap = None
@ -757,15 +417,6 @@ def process_bake_task(task_id: str, req: BakeRequest):
src_frames = int(temp_cap.get(cv2.CAP_PROP_FRAME_COUNT)) src_frames = int(temp_cap.get(cv2.CAP_PROP_FRAME_COUNT))
temp_cap.release() temp_cap.release()
# ffprobe で r_frame_rate を取得し、出力コンテナの宣言 FPS をソースに合わせる。
# 例: 120fps タイムベースで記録された 60fps 動画は r_frame_rate=120/1 だが
# cv2 は avg_frame_rate=60fps を返すため、Bake 後に Blender がFPSを別値で認識してしまう。
r_fps_float, r_fps_str = _get_r_frame_rate(req.video_path)
if r_fps_float > 0:
print(f"[FaceMask] r_frame_rate={r_fps_str}, avg_fps={src_fps:.3f}")
else:
r_fps_str = ""
if src_width <= 0 or src_height <= 0: if src_width <= 0 or src_height <= 0:
tasks[task_id].status = TaskStatus.FAILED tasks[task_id].status = TaskStatus.FAILED
tasks[task_id].message = "Invalid source video dimensions" tasks[task_id].message = "Invalid source video dimensions"
@ -966,7 +617,7 @@ def process_bake_task(task_id: str, req: BakeRequest):
frame_count = 0 frame_count = 0
writer = None writer = None
try: try:
writer = _build_video_writer(req.output_path, req.format, src_fps, src_width, src_height, r_fps_str) writer = _build_video_writer(req.output_path, req.format, src_fps, src_width, src_height)
while True: while True:
if cancel_event and cancel_event.is_set(): if cancel_event and cancel_event.is_set():
@ -1208,20 +859,13 @@ def get_video_info(req: VideoInfoRequest):
raise HTTPException(status_code=400, detail="Failed to open video") raise HTTPException(status_code=400, detail="Failed to open video")
try: try:
avg_fps = float(cap.get(cv2.CAP_PROP_FPS) or 0.0) fps = float(cap.get(cv2.CAP_PROP_FPS) or 0.0)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) or 0) width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) or 0)
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) or 0) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) or 0)
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT) or 0) frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT) or 0)
finally: finally:
cap.release() cap.release()
# Blender は r_frame_rate でタイムライン配置を計算するため、
# cv2 の avg_frame_rate ではなく r_frame_rate を fps として返す。
# 例: 120fps タイムベース記録の 60fps 動画で r_frame_rate=120 を返すことで
# compute_strip_frame_range の fps_ratio が Blender の解釈と一致する。
r_fps_float, _ = _get_r_frame_rate(req.video_path)
fps = r_fps_float if r_fps_float > 0 else avg_fps
return { return {
"video_path": req.video_path, "video_path": req.video_path,
"fps": fps, "fps": fps,
@ -1251,26 +895,6 @@ def bake_blur_endpoint(req: BakeRequest, background_tasks: BackgroundTasks):
background_tasks.add_task(process_bake_task, task_id, req) background_tasks.add_task(process_bake_task, task_id, req)
return task return task
@app.post("/generate_images", response_model=Task)
def generate_images_endpoint(req: GenerateImagesRequest, background_tasks: BackgroundTasks):
    """Queue a face-detection pass over an image sequence.

    Registers a pending task plus its cancellation event, schedules
    ``process_images_task`` in the background, and returns the task
    record so the client can poll its status.
    """
    new_id = str(uuid.uuid4())
    pending = Task(id=new_id, status=TaskStatus.PENDING)
    tasks[new_id] = pending
    cancel_events[new_id] = threading.Event()
    background_tasks.add_task(process_images_task, new_id, req)
    return pending
@app.post("/bake_image_blur", response_model=Task)
def bake_image_blur_endpoint(req: BakeImagesRequest, background_tasks: BackgroundTasks):
    """Queue a blur-bake pass over an image sequence.

    Registers a pending task plus its cancellation event, schedules
    ``process_bake_images_task`` in the background, and returns the
    task record so the client can poll its status.
    """
    new_id = str(uuid.uuid4())
    pending = Task(id=new_id, status=TaskStatus.PENDING)
    tasks[new_id] = pending
    cancel_events[new_id] = threading.Event()
    background_tasks.add_task(process_bake_images_task, new_id, req)
    return pending
@app.get("/tasks/{task_id}", response_model=Task) @app.get("/tasks/{task_id}", response_model=Task)
def get_task(task_id: str): def get_task(task_id: str):
if task_id not in tasks: if task_id not in tasks: