Batch処理
This commit is contained in:
parent
d67265aa39
commit
0fdff5423e
280
core/batch_processor.py
Normal file
280
core/batch_processor.py
Normal file
|
|
@ -0,0 +1,280 @@
|
|||
"""
|
||||
Batch processor for sequential Generate+Bake across multiple VSE strips.
|
||||
|
||||
Uses timer-based async chaining so Blender's UI stays responsive.
|
||||
"""
|
||||
|
||||
from typing import List, Optional, Callable, Any
|
||||
|
||||
# Lazy-imported inside Blender
|
||||
bpy = None
|
||||
|
||||
|
||||
class _DummyOperator:
    """Dummy operator object for _start_bake_impl calls.

    Mimics the ``Operator.report`` interface so batch code can reuse
    operator-oriented helpers; messages go to the console instead of the UI.
    """

    def report(self, level, msg):
        # `level` is accepted for signature compatibility but not used here.
        print(f"[FaceMask] Batch: {msg}")
|
||||
|
||||
|
||||
class BatchProcessor:
    """Manages sequential Generate Detection Cache → Bake across a list of strips.

    Processing is driven by one-shot ``bpy.app.timers`` callbacks so
    Blender's UI stays responsive: each completed item explicitly schedules
    the next queue step instead of blocking.
    """

    def __init__(self):
        # True while a batch is in flight (guards against re-entrant start()).
        self.is_running: bool = False
        self._mode: str = "full"  # "full" or "mask_only"
        # Strip *names* are stored (not references); each step re-looks the
        # strip up so one deleted mid-batch is skipped, not dereferenced stale.
        self._strip_names: List[str] = []
        self._current_idx: int = 0
        self._context: Any = None
        self._cancelled: bool = False
        # One {"strip", "status"[, "reason"]} dict per processed item.
        self._results: List[dict] = []
        self._on_item_complete: Optional[Callable] = None  # (idx, total, name, status)
        self._on_all_complete: Optional[Callable] = None  # (results)

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def start(self, context, strips, on_item_complete=None, on_all_complete=None, mode="full"):
        """Start batch processing for the given strips.

        mode:
            "full"      - generate masks (when not cached) then bake
            "mask_only" - ignore any cache and only generate masks (no bake)

        Raises:
            RuntimeError: if a batch is already running.
        """
        global bpy
        # Lazy import so this module stays importable outside Blender.
        import bpy as _bpy
        bpy = _bpy

        if self.is_running:
            raise RuntimeError("Batch already running")

        self.is_running = True
        self._mode = mode
        self._strip_names = [s.name for s in strips]
        self._current_idx = 0
        self._context = context
        self._cancelled = False
        self._results = []
        self._on_item_complete = on_item_complete
        self._on_all_complete = on_all_complete

        # Seed the WindowManager progress properties read by the UI panel.
        wm = context.window_manager
        wm.batch_current = 0
        wm.batch_total = len(self._strip_names)
        wm.batch_current_name = ""

        bpy.app.timers.register(self._process_next, first_interval=0.0)

    def cancel(self):
        """Cancel batch. Stops currently running mask gen / bake."""
        self._cancelled = True
        from .async_generator import get_generator
        from .async_bake_generator import get_bake_generator
        gen = get_generator()
        bake_gen = get_bake_generator()
        # Cancelling the sub-generators makes their completion callbacks fire
        # with "cancelled", which routes back into _finish().
        if gen.is_running:
            gen.cancel()
        if bake_gen.is_running:
            bake_gen.cancel()

    # ------------------------------------------------------------------
    # Internal: queue stepping
    # ------------------------------------------------------------------

    def _process_next(self):
        """Process the next strip in the queue (called via timer).

        Always returns None so the timer never repeats by itself; the next
        step is scheduled explicitly from the completion callbacks.
        """
        if self._cancelled:
            self._finish()
            return None

        if self._current_idx >= len(self._strip_names):
            self._finish()
            return None

        strip_name = self._strip_names[self._current_idx]
        seq_editor = self._context.scene.sequence_editor
        strip = seq_editor.strips.get(strip_name)

        # The strip may have been removed since start(); skip and continue.
        if strip is None:
            print(f"[FaceMask] Batch: strip not found, skipping: {strip_name}")
            self._results.append({"strip": strip_name, "status": "skipped"})
            if self._on_item_complete:
                self._on_item_complete(self._current_idx, len(self._strip_names), strip_name, "skipped")
            self._current_idx += 1
            bpy.app.timers.register(self._process_next, first_interval=0.0)
            return None

        # Update wm progress labels
        wm = self._context.window_manager
        wm.batch_current = self._current_idx + 1
        wm.batch_current_name = strip_name
        for area in self._context.screen.areas:
            if area.type == "SEQUENCE_EDITOR":
                area.tag_redraw()

        if self._mode == "mask_only":
            # Ignore any cache and always regenerate the mask (no bake).
            self._start_mask_gen(strip)
        else:
            from .utils import check_detection_cache
            if not check_detection_cache(strip.name):
                self._start_mask_gen(strip)
            else:
                self._start_bake(strip)

        return None  # one-shot timer

    def _schedule_next(self):
        # Advance the queue on the next timer tick (keeps the UI responsive).
        bpy.app.timers.register(self._process_next, first_interval=0.0)

    # ------------------------------------------------------------------
    # Mask generation
    # ------------------------------------------------------------------

    def _start_mask_gen(self, strip):
        """Kick off async mask generation for *strip*."""
        from ..operators.generate_mask import start_mask_gen_for_strip

        # Capture the name (not the strip reference) for the async callbacks.
        strip_name = strip.name

        def on_complete(status, data):
            self._on_mask_done(strip_name, status, data)

        def on_progress(current, total):
            wm = self._context.window_manager
            wm.mask_progress = current
            wm.mask_total = max(total, 1)
            for area in self._context.screen.areas:
                if area.type == "SEQUENCE_EDITOR":
                    area.tag_redraw()

        try:
            start_mask_gen_for_strip(self._context, strip, on_complete, on_progress)
            print(f"[FaceMask] Batch: started mask gen for {strip_name}")
        except Exception as e:
            # Treat a failed start the same as an async "error" completion so
            # the queue keeps moving instead of stalling.
            print(f"[FaceMask] Batch: failed to start mask gen for {strip_name}: {e}")
            self._on_mask_done(strip_name, "error", str(e))

    def _on_mask_done(self, strip_name, status, data):
        """Completion callback for mask generation (status: done/error/cancelled)."""
        if self._cancelled or status == "cancelled":
            self._results.append({"strip": strip_name, "status": "cancelled"})
            self._finish()
            return

        if status == "error":
            print(f"[FaceMask] Batch: mask gen failed for {strip_name}: {data}")
            self._results.append({"strip": strip_name, "status": "error", "reason": str(data)})
            if self._on_item_complete:
                self._on_item_complete(self._current_idx, len(self._strip_names), strip_name, "error")
            self._current_idx += 1
            self._schedule_next()
            return

        # Mask gen succeeded
        if self._mode == "mask_only":
            # No bake: record the result and move on to the next strip.
            self._results.append({"strip": strip_name, "status": "done"})
            if self._on_item_complete:
                self._on_item_complete(self._current_idx, len(self._strip_names), strip_name, "done")
            self._current_idx += 1
            self._schedule_next()
            return

        # full mode: proceed to bake
        seq_editor = self._context.scene.sequence_editor
        strip = seq_editor.strips.get(strip_name)
        if strip is None:
            print(f"[FaceMask] Batch: strip removed after mask gen: {strip_name}")
            self._results.append({"strip": strip_name, "status": "skipped"})
            if self._on_item_complete:
                self._on_item_complete(self._current_idx, len(self._strip_names), strip_name, "skipped")
            self._current_idx += 1
            self._schedule_next()
            return

        self._start_bake(strip)

    # ------------------------------------------------------------------
    # Bake
    # ------------------------------------------------------------------

    def _start_bake(self, strip):
        """Kick off async bake for *strip* via the shared _start_bake_impl."""
        from .async_bake_generator import get_bake_generator
        from ..operators.apply_blur import _start_bake_impl

        strip_name = strip.name

        def on_complete_extra(status, data):
            self._on_bake_done(strip_name, status, data)

        bake_gen = get_bake_generator()
        result = _start_bake_impl(
            _DummyOperator(),
            self._context,
            force=False,
            strip=strip,
            on_complete_extra=on_complete_extra,
        )

        if result == {"CANCELLED"}:
            # Error starting bake
            print(f"[FaceMask] Batch: bake failed to start for {strip_name}")
            self._results.append({"strip": strip_name, "status": "error", "reason": "bake failed to start"})
            if self._on_item_complete:
                self._on_item_complete(self._current_idx, len(self._strip_names), strip_name, "error")
            self._current_idx += 1
            self._schedule_next()
        elif not bake_gen.is_running:
            # Cache hit: on_complete_extra was NOT called by _start_bake_impl
            print(f"[FaceMask] Batch: bake cache hit for {strip_name}")
            self._on_bake_done(strip_name, "done", None)

    def _on_bake_done(self, strip_name, status, data):
        """Completion callback for bake (status: done/error/cancelled)."""
        if self._cancelled or status == "cancelled":
            self._results.append({"strip": strip_name, "status": "cancelled"})
            self._finish()
            return

        if status == "error":
            print(f"[FaceMask] Batch: bake failed for {strip_name}: {data}")
            self._results.append({"strip": strip_name, "status": "error", "reason": str(data)})
        else:
            self._results.append({"strip": strip_name, "status": "done"})
            print(f"[FaceMask] Batch: completed {strip_name}")

        if self._on_item_complete:
            self._on_item_complete(self._current_idx, len(self._strip_names), strip_name, status)

        self._current_idx += 1
        self._schedule_next()

    # ------------------------------------------------------------------
    # Finish
    # ------------------------------------------------------------------

    def _finish(self):
        """Reset state, clear UI progress, and fire the batch-level callback."""
        self.is_running = False
        wm = self._context.window_manager
        wm.batch_current = 0
        wm.batch_total = 0
        wm.batch_current_name = ""

        print(f"[FaceMask] Batch: all done. Results: {self._results}")

        if self._on_all_complete:
            self._on_all_complete(self._results)

        for area in self._context.screen.areas:
            if area.type == "SEQUENCE_EDITOR":
                area.tag_redraw()
|
||||
|
||||
|
||||
# Singleton
|
||||
_batch_processor: Optional[BatchProcessor] = None
|
||||
|
||||
|
||||
def get_batch_processor() -> BatchProcessor:
    """Return the module-level BatchProcessor singleton, creating it on first use."""
    global _batch_processor
    if _batch_processor is None:
        _batch_processor = BatchProcessor()
    return _batch_processor
|
||||
|
|
@ -247,6 +247,23 @@ class InferenceClient:
|
|||
except urllib.error.HTTPError:
|
||||
return {"status": "unknown"}
|
||||
|
||||
def get_video_info(self, video_path: str) -> Dict[str, Any]:
    """Get video metadata from the inference server.

    Starts the server first if it is not already running, then POSTs the
    path to the server's ``/video_info`` endpoint and returns the decoded
    JSON payload.

    Raises:
        RuntimeError: if the server responds with an HTTP error; the
            server's error body is included in the message.
    """
    if not self.is_server_running():
        self.start_server()

    req = urllib.request.Request(
        f"{self.SERVER_URL}/video_info",
        data=json.dumps({"video_path": video_path}).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    try:
        with urllib.request.urlopen(req) as response:
            return json.loads(response.read().decode("utf-8"))
    except urllib.error.HTTPError as e:
        # Surface the server-side error text to the caller.
        raise RuntimeError(f"Server error: {e.read().decode('utf-8')}")
|
||||
|
||||
def bake_blur(
|
||||
self,
|
||||
video_path: str,
|
||||
|
|
|
|||
|
|
@ -83,6 +83,15 @@ def get_detections_path_for_strip(strip_name: str) -> str:
|
|||
return os.path.join(get_cache_dir_for_strip(strip_name), "detections.msgpack")
|
||||
|
||||
|
||||
def check_detection_cache(strip_name: str) -> bool:
    """Return True if the strip's detection-cache file exists and is non-empty."""
    cache_path = get_detections_path_for_strip(strip_name)
    try:
        size = os.path.getsize(cache_path)
    except OSError:
        # Missing file or unreadable metadata — no usable cache.
        return False
    return size > 0
|
||||
|
||||
|
||||
def get_cache_info(strip_name: Optional[str] = None) -> Tuple[str, int, int]:
|
||||
"""
|
||||
Get cache directory information.
|
||||
|
|
|
|||
|
|
@ -3,15 +3,18 @@
|
|||
from . import generate_mask
|
||||
from . import apply_blur
|
||||
from . import clear_cache
|
||||
from . import batch_bake
|
||||
|
||||
|
||||
def register():
    """Register every operator submodule of this package."""
    for _mod in (generate_mask, apply_blur, clear_cache, batch_bake):
        _mod.register()
|
||||
|
||||
|
||||
def unregister():
    """Unregister operator submodules in reverse registration order."""
    for _mod in (batch_bake, clear_cache, apply_blur, generate_mask):
        _mod.unregister()
|
||||
|
|
|
|||
|
|
@ -50,11 +50,16 @@ def _set_strip_source(strip, filepath: str):
|
|||
_reload_movie_strip(strip)
|
||||
|
||||
|
||||
def _start_bake_impl(operator, context, force: bool = False):
|
||||
"""Bakeの共通実装。force=True でキャッシュを無視して再Bakeする。"""
|
||||
def _start_bake_impl(operator, context, force: bool = False, strip=None, on_complete_extra=None):
|
||||
"""Bakeの共通実装。force=True でキャッシュを無視して再Bakeする。
|
||||
|
||||
strip: 処理対象のstrip。None の場合は active_strip を使用。
|
||||
on_complete_extra: 非同期Bake完了時に追加で呼ばれるコールバック (status, data)。
|
||||
キャッシュヒット即時完了の場合は呼ばれない。
|
||||
"""
|
||||
seq_editor = context.scene.sequence_editor
|
||||
scene = context.scene
|
||||
video_strip = seq_editor.active_strip
|
||||
video_strip = strip if strip is not None else seq_editor.active_strip
|
||||
|
||||
video_path = bpy.path.abspath(video_strip.filepath)
|
||||
detections_path = get_detections_path_for_strip(video_strip.name)
|
||||
|
|
@ -128,6 +133,9 @@ def _start_bake_impl(operator, context, force: bool = False):
|
|||
if area.type == "SEQUENCE_EDITOR":
|
||||
area.tag_redraw()
|
||||
|
||||
if on_complete_extra:
|
||||
on_complete_extra(status, data)
|
||||
|
||||
def on_progress(current, total):
|
||||
wm.bake_progress = current
|
||||
wm.bake_total = max(total, 1)
|
||||
|
|
|
|||
191
operators/batch_bake.py
Normal file
191
operators/batch_bake.py
Normal file
|
|
@ -0,0 +1,191 @@
|
|||
"""
|
||||
Batch Bake operator: sequentially Generate Detection Cache → Bake
|
||||
for all selected MOVIE strips in the VSE.
|
||||
"""
|
||||
|
||||
import os
|
||||
import bpy
|
||||
from bpy.props import IntProperty, StringProperty
|
||||
from bpy.types import Operator
|
||||
|
||||
from ..core.batch_processor import get_batch_processor
|
||||
from ..core.async_generator import get_generator as get_mask_generator
|
||||
from ..core.async_bake_generator import get_bake_generator
|
||||
from .apply_blur import KEY_ORIGINAL, KEY_MODE, _set_strip_source
|
||||
|
||||
|
||||
class SEQUENCER_OT_batch_bake_selected(Operator):
    """Generate detection cache and bake blur for all selected MOVIE strips."""

    bl_idname = "sequencer.batch_bake_selected"
    bl_label = "Batch Bake Selected"
    bl_description = "Generate detection cache and bake blur for all selected MOVIE strips"
    bl_options = {"REGISTER"}

    @classmethod
    def poll(cls, context):
        # Disabled while any batch / mask generation / bake is in flight.
        if not context.scene.sequence_editor:
            return False
        if get_batch_processor().is_running:
            return False
        if get_mask_generator().is_running:
            return False
        if get_bake_generator().is_running:
            return False
        seq_editor = context.scene.sequence_editor
        return any(s.select and s.type == "MOVIE" for s in seq_editor.strips)

    def execute(self, context):
        """Collect selected MOVIE strips and hand them to the batch processor."""
        seq_editor = context.scene.sequence_editor
        strips = [s for s in seq_editor.strips if s.select and s.type == "MOVIE"]

        if not strips:
            self.report({"WARNING"}, "No MOVIE strips selected")
            return {"CANCELLED"}

        batch = get_batch_processor()

        def on_item_complete(idx, total, strip_name, status):
            pass  # wm properties already updated by BatchProcessor

        def on_all_complete(results):
            # Console summary only; per-item UI state is owned by the processor.
            done = sum(1 for r in results if r["status"] == "done")
            total = len(results)
            print(f"[FaceMask] Batch finished: {done}/{total} strips completed")

        # Seed progress properties so the panel shows state immediately.
        wm = context.window_manager
        wm.batch_current = 0
        wm.batch_total = len(strips)
        wm.batch_current_name = ""

        batch.start(context, strips, on_item_complete=on_item_complete, on_all_complete=on_all_complete)
        self.report({"INFO"}, f"Batch bake started for {len(strips)} strips")
        return {"FINISHED"}
|
||||
|
||||
|
||||
class SEQUENCER_OT_batch_regenerate_cache(Operator):
    """Regenerate detection cache for all selected MOVIE strips (ignore existing cache)."""

    bl_idname = "sequencer.batch_regenerate_cache"
    bl_label = "Batch Regenerate Cache"
    bl_description = "Regenerate detection cache for all selected MOVIE strips"
    bl_options = {"REGISTER"}

    @classmethod
    def poll(cls, context):
        # Disabled while any batch / mask generation / bake is in flight.
        if not context.scene.sequence_editor:
            return False
        if get_batch_processor().is_running:
            return False
        if get_mask_generator().is_running:
            return False
        if get_bake_generator().is_running:
            return False
        seq_editor = context.scene.sequence_editor
        return any(s.select and s.type == "MOVIE" for s in seq_editor.strips)

    def execute(self, context):
        """Start the batch in mask_only mode (mask generation only, no bake)."""
        seq_editor = context.scene.sequence_editor
        strips = [s for s in seq_editor.strips if s.select and s.type == "MOVIE"]

        if not strips:
            self.report({"WARNING"}, "No MOVIE strips selected")
            return {"CANCELLED"}

        batch = get_batch_processor()

        def on_all_complete(results):
            done = sum(1 for r in results if r["status"] == "done")
            print(f"[FaceMask] Batch regenerate finished: {done}/{len(results)} strips")

        batch.start(
            context,
            strips,
            on_all_complete=on_all_complete,
            mode="mask_only",
        )
        self.report({"INFO"}, f"Batch regenerate cache started for {len(strips)} strips")
        return {"FINISHED"}
|
||||
|
||||
|
||||
class SEQUENCER_OT_batch_restore_original(Operator):
    """Restore original source for all selected MOVIE strips."""

    bl_idname = "sequencer.batch_restore_original"
    bl_label = "Batch Restore Original"
    bl_description = "Restore original source filepath for all selected MOVIE strips"
    bl_options = {"REGISTER", "UNDO"}

    @classmethod
    def poll(cls, context):
        if not context.scene.sequence_editor:
            return False
        if get_batch_processor().is_running:
            return False
        seq_editor = context.scene.sequence_editor
        return any(s.select and s.type == "MOVIE" for s in seq_editor.strips)

    def execute(self, context):
        """Swap each selected strip back to its stored original filepath."""
        seq_editor = context.scene.sequence_editor
        strips = [s for s in seq_editor.strips if s.select and s.type == "MOVIE"]

        restored = 0
        skipped = 0
        for strip in strips:
            # KEY_ORIGINAL is a strip custom property holding the pre-bake path.
            original_path = strip.get(KEY_ORIGINAL)
            if not original_path or not os.path.exists(original_path):
                skipped += 1
                continue
            # Only touch strips that are not already showing the original.
            if strip.get(KEY_MODE, "original") != "original":
                _set_strip_source(strip, original_path)
                strip[KEY_MODE] = "original"
                restored += 1

        self.report(
            {"INFO"},
            f"Restored {restored} strip(s)"
            + (f", skipped {skipped} (no original stored)" if skipped else ""),
        )
        return {"FINISHED"}
|
||||
|
||||
|
||||
class SEQUENCER_OT_cancel_batch_bake(Operator):
    """Cancel ongoing batch bake."""

    bl_idname = "sequencer.cancel_batch_bake"
    bl_label = "Cancel Batch Bake"
    bl_description = "Cancel the current batch bake process"
    bl_options = {"REGISTER"}

    def execute(self, context):
        """Request cancellation of the running batch, if any."""
        batch = get_batch_processor()
        if batch.is_running:
            batch.cancel()
            self.report({"INFO"}, "Batch bake cancelled")
        else:
            self.report({"WARNING"}, "No batch bake in progress")
        return {"FINISHED"}
|
||||
|
||||
|
||||
# Registration order; unregister() walks this list in reverse.
classes = [
    SEQUENCER_OT_batch_bake_selected,
    SEQUENCER_OT_batch_regenerate_cache,
    SEQUENCER_OT_batch_restore_original,
    SEQUENCER_OT_cancel_batch_bake,
]
|
||||
|
||||
|
||||
def register():
    """Register operator classes and the WindowManager batch-progress properties."""
    for cls in classes:
        bpy.utils.register_class(cls)
    # 1-based index of the strip currently being processed (0 = idle).
    bpy.types.WindowManager.batch_current = IntProperty(default=0)
    bpy.types.WindowManager.batch_total = IntProperty(default=0)
    bpy.types.WindowManager.batch_current_name = StringProperty(default="")
|
||||
|
||||
|
||||
def unregister():
    """Remove the batch-progress properties, then unregister classes in reverse."""
    del bpy.types.WindowManager.batch_current_name
    del bpy.types.WindowManager.batch_total
    del bpy.types.WindowManager.batch_current
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)
|
||||
|
|
@ -7,11 +7,66 @@ from video strips in the Video Sequence Editor.
|
|||
|
||||
import os
|
||||
import bpy
|
||||
from bpy.props import IntProperty
|
||||
from bpy.props import IntProperty, BoolProperty
|
||||
from bpy.types import Operator
|
||||
|
||||
from ..core.async_generator import get_generator
|
||||
from ..core.utils import get_cache_dir_for_strip
|
||||
from ..core.inference_client import get_client
|
||||
from ..core.utils import get_cache_dir_for_strip, check_detection_cache
|
||||
|
||||
|
||||
def compute_strip_frame_range(strip, scene, client) -> tuple:
    """Return (start_frame, end_frame, source_fps) for *strip* in source-video frames.

    Timeline frames are converted to the video's own frame space via the
    fps ratio, then clamped to the video length. Raises on failure
    (ValueError here; client.get_video_info may raise as well).
    """
    video_path = bpy.path.abspath(strip.filepath)
    video_info = client.get_video_info(video_path)
    total_video_frames = int(video_info.get("frame_count", 0))
    source_fps = float(video_info.get("fps", 0.0))
    if total_video_frames <= 0:
        raise ValueError(f"Could not read frame count from video: {video_path}")
    if source_fps <= 0:
        # Server reported no fps — fall back to the project fps.
        source_fps = scene.render.fps / scene.render.fps_base
    project_fps = scene.render.fps / scene.render.fps_base
    fps_ratio = source_fps / project_fps
    # Map the strip's trimmed timeline range into source-video frame indices.
    start_frame = int(round(strip.frame_offset_start * fps_ratio))
    end_frame = start_frame + int(round(strip.frame_final_duration * fps_ratio)) - 1
    start_frame = max(0, min(start_frame, total_video_frames - 1))
    end_frame = max(start_frame, min(end_frame, total_video_frames - 1))
    return start_frame, end_frame, source_fps
|
||||
|
||||
|
||||
def start_mask_gen_for_strip(context, strip, on_complete, on_progress):
    """Shared entry point that starts async mask generation for *strip*.

    Raises on failure to start (RuntimeError when the generator is already
    running; compute_strip_frame_range errors propagate).
    Initializes wm.mask_progress / mask_total before calling generator.start().
    """
    scene = context.scene
    wm = context.window_manager
    generator = get_generator()

    if generator.is_running:
        raise RuntimeError("Mask generation already in progress")

    client = get_client()
    start_frame, end_frame, source_fps = compute_strip_frame_range(strip, scene, client)

    output_dir = get_cache_dir_for_strip(strip.name)
    os.makedirs(output_dir, exist_ok=True)

    # Seed UI progress before the async generator starts reporting.
    wm.mask_progress = 0
    wm.mask_total = end_frame - start_frame + 1

    generator.start(
        video_path=bpy.path.abspath(strip.filepath),
        output_dir=output_dir,
        start_frame=start_frame,
        end_frame=end_frame,
        fps=source_fps,
        conf_threshold=scene.facemask_conf_threshold,
        iou_threshold=scene.facemask_iou_threshold,
        on_complete=on_complete,
        on_progress=on_progress,
    )
|
||||
|
||||
|
||||
class SEQUENCER_OT_generate_face_mask(Operator):
|
||||
|
|
@ -22,62 +77,42 @@ class SEQUENCER_OT_generate_face_mask(Operator):
|
|||
bl_description = "Detect faces and generate mask image sequence"
|
||||
bl_options = {'REGISTER', 'UNDO'}
|
||||
|
||||
force: BoolProperty(
|
||||
name="Force Regenerate",
|
||||
description="既存のキャッシュを無視して再生成する",
|
||||
default=False,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def poll(cls, context):
|
||||
"""Check if operator can run."""
|
||||
if not context.scene.sequence_editor:
|
||||
return False
|
||||
|
||||
strip = context.scene.sequence_editor.active_strip
|
||||
if not strip:
|
||||
return False
|
||||
|
||||
return strip.type in {'MOVIE', 'IMAGE'}
|
||||
|
||||
def execute(self, context):
|
||||
strip = context.scene.sequence_editor.active_strip
|
||||
scene = context.scene
|
||||
|
||||
# Get video path
|
||||
# ファイル存在確認
|
||||
if strip.type == 'MOVIE':
|
||||
video_path = bpy.path.abspath(strip.filepath)
|
||||
else:
|
||||
# Image sequence - get directory
|
||||
video_path = bpy.path.abspath(strip.directory)
|
||||
|
||||
if not os.path.exists(video_path):
|
||||
self.report({'ERROR'}, f"Video file not found: {video_path}")
|
||||
return {'CANCELLED'}
|
||||
|
||||
# Determine output directory
|
||||
output_dir = self._get_cache_dir(context, strip)
|
||||
|
||||
# Check cache - if masks already exist, use them
|
||||
expected_frame_count = strip.frame_final_end - strip.frame_final_start + 1
|
||||
if self._check_cache(output_dir, expected_frame_count):
|
||||
self.report({'INFO'}, f"Using cached detections from {output_dir}")
|
||||
# キャッシュ確認(force=True の場合はスキップ)
|
||||
if not self.force and check_detection_cache(strip.name):
|
||||
self.report({'INFO'}, f"Using cached detections for {strip.name}")
|
||||
return {'FINISHED'}
|
||||
|
||||
# 動画の実際のフレーム数を取得(Blenderプロジェクトのfpsと動画のfpsが
|
||||
# 異なる場合にタイムライン上のフレーム数では不足するため)
|
||||
import cv2 as _cv2
|
||||
_cap = _cv2.VideoCapture(video_path)
|
||||
total_video_frames = int(_cap.get(_cv2.CAP_PROP_FRAME_COUNT))
|
||||
fps = _cap.get(_cv2.CAP_PROP_FPS) or (scene.render.fps / scene.render.fps_base)
|
||||
_cap.release()
|
||||
if total_video_frames <= 0:
|
||||
self.report({'ERROR'}, f"Could not read frame count from video: {video_path}")
|
||||
return {'CANCELLED'}
|
||||
|
||||
# Start async generation
|
||||
generator = get_generator()
|
||||
|
||||
if generator.is_running:
|
||||
self.report({'WARNING'}, "Mask generation already in progress")
|
||||
return {'CANCELLED'}
|
||||
|
||||
def on_complete(status, data):
|
||||
"""Called when mask generation completes."""
|
||||
wm = context.window_manager
|
||||
wm.mask_total = max(wm.mask_total, generator.total_frames)
|
||||
if status == "done":
|
||||
|
|
@ -97,83 +132,25 @@ class SEQUENCER_OT_generate_face_mask(Operator):
|
|||
area.tag_redraw()
|
||||
|
||||
def on_progress(current, total):
|
||||
"""Called on progress updates."""
|
||||
# Update window manager properties for UI
|
||||
wm = context.window_manager
|
||||
wm.mask_progress = current
|
||||
wm.mask_total = total
|
||||
|
||||
# Force UI redraw
|
||||
for area in context.screen.areas:
|
||||
if area.type == 'SEQUENCE_EDITOR':
|
||||
area.tag_redraw()
|
||||
|
||||
# Initialize progress
|
||||
wm = context.window_manager
|
||||
wm.mask_progress = 0
|
||||
wm.mask_total = total_video_frames
|
||||
|
||||
# Get parameters from scene properties
|
||||
conf_threshold = scene.facemask_conf_threshold
|
||||
iou_threshold = scene.facemask_iou_threshold
|
||||
|
||||
# Start generation
|
||||
generator.start(
|
||||
video_path=video_path,
|
||||
output_dir=output_dir,
|
||||
start_frame=0,
|
||||
end_frame=total_video_frames - 1,
|
||||
fps=fps,
|
||||
conf_threshold=conf_threshold,
|
||||
iou_threshold=iou_threshold,
|
||||
on_complete=on_complete,
|
||||
on_progress=on_progress,
|
||||
)
|
||||
try:
|
||||
start_mask_gen_for_strip(context, strip, on_complete, on_progress)
|
||||
except RuntimeError as e:
|
||||
self.report({'WARNING'}, str(e))
|
||||
return {'CANCELLED'}
|
||||
except Exception as e:
|
||||
self.report({'ERROR'}, f"Failed to start mask generation: {e}")
|
||||
return {'CANCELLED'}
|
||||
|
||||
self.report({'INFO'}, f"Started mask generation for {strip.name}")
|
||||
return {'FINISHED'}
|
||||
|
||||
def _get_cache_dir(self, context, strip) -> str:
|
||||
"""Get or create cache directory for mask images."""
|
||||
cache_dir = get_cache_dir_for_strip(strip.name)
|
||||
os.makedirs(cache_dir, exist_ok=True)
|
||||
return cache_dir
|
||||
|
||||
def _check_cache(self, cache_dir: str, expected_frames: int) -> bool:
|
||||
"""Check if cached masks exist and are complete.
|
||||
|
||||
Args:
|
||||
cache_dir: Path to cache directory
|
||||
expected_frames: Number of frames expected
|
||||
|
||||
Returns:
|
||||
True if cache exists and is valid
|
||||
"""
|
||||
if not os.path.exists(cache_dir):
|
||||
return False
|
||||
|
||||
detections_path = os.path.join(cache_dir, "detections.msgpack")
|
||||
if not os.path.exists(detections_path):
|
||||
return False
|
||||
|
||||
# Quick sanity check: non-empty file
|
||||
try:
|
||||
if os.path.getsize(detections_path) <= 0:
|
||||
return False
|
||||
except OSError:
|
||||
return False
|
||||
|
||||
# Optional frame count verification if msgpack is available
|
||||
try:
|
||||
import msgpack
|
||||
|
||||
with open(detections_path, "rb") as f:
|
||||
payload = msgpack.unpackb(f.read(), raw=False)
|
||||
frames = payload.get("frames", [])
|
||||
return len(frames) >= expected_frames * 0.9
|
||||
except Exception:
|
||||
return True
|
||||
|
||||
|
||||
class SEQUENCER_OT_cancel_mask_generation(Operator):
|
||||
"""Cancel ongoing mask generation."""
|
||||
|
|
@ -206,13 +183,11 @@ def register():
|
|||
for cls in classes:
|
||||
bpy.utils.register_class(cls)
|
||||
|
||||
# Add progress properties to window manager
|
||||
bpy.types.WindowManager.mask_progress = IntProperty(default=0)
|
||||
bpy.types.WindowManager.mask_total = IntProperty(default=0)
|
||||
|
||||
|
||||
def unregister():
|
||||
# Remove properties
|
||||
del bpy.types.WindowManager.mask_progress
|
||||
del bpy.types.WindowManager.mask_total
|
||||
|
||||
|
|
|
|||
|
|
@ -11,11 +11,12 @@ from bpy.types import Panel
|
|||
|
||||
from ..core.async_bake_generator import get_bake_generator
|
||||
from ..core.async_generator import get_generator
|
||||
from ..core.batch_processor import get_batch_processor
|
||||
from ..core.utils import (
|
||||
get_server_status,
|
||||
get_cache_info,
|
||||
format_size,
|
||||
get_detections_path_for_strip,
|
||||
check_detection_cache,
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -35,9 +36,15 @@ class SEQUENCER_PT_face_mask(Panel):
|
|||
seq_editor = context.scene.sequence_editor
|
||||
# Note: Blender 5.0 uses 'strips' instead of 'sequences'
|
||||
|
||||
batch = get_batch_processor()
|
||||
generator = get_generator()
|
||||
bake_generator = get_bake_generator()
|
||||
|
||||
# Batch progress (highest priority)
|
||||
if batch.is_running:
|
||||
self._draw_batch_progress(layout, wm, batch, generator, bake_generator)
|
||||
return
|
||||
|
||||
# Show progress if generating masks
|
||||
if generator.is_running:
|
||||
self._draw_progress(layout, wm, generator)
|
||||
|
|
@ -65,6 +72,7 @@ class SEQUENCER_PT_face_mask(Panel):
|
|||
self._draw_parameters(layout, scene)
|
||||
self._draw_server_status(layout)
|
||||
self._draw_cache_info(layout, context, seq_editor)
|
||||
self._draw_batch_controls(layout, context, seq_editor)
|
||||
|
||||
def _draw_parameters(self, layout, scene):
|
||||
"""Draw detection parameters."""
|
||||
|
|
@ -182,6 +190,74 @@ class SEQUENCER_PT_face_mask(Panel):
|
|||
icon='CANCEL',
|
||||
)
|
||||
|
||||
def _draw_batch_progress(self, layout, wm, batch, generator, bake_generator):
    """Draw batch bake progress: overall bar, current strip, and inner step bar."""
    box = layout.box()
    if batch._mode == "mask_only":
        box.label(text="Batch Generating Cache...", icon='RENDER_ANIMATION')
    else:
        box.label(text="Batch Baking...", icon='RENDER_ANIMATION')

    # Overall progress
    total = max(wm.batch_total, 1)
    # Show n-1/total while current strip is in progress, n/total when moving to next
    done_count = max(wm.batch_current - 1, 0)
    overall_factor = done_count / total
    box.progress(
        factor=overall_factor,
        text=f"{wm.batch_current} / {wm.batch_total}",
    )

    if wm.batch_current_name:
        box.label(text=f"Strip: {wm.batch_current_name}")

    # Inner progress (mask gen or bake)
    if generator.is_running:
        inner = wm.mask_progress / max(wm.mask_total, 1)
        box.progress(
            factor=inner,
            text=f"Detecting: {wm.mask_progress} / {wm.mask_total}",
        )
    elif bake_generator.is_running:
        inner = wm.bake_progress / max(wm.bake_total, 1)
        box.progress(
            factor=inner,
            text=f"Baking: {wm.bake_progress} / {wm.bake_total}",
        )

    box.operator(
        "sequencer.cancel_batch_bake",
        text="Cancel Batch",
        icon='CANCEL',
    )
|
||||
|
||||
def _draw_batch_controls(self, layout, context, seq_editor):
    """Draw batch bake button when multiple MOVIE strips are selected."""
    if not seq_editor:
        return

    # Count selected MOVIE strips; nothing selected means nothing to draw.
    movie_count = sum(
        1 for s in seq_editor.strips if s.select and s.type == "MOVIE"
    )
    if not movie_count:
        return

    plural = 's' if movie_count > 1 else ''
    box = layout.box()
    box.label(
        text=f"Batch ({movie_count} strip{plural} selected)",
        icon='RENDER_ANIMATION',
    )

    # One button per batch operator.
    batch_ops = (
        ("sequencer.batch_bake_selected", "Batch Bake Selected", 'RENDER_ANIMATION'),
        ("sequencer.batch_regenerate_cache", "Batch Regenerate Cache", 'FILE_REFRESH'),
        ("sequencer.batch_restore_original", "Batch Restore Original", 'LOOP_BACK'),
    )
    for op_idname, op_text, op_icon in batch_ops:
        box.operator(op_idname, text=op_text, icon=op_icon)
|
||||
|
||||
def _draw_generation_controls(self, layout, context, strip):
|
||||
"""Draw mask generation controls."""
|
||||
box = layout.box()
|
||||
|
|
@ -191,31 +267,33 @@ class SEQUENCER_PT_face_mask(Panel):
|
|||
row = box.row()
|
||||
row.label(text=f"Strip: {strip.name}")
|
||||
|
||||
detections_path = get_detections_path_for_strip(strip.name)
|
||||
has_mask = bpy.path.abspath(detections_path) and os.path.exists(
|
||||
bpy.path.abspath(detections_path)
|
||||
)
|
||||
has_mask = check_detection_cache(strip.name)
|
||||
|
||||
if has_mask:
|
||||
row = box.row()
|
||||
row.label(text="✓ Detection cache exists", icon='CHECKMARK')
|
||||
|
||||
# Generate button
|
||||
box.operator(
|
||||
"sequencer.generate_face_mask",
|
||||
text="Generate Detection Cache" if not has_mask else "Regenerate Cache",
|
||||
icon='FACE_MAPS',
|
||||
)
|
||||
# Generate / Regenerate button
|
||||
if not has_mask:
|
||||
box.operator(
|
||||
"sequencer.generate_face_mask",
|
||||
text="Generate Detection Cache",
|
||||
icon='FACE_MAPS',
|
||||
)
|
||||
else:
|
||||
op = box.operator(
|
||||
"sequencer.generate_face_mask",
|
||||
text="Regenerate Cache",
|
||||
icon='FILE_REFRESH',
|
||||
)
|
||||
op.force = True
|
||||
|
||||
def _draw_blur_controls(self, layout, context, strip):
|
||||
"""Draw blur application controls."""
|
||||
box = layout.box()
|
||||
box.label(text="Blur Bake", icon='MATFLUID')
|
||||
|
||||
detections_path = get_detections_path_for_strip(strip.name)
|
||||
has_mask = bpy.path.abspath(detections_path) and os.path.exists(
|
||||
bpy.path.abspath(detections_path)
|
||||
)
|
||||
has_mask = check_detection_cache(strip.name)
|
||||
|
||||
if not has_mask:
|
||||
box.label(text="Generate detection cache first", icon='INFO')
|
||||
|
|
|
|||
|
|
@ -85,6 +85,10 @@ class GenerateRequest(BaseModel):
|
|||
iou_threshold: float = 0.45
|
||||
|
||||
|
||||
class VideoInfoRequest(BaseModel):
    """Request body for the /video_info endpoint."""

    # Path to the video file as seen by the server process
    # (existence is checked with os.path.exists in the endpoint).
    video_path: str
|
||||
|
||||
|
||||
class BakeRequest(BaseModel):
|
||||
video_path: str
|
||||
detections_path: str
|
||||
|
|
@ -397,6 +401,9 @@ def process_bake_task(task_id: str, req: BakeRequest):
|
|||
tasks[task_id].message = "Invalid detections format: 'frames' is missing"
|
||||
return
|
||||
|
||||
# 検出キャッシュの開始フレーム(ソース動画のフレームインデックス)
|
||||
det_start_frame = int(payload.get("start_frame", 0))
|
||||
|
||||
# Get video info
|
||||
temp_cap = cv2.VideoCapture(req.video_path)
|
||||
if not temp_cap.isOpened():
|
||||
|
|
@ -415,7 +422,8 @@ def process_bake_task(task_id: str, req: BakeRequest):
|
|||
tasks[task_id].message = "Invalid source video dimensions"
|
||||
return
|
||||
|
||||
total = min(src_frames, len(frames_detections)) if src_frames > 0 else len(frames_detections)
|
||||
# ソース動画の全フレームを出力(スワップ後もトリム設定が正しく機能するよう)
|
||||
total = src_frames if src_frames > 0 else (det_start_frame + len(frames_detections))
|
||||
if total <= 0:
|
||||
tasks[task_id].status = TaskStatus.FAILED
|
||||
tasks[task_id].message = "Source/detections frame count is zero"
|
||||
|
|
@ -504,7 +512,8 @@ def process_bake_task(task_id: str, req: BakeRequest):
|
|||
break
|
||||
|
||||
idx, frame = item
|
||||
frame_boxes = frames_detections[idx] if idx < len(frames_detections) else []
|
||||
det_idx = idx - det_start_frame
|
||||
frame_boxes = frames_detections[det_idx] if 0 <= det_idx < len(frames_detections) else []
|
||||
|
||||
if not frame_boxes:
|
||||
process_queue.put((idx, frame))
|
||||
|
|
@ -839,6 +848,32 @@ def get_status():
|
|||
"rocm_version": gpu_info["rocm_version"]
|
||||
}
|
||||
|
||||
|
||||
@app.post("/video_info")
def get_video_info(req: VideoInfoRequest):
    """Probe a video file and return its fps, dimensions and frame count.

    Raises HTTP 404 if the path does not exist and HTTP 400 if OpenCV
    cannot open the file.
    """
    if not os.path.exists(req.video_path):
        raise HTTPException(status_code=404, detail=f"Video not found: {req.video_path}")

    cap = cv2.VideoCapture(req.video_path)
    if not cap.isOpened():
        raise HTTPException(status_code=400, detail="Failed to open video")

    # Read all properties before releasing; `or 0` guards against OpenCV
    # returning 0.0/None for properties the backend cannot report.
    try:
        probed = {
            "fps": float(cap.get(cv2.CAP_PROP_FPS) or 0.0),
            "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) or 0),
            "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) or 0),
            "frame_count": int(cap.get(cv2.CAP_PROP_FRAME_COUNT) or 0),
        }
    finally:
        cap.release()

    return {"video_path": req.video_path, **probed}
|
||||
|
||||
@app.post("/generate", response_model=Task)
|
||||
def generate_mask_endpoint(req: GenerateRequest, background_tasks: BackgroundTasks):
|
||||
task_id = str(uuid.uuid4())
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user