Skip to content

Instantly share code, notes, and snippets.

@JupyterJones
Created August 11, 2025 02:07
Show Gist options
  • Save JupyterJones/18c0589261d2fdee35eec0d5dfaa8366 to your computer and use it in GitHub Desktop.
Save JupyterJones/18c0589261d2fdee35eec0d5dfaa8366 to your computer and use it in GitHub Desktop.
create subtle head movement jpg to mp4
import cv2
import dlib
import numpy as np
from icecream import ic
from moviepy.editor import ImageSequenceClip
from sys import argv
import sys
import os
"""
# Face Subtle Motion Video Generator
This Python script takes a **static image** containing a face, detects the face landmarks, and applies **subtle natural motion** to the face region over time.
The result is exported as an MP4 video with smooth, gentle movement β€” giving the illusion of a live recording from a still photo.
---
## ✨ Features
- **Automatic face detection** using `dlib`.
- **Smooth sinusoidal motion** with slight random drift for realism.
- **Customizable amplitude, frequency, and feather blending** for a natural look.
- **Feathered mask blending** so movement integrates seamlessly into the original image.
- **Debug output** using `icecream` for easier tuning.
- Outputs a **high-quality MP4 video** without audio.
---
## πŸ“¦ Dependencies
Install the required Python packages:
```bash
pip install opencv-python dlib numpy icecream moviepy
Note: dlib requires CMake and a C++ build environment installed on your system.
On Ubuntu/Debian:
sudo apt update
sudo apt install cmake build-essential python3-dev
βš™οΈ Additional Files Required
You must have the shape_predictor_68_face_landmarks.dat file for face landmark detection.
Download it from the official dlib model repository:
Direct Download: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
Extract it:
bunzip2 shape_predictor_68_face_landmarks.dat.bz2
Place it in your project folder and update the PREDICTOR_PATH variable in the script to point to it.
πŸš€ Usage
Place your input image in a folder, e.g., cavemen/.
Update the IMAGE_PATH variable in the script:
IMAGE_PATH = "cavemen/your_image.jpg"
Update the OUTPUT_PATH to your desired video file name:
OUTPUT_PATH = "cavemen/output_video.mp4"
Run the script:
python3 face_motion.py
πŸ”§ Adjustable Parameters
Inside the script, you can tweak these variables for different motion effects:
VIDEO_LENGTH_SECONDS = 20 # total video duration
FPS = 30 # frames per second
BASE_AMPLITUDE_X = 8.0 # horizontal movement in pixels
BASE_AMPLITUDE_Y = 5.0 # vertical movement in pixels
BASE_FREQ_X = 0.08 # horizontal motion frequency
BASE_FREQ_Y = 0.12 # vertical motion frequency
FEATHER_PIXELS = 60 # softness of mask edge
RANDOM_DRIFT_STRENGTH = 0.5
DRIFT_SPEED = 0.003
ROI_MARGIN = 150 # extra margin around detected face
πŸ“‚ Output
The output is an MP4 file (H.264 codec) without audio, located at OUTPUT_PATH.
Example:
Saving video...
Moviepy - Building video cavemen/image_output_.mp4
Done
πŸ“ Notes
For more visible movement, increase BASE_AMPLITUDE_X and BASE_AMPLITUDE_Y.
For smoother blending, increase FEATHER_PIXELS (but too high can hide motion).
The script is optimized for portraits; performance may vary for non-face images.
Works best with high-resolution images where the face occupies a large portion of the frame.
πŸ“œ License
This script is provided "as-is" without warranty. You may modify and use it for personal or commercial projects.
# ===== Adjustable variables =====
IMAGE_PATH = "cavemen/1080_2025-08-10-08:421754786576.jpg"
OUTPUT_PATH = "cavemen/image_output_.mp4"
PREDICTOR_PATH = "/home/jack/shape_predictor_68_face_landmarks.dat"
VIDEO_LENGTH_SECONDS = 20
"""
def generate_head_movement(IMAGE_PATH,OUTPUT_PATH,PREDICTOR_PATH,VIDEO_LENGTH_SECONDS):
FPS = 30
BASE_AMPLITUDE_X = 8.0 # noticeable horizontal motion
BASE_AMPLITUDE_Y = 5.0 # noticeable vertical motion
BASE_FREQ_X = 0.08 # horizontal wave
BASE_FREQ_Y = 0.12 # vertical wave
FEATHER_PIXELS = 60 # smaller feather to let movement show
'''
EXAMPLES
BASE_AMPLITUDE_X = 15.0
BASE_AMPLITUDE_Y = 10.0
FEATHER_PIXELS = 40
#BASE_AMPLITUDE_X = 1.0 # horizontal motion in pixels
#BASE_AMPLITUDE_Y = 0.8 # vertical motion in pixels
BASE_FREQ_X = 0.08 # slow horizontal frequency
BASE_FREQ_Y = 0.12 # slow vertical frequency
'''
RANDOM_DRIFT_STRENGTH = 0.5 # mild randomness
DRIFT_SPEED = 0.003 # slow drift change
#FEATHER_PIXELS = 180 # wide feather for smooth blend
ROI_MARGIN = 150 # extra margin around face
# ===== Load predictor =====
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)
# ===== Load image =====
img = cv2.imread(IMAGE_PATH)
if img is None:
raise FileNotFoundError(f"Could not load image {IMAGE_PATH}")
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
h, w = img_rgb.shape[:2]
ic(h, w)
# ===== Detect face landmarks =====
faces = detector(img_rgb)
if not faces:
raise RuntimeError("No face detected in image")
face = faces[0]
landmarks = predictor(img_rgb, face)
# ===== Get bounding box with margin =====
xs = [landmarks.part(i).x for i in range(68)]
ys = [landmarks.part(i).y for i in range(68)]
min_x, max_x = min(xs), max(xs)
min_y, max_y = min(ys), max(ys)
x1 = max(min_x - ROI_MARGIN, 0)
y1 = max(min_y - ROI_MARGIN, 0)
x2 = min(max_x + ROI_MARGIN, w)
y2 = min(max_y + ROI_MARGIN, h)
roi_w = x2 - x1
roi_h = y2 - y1
ic(x1, y1, x2, y2, roi_w, roi_h)
# ===== Create feather mask =====
mask = np.zeros((roi_h, roi_w), dtype=np.float32)
cv2.ellipse(mask, (roi_w//2, roi_h//2),
(roi_w//2 - FEATHER_PIXELS, roi_h//2 - FEATHER_PIXELS),
0, 0, 360, 1, -1)
mask = cv2.GaussianBlur(mask, (0, 0), FEATHER_PIXELS)
mask = mask[..., np.newaxis]
# ===== Generate frames =====
frames = []
total_frames = VIDEO_LENGTH_SECONDS * FPS
amp_x, amp_y = BASE_AMPLITUDE_X, BASE_AMPLITUDE_Y
freq_x, freq_y = BASE_FREQ_X, BASE_FREQ_Y
for frame_idx in range(total_frames):
t = frame_idx / FPS
# Smooth sinusoidal motion with drift
dx = amp_x * np.sin(2 * np.pi * freq_x * t)
dy = amp_y * np.sin(2 * np.pi * freq_y * t)
# Apply slow random drift
amp_x += (np.random.rand() - 0.5) * RANDOM_DRIFT_STRENGTH * DRIFT_SPEED
amp_y += (np.random.rand() - 0.5) * RANDOM_DRIFT_STRENGTH * DRIFT_SPEED
freq_x += (np.random.rand() - 0.5) * RANDOM_DRIFT_STRENGTH * DRIFT_SPEED
freq_y += (np.random.rand() - 0.5) * RANDOM_DRIFT_STRENGTH * DRIFT_SPEED
# Clamp to avoid runaway drift
amp_x = np.clip(amp_x, BASE_AMPLITUDE_X * 0.5, BASE_AMPLITUDE_X * 1.5)
amp_y = np.clip(amp_y, BASE_AMPLITUDE_Y * 0.5, BASE_AMPLITUDE_Y * 1.5)
freq_x = np.clip(freq_x, BASE_FREQ_X * 0.5, BASE_FREQ_X * 1.5)
freq_y = np.clip(freq_y, BASE_FREQ_Y * 0.5, BASE_FREQ_Y * 1.5)
# Shift ROI
roi = img_rgb[y1:y2, x1:x2]
M = np.float32([[1, 0, dx], [0, 1, dy]])
shifted_roi = cv2.warpAffine(roi, M, (roi_w, roi_h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
# Blend shifted ROI into original image
frame = img_rgb.copy()
blended_roi = (shifted_roi * mask + roi * (1 - mask)).astype(np.uint8)
frame[y1:y2, x1:x2] = blended_roi
frames.append(frame)
# ===== Save video =====
ic("Saving video...")
clip = ImageSequenceClip(frames, fps=FPS)
clip.write_videofile(OUTPUT_PATH, codec="libx264", audio=False)
ic("Done")
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python your_script.py <image_path>")
sys.exit(1)
IMAGE_PATH = sys.argv[1] # image file path
base, _ = os.path.splitext(IMAGE_PATH) # ('path/to/file', '.ext')
OUTPUT_PATH = f"{base}.mp4" # replace ext with .mp4
PREDICTOR_PATH = "/home/jack/shape_predictor_68_face_landmarks.dat"
VIDEO_LENGTH_SECONDS = 36
generate_head_movement(IMAGE_PATH, OUTPUT_PATH, PREDICTOR_PATH,VIDEO_LENGTH_SECONDS)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment