# @title Install dependencies
!uv pip install -q diffusers["torch"]==0.35.1 transformers==4.56.2 accelerate==1.10.1

Image-to-Image using Stable Diffusion 1.5
Load SD 1.5 model using pipeline
import torch
from diffusers import StableDiffusionImg2ImgPipeline

# Load the img2img pipeline in half precision.
print("Loading Stable Diffusion img2img pipeline...")
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    # FIX: the accepted keyword for diffusers 0.35.x is `torch_dtype`.
    # Passing `dtype` produced the warning "Keyword arguments
    # {'dtype': torch.float16} are not expected ... and will be ignored",
    # so the model silently loaded in fp32 (double the VRAM, slower).
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")
Generate Intermediate Images
from PIL import Image
import requests
from io import BytesIO

IMAGE_URL = "https://raw.githubusercontent.com/simonguest/CS-394/refs/heads/main/src/04/images/luna.jpg"
SEED = 128763
PROMPT = "a goldendoodle wearing sunglasses, high quality, detailed"
NEGATIVE_PROMPT = "blurry, low quality, distorted"

# Fetch the source image. Fail fast on HTTP errors (404/500) rather than
# handing an error page to Image.open, and bound the request with a timeout.
response = requests.get(IMAGE_URL, timeout=30)
response.raise_for_status()
init_image = Image.open(BytesIO(response.content)).convert("RGB")

# Resize for faster processing. SD 1.5 needs dimensions divisible by 8,
# which 512x712 satisfies. NOTE(review): 712 looks like a possible typo for
# the conventional 768 — confirm the intended output aspect ratio.
init_image = init_image.resize((512, 712))
display(init_image)
Function to generate an image using the strength parameter
def generate_image(strength, prompt=PROMPT, negative_prompt=NEGATIVE_PROMPT, seed=SEED):
    """Run img2img on ``init_image`` and return the first generated PIL image.

    Args:
        strength: How far to move away from the init image (0.0 = keep it,
            1.0 = mostly ignore it).
        prompt: Text prompt; defaults to the module-level PROMPT.
        negative_prompt: Defaults to the module-level NEGATIVE_PROMPT.
        seed: RNG seed for reproducibility; -1 means no fixed seed
            (non-deterministic output).
    """
    # Seed the generator only when a fixed seed was requested.
    generator = torch.Generator().manual_seed(seed) if seed != -1 else None
    return pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=init_image,
        strength=strength,
        guidance_scale=7.5,
        num_inference_steps=30,
        generator=generator,
    ).images[0]

# Strength 0.3
# Strength sweep: generate and display one image per strength value
# (0.3 subtle edit ... 0.9 heavy deviation from the source photo).
for strength in (0.3, 0.5, 0.7, 0.9):
    display(generate_image(strength))