from fastapi import FastAPI, Response
from pydantic import BaseModel
import cv2
import subprocess
import base64


class Item(BaseModel):
    """Request body for image generation."""
    prompt: str | None = None
    size: str | None = "512x512"
    num_inference_steps: int | None = 4
    seed: int | None = 123456


def generate_image(item: Item):
    print(item)

    # Parameters
    seed = item.seed
    num_inference_steps = item.num_inference_steps
    size = item.size
    prompt = item.prompt

    # Build the command as an argument list so prompts containing quotes or
    # shell metacharacters cannot break (or inject into) the command line.
    command = [
        "python", "./run_rknn-lcm.py",
        "--seed", str(seed),
        "-i", "./model",
        "-o", "./images",
        "--num-inference-steps", str(num_inference_steps),
        "-s", size,
        "--prompt", prompt,
    ]

    # Inference
    output = subprocess.run(command, capture_output=True)
    print(output, "\n")

    # Path to the generated image
    path_img = "./images/image.png"  # glob.glob("./images/*")[0]

    # Read the image from disk (cv2.imread returns None if the file is missing)
    img = cv2.imread(path_img)
    return img


app = FastAPI()


@app.get("/")
def read_root():
    return {"Hello": "World"}


@app.post("/image")
def get_image(item: Item):
    # Generate
    image = generate_image(item)

    # Encode the image as PNG, then base64 so it can be sent as text
    retval, buffer = cv2.imencode(".png", image)
    png_as_text = base64.b64encode(buffer)

    # Return the base64-encoded PNG as the response body
    return Response(png_as_text)
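
# --- Usage sketch (illustrative, not part of the server) ---
# A minimal client showing how the /image endpoint could be called and the
# base64-encoded PNG decoded back into a file. The host/port (127.0.0.1:8000)
# and the `uvicorn main:app` launch command are assumptions, not part of the
# original code; adjust them to your setup.
#
# import base64
# import requests
#
# payload = {
#     "prompt": "a photo of an astronaut riding a horse on mars",
#     "size": "512x512",
#     "num_inference_steps": 4,
#     "seed": 123456,
# }
# resp = requests.post("http://127.0.0.1:8000/image", json=payload)
# with open("result.png", "wb") as f:
#     f.write(base64.b64decode(resp.content))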