matitos_news/app_cv/Demo.ipynb

{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import base64\n",
"import json\n",
"import requests\n",
"import io\n",
"import numpy as np\n",
"import PIL.Image\n",
"import cv2\n",
"from pprint import pprint\n",
"\n",
"def process_image(path_img):\n",
" with open(path_img, \"rb\") as image_file:\n",
" encoded_string = base64.b64encode(image_file.read()).decode('utf-8')\n",
" response = requests.post(\n",
" 'http://localhost:5000/process',\n",
" headers={'Content-Type': 'application/json'},\n",
" data=json.dumps({'image': encoded_string})\n",
" )\n",
" response_dict = response.json()\n",
" pprint(response_dict)\n",
" # Decode\n",
" image_bytes = base64.b64decode(response_dict.get(\"image_b64\"))\n",
" img_array = np.frombuffer(io.BytesIO(image_bytes).getvalue(), dtype=np.uint8)\n",
" img_bgr = cv2.imdecode(img_array, cv2.IMREAD_COLOR)\n",
" img_rgb = img_bgr[:, :, ::-1]\n",
" return img_rgb"
]
},
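{
"cell_type": "markdown",
"metadata": {},
"source": [
"`process_image` assumes a companion service listening on `http://localhost:5000/process` that accepts `{\"image\": <base64>}` and answers with a JSON body containing an `image_b64` field. That service is not part of this notebook, so the next cell is only a minimal Flask sketch of the expected contract (the endpoint and field names come from `process_image`; Flask itself and the placeholder drawing step are assumptions). Skip it if the real app is already running."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch of the server contract assumed by process_image() -- NOT the real app_cv service.\n",
"# It decodes the posted image, draws a placeholder frame, and echoes it back as base64.\n",
"import base64\n",
"\n",
"import cv2\n",
"import numpy as np\n",
"from flask import Flask, jsonify, request\n",
"\n",
"app = Flask(__name__)\n",
"\n",
"@app.route(\"/process\", methods=[\"POST\"])\n",
"def process():\n",
"    # Decode the base64 payload sent by process_image()\n",
"    img_bytes = base64.b64decode(request.get_json()[\"image\"])\n",
"    img = cv2.imdecode(np.frombuffer(img_bytes, dtype=np.uint8), cv2.IMREAD_COLOR)\n",
"    # Placeholder \"processing\": draw a frame around the image\n",
"    cv2.rectangle(img, (0, 0), (img.shape[1] - 1, img.shape[0] - 1), (0, 0, 255), 5)\n",
"    ok, buf = cv2.imencode(\".jpg\", img)\n",
"    # The real service presumably adds detection/metadata fields, which process_image() pprints\n",
"    return jsonify({\"image_b64\": base64.b64encode(buf.tobytes()).decode(\"utf-8\")})\n",
"\n",
"# app.run(port=5000)  # uncomment to serve this sketch locally"
]
},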
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"path_img = \"imgs/img_1p.jpg\"\n",
"PIL.Image.fromarray( process_image(path_img) )"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"path_img = \"imgs/img_nude.jpg\"\n",
"PIL.Image.fromarray( process_image(path_img) )"
]
},
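{
"cell_type": "markdown",
"metadata": {},
"source": [
"The two cells above query the service one image at a time. As a convenience, the sketch below loops over every JPEG in the `imgs/` folder used above and saves each processed result; the glob pattern and the `imgs/processed` output folder are assumptions, not part of the original demo."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hedged convenience sketch: batch-process every JPEG under imgs/ and save the results.\n",
"# Assumes the processing service is running; the output folder name is an assumption.\n",
"from pathlib import Path\n",
"\n",
"out_dir = Path(\"imgs/processed\")\n",
"out_dir.mkdir(parents=True, exist_ok=True)\n",
"\n",
"for path_img in sorted(Path(\"imgs\").glob(\"*.jpg\")):\n",
"    img_rgb = process_image(str(path_img))\n",
"    PIL.Image.fromarray(img_rgb).save(out_dir / path_img.name)\n",
"    print(f\"Saved {out_dir / path_img.name}\")"
]
},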
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"'''\n",
"# !git clone https://github.com/wildchlamydia/mivolo\n",
"# !pip install ultralytics yt_dlp pandas scipy timm==0.8.13.dev0\n",
"# !pip install ./mivolo\n",
"\n",
"!python mivolo/demo.py \\\n",
" --input \"face_data/sample_image.jpg\" \\\n",
" --output \"output\" \\\n",
" --detector-weights \"mivolo/pretrained/yolov8x_person_face.pt\" \\\n",
" --checkpoint \"mivolo/pretrained/model_imdb_cross_person_4.22_99.46.pth.tar\" \\\n",
" --device \"cpu\" \\\n",
" --draw\n",
"'''\n",
"\n",
"'''\n",
"# !git clone https://github.com/Kartik-3004/facexformer.git\n",
"# !pip install huggingface_hub torch torchvision torchaudio opencv-python facenet_pytorch\n",
"from huggingface_hub import hf_hub_download\n",
"hf_hub_download(repo_id=\"kartiknarayan/facexformer\", filename=\"ckpts/model.pt\", local_dir=\"./facexformer\")\n",
"\n",
"!python facexformer/inference.py \\\n",
" --model_path facexformer/ckpts/model.pt \\\n",
" --image_path face_data/sample_image.jpg \\\n",
" --results_path face_data \\\n",
" --task parsing\n",
" x\n",
"!python facexformer/inference.py \\\n",
" --model_path facexformer/ckpts/model.pt \\\n",
" --image_path face_data/face.png \\\n",
" --results_path face_data \\\n",
" --task landmarks\n",
"\n",
"!python facexformer/inference.py \\\n",
" --model_path facexformer/ckpts/model.pt \\\n",
" --image_path face_data/face.png \\\n",
" --results_path face_data \\\n",
" --task headpose\n",
"\n",
"!python facexformer/inference.py \\\n",
" --model_path facexformer/ckpts/model.pt \\\n",
" --image_path face_data/face.png \\\n",
" --results_path face_data \\\n",
" --task attributes\n",
"\n",
"!python facexformer/inference.py \\\n",
" --model_path facexformer/ckpts/model.pt \\\n",
" --image_path face_data/face.png \\\n",
" --results_path face_data \\\n",
" --task age_gender_race\n",
"'''"
]
}
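,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The commented-out block above runs `facexformer/inference.py` once per task with otherwise identical arguments. If those experiments are revived, the hedged sketch below drives the same CLI from Python so the task list lives in one place; it reuses the flags shown above and, unlike the original (which runs `parsing` on `sample_image.jpg`), uses `face_data/face.png` for every task."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hedged sketch: drive the facexformer CLI from Python, one subprocess call per task,\n",
"# reusing the flags from the commented-out block above (face.png is used for every task here).\n",
"import subprocess\n",
"\n",
"tasks = [\"parsing\", \"landmarks\", \"headpose\", \"attributes\", \"age_gender_race\"]\n",
"for task in tasks:\n",
"    subprocess.run(\n",
"        [\n",
"            \"python\", \"facexformer/inference.py\",\n",
"            \"--model_path\", \"facexformer/ckpts/model.pt\",\n",
"            \"--image_path\", \"face_data/face.png\",\n",
"            \"--results_path\", \"face_data\",\n",
"            \"--task\", task,\n",
"        ],\n",
"        check=True,\n",
"    )"
]
}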
],
"metadata": {
"kernelspec": {
"display_name": "matitos_cv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}