main2.py
from fastapi import FastAPI, File, UploadFile
import cv2
import numpy as np
from PIL import Image
from io import BytesIO
import keras
import uvicorn
import os
import requests
# from pydantic import BaseModel

app = FastAPI()

API_URL = "https://api-inference.huggingface.co/models/google/gemma-7b"
# Read the Hugging Face API token from the environment rather than hardcoding it
headers = {"Authorization": "Bearer " + os.environ.get("HF_API_TOKEN", "")}

def query(payload):
    # Send the prompt to the hosted Gemma model and return the parsed JSON
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
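
# A minimal sketch of how query() is used, assuming the hosted inference API
# returns a list of {"generated_text": ...} dicts (the shape relied on in
# /predict/ below); the prompt text here is only an illustration:
#
#   result = query({"inputs": "what are remedies for a first degree burn"})
#   text = result[0]["generated_text"]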

# Load the trained burn-classification model (saved as "Ai714")
model = keras.models.load_model("Ai714")

def read_imagefile(file) -> Image.Image:
    # Decode the raw uploaded bytes into a PIL image
    image = Image.open(BytesIO(file))
    return image

def preprocessing(image: Image.Image):
    # Convert PIL image to NumPy array
    image_np = np.array(image)
    # Resize the image to the model's expected input size
    resized_image = cv2.resize(image_np, (420, 420))
    # Add a batch dimension to match the model's input shape
    resized_image = np.expand_dims(resized_image, axis=0)
    return resized_image
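
# For reference: assuming an RGB upload, preprocessing() yields an array of
# shape (1, 420, 420, 3), i.e. a batch of one 420x420 image, which is the
# input shape the "Ai714" model is expected to accept.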

@app.get('/')
def index():
    return {'message': 'hello world'}
@app.post("/predict/")
async def predict_image(name: str,file: UploadFile = File(...)):
# Load the model
# Read the uploaded image file
image = read_imagefile(await file.read())
# Preprocess the image
image = preprocessing(image)
# Maprediction
predictione = model.predict(image)
max_idx=predictione.argmax()
x=""
if(max_idx==0):
x="first degree burn"
elif(max_idx==1):
x="second degree burn"
else:
x="third degree burn"
output = query({
"inputs": "what are remedies for "+x+". If a paitent alredy have "+name+" problem",
})
output= output[0]['generated_text']
output = output.replace('\n', ' ')
# Remove extra spaces
output = ' '.join(output.split())
# Return prediction as response
return {"prediction": max_idx.tolist(),
"llm_op":output}

# Get the port number from the environment variable if available
if __name__ == '__main__':
    port = int(os.environ.get("PORT", 8000))  # falls back to 8000
    uvicorn.run(app, host='127.0.0.1', port=port)
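
# A usage sketch (client side, not part of the app): once the server is
# running, /predict/ can be exercised like this. The file "burn.jpg" and the
# condition "diabetes" are hypothetical examples.
#
#   import requests
#   with open("burn.jpg", "rb") as f:
#       r = requests.post(
#           "http://127.0.0.1:8000/predict/",
#           params={"name": "diabetes"},
#           files={"file": ("burn.jpg", f, "image/jpeg")},
#       )
#   print(r.json())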