-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmodel.py
75 lines (66 loc) · 2.64 KB
/
model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
from huggingface_hub import InferenceClient
from huggingface_hub import login
from PIL import Image
import io
import os
import tensorflow as tf
from tensorflow import keras
from keras.models import load_model
from dotenv import load_dotenv
# from keras.preprocessing.image import load_img
# from keras.applications.vgg16 import preprocess_input
import numpy as np
import cv2
# Login to HuggingFace Hub.
# SECURITY: the API token was previously hard-coded here, which leaks the
# credential through version control. Load it from the environment instead
# (python-dotenv reads a local .env file; `load_dotenv` was already imported
# above but never called). The old token should be revoked on huggingface.co.
load_dotenv()
login(token=os.getenv("HF_TOKEN"))
# print(os.path.join('models', 'CommercialResidentialF.h5'))
# new_model = tf.keras.models.load_model(os.path.join('models', 'CommercialResidentialF.h5'))
# new_model = load_model(os.path.join('models', 'CommercialResidentialF.h5'))
# #input image file
# img = cv2.imread('BN-JJ952_0715pl_ER_20150715101738.jpg')
# resize = tf.image.resize(img, (256,256))
# yhatnew = new_model.predict(np.expand_dims(resize/255,0))
# print(yhatnew)
# if yhatnew > 0.5:
# print(f'Predicted estate is residential')
# else:
# print(f'Predicted estate is commercial')
# Show the model architecture
# new_model.summary()
# Use the Visual Question Answering model from the HuggingFace Hub.
def analyze_image(image_path="images/Commercial-Ext1.jpeg",
                  question="What type of building is this?"):
    """Ask a visual-question-answering model a question about an image.

    Args:
        image_path: Path to the image file to analyze.
        question: Question posed to the VQA model. New optional parameter;
            defaults to the originally hard-coded question, so existing
            callers are unaffected.

    Returns:
        The result returned by ``InferenceClient.visual_question_answering``
        (answer/score pairs from the dandelin/vilt-b32-finetuned-vqa model).
    """
    # Context manager ensures the underlying file handle is closed
    # (the original left it open until garbage collection).
    with Image.open(image_path) as image:
        # JPEG cannot encode RGBA/palette images (common for PNGs) and
        # Pillow raises OSError for them — normalize to RGB first.
        if image.mode != "RGB":
            image = image.convert("RGB")
        # The inference API parameter needs raw bytes, so re-encode the
        # image through an in-memory buffer.
        buffer = io.BytesIO()
        image.save(buffer, format="JPEG")
        image_in_bytes = buffer.getvalue()
    client = InferenceClient()
    results = client.visual_question_answering(
        image_in_bytes, question, model="dandelin/vilt-b32-finetuned-vqa"
    )
    print(results)
    return results
# https://medium.com/@draj0718/deploying-deep-learning-model-using-flask-api-810047f090ac
# def read_image(image_path = "images/Commercial-Ext1.jpeg"):
# image = load_img(image_path, target_size=(224, 224))
# x = image.img_to_array(img)
# x = np.expand_dims(x, axis=0)
# x = preprocess_input(x)
# return x
# Use the Image Classification Model from Justin
# def classify_image(image_path = "images/Commercial-Ext1.jpeg"):
# # init Justin's model
# model = load_model(os.path.join('models', 'CommercialResidentialF.h5'))
# model.summary()
# print(tf.__version__)
# print(keras.__version__)
# image_path="images/Commercial-Ext1.jpeg"
# img = cv2.imread(image_path)
# resize = tf.image.resize(img, (256, 256))
# yhat = model.predict(np.expand_dims(resize/255,0))
# print(yhat)
# return yhat
# img = read_image(image_path)
# class_prediction = model.predict(img)
# classes_x = np.argmax(class_prediction, axis=1)
# print(classes_x, class_prediction)
# return {classes_x, class_prediction}