model.py
import tensorflow as tf
import numpy as np
import os
import cv2
from tensorflow.keras import layers, models
from sklearn.model_selection import train_test_split
def create_sample_data():
    """Load report images, crop the category field, and build the training arrays."""
    categories = ['休業', '不休', '赤チン', '重大ヒヤリ']
    X = []
    y = []
    # Bounding box of the category field on the report: (x1, y1, x2, y2)
    category_area = (1440, 400, 1870, 500)
    for idx, category in enumerate(categories):
        folder_path = os.path.join('test5/dataset', category)
        for image_name in os.listdir(folder_path):
            image_path = os.path.join(folder_path, image_name)
            img = cv2.imread(image_path)
            if img is None:
                print(f"Failed to load image: {image_path}")
                continue
            # Crop the category region
            x1, y1, x2, y2 = category_area
            roi = img[y1:y2, x1:x2]
            # Convert to grayscale
            gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
            # Resize to the model's input size
            resized = cv2.resize(gray, (64, 64))
            img_array = tf.keras.preprocessing.image.img_to_array(resized)  # shape: (64, 64, 1)
            X.append(img_array)
            y.append(idx)
    # Normalize pixel values to [0, 1]
    X = np.array(X) / 255.0
    y = np.array(y)
    return X, y
def create_model():
    model = models.Sequential([
        layers.Conv2D(32, (3, 3), activation='relu', input_shape=(64, 64, 1)),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(64, (3, 3), activation='relu'),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(64, (3, 3), activation='relu'),
        layers.Flatten(),
        layers.Dense(64, activation='relu'),
        layers.Dense(4, activation='softmax')  # 4-class classification
    ])
    return model
def main():
    # Prepare the data
    X, y = create_sample_data()
    # Split into training and test sets
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )
    # Build and compile the model
    model = create_model()
    model.compile(
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy']
    )
    # Train the model
    history = model.fit(
        X_train, y_train,
        epochs=20,
        batch_size=4,
        validation_data=(X_test, y_test)
    )
    # Evaluate the model
    test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2)
    print(f'\nTest accuracy: {test_acc:.3f}')
    # Save the trained model
    model.save('test4/disaster_report_model.h5')

if __name__ == '__main__':
    main()
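Before running model.py, it can help to verify that the dataset matches the layout create_sample_data() expects (one folder per category under test5/dataset) and that the output folder used by model.save() exists. A minimal sanity-check sketch, using only the paths and labels already assumed above:

check_dataset.py (hypothetical helper, not part of the original scripts)
import os

DATASET_DIR = 'test5/dataset'
CATEGORIES = ['休業', '不休', '赤チン', '重大ヒヤリ']

# Report how many images each category folder contains
for category in CATEGORIES:
    folder = os.path.join(DATASET_DIR, category)
    if not os.path.isdir(folder):
        print(f"Missing folder: {folder}")
        continue
    images = [f for f in os.listdir(folder)
              if f.lower().endswith(('.png', '.jpg', '.jpeg'))]
    print(f"{category}: {len(images)} images")

# Make sure the directory that model.save() writes into exists
os.makedirs('test4', exist_ok=True)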
predict.py
import tensorflow as tf
import numpy as np
import cv2
import os
from tensorflow.keras.models import load_model
def predict_image(image_path):
    if not os.path.exists(image_path):
        raise FileNotFoundError(f"Image file not found: {image_path}")
    categories = ['休業', '不休', '赤チン', '重大ヒヤリ']
    category_area = (1440, 400, 1870, 500)
    model = load_model('test4/disaster_report_model.h5')
    img = cv2.imread(image_path)
    if img is None:
        raise ValueError(f"Failed to load image: {image_path}")
    # Crop the category region
    x1, y1, x2, y2 = category_area
    roi = img[y1:y2, x1:x2]
    # Convert to grayscale
    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    # Resize to the model's input size
    roi_resized = cv2.resize(gray, (64, 64))
    # Add batch and channel dimensions to match the model input (1, 64, 64, 1) and normalize
    roi_array = np.expand_dims(roi_resized, axis=(0, -1)) / 255.0
    # Predict
    predictions = model.predict(roi_array, verbose=0)
    predicted_class = np.argmax(predictions[0])
    confidence = float(predictions[0][predicted_class])
    return {
        'category': categories[predicted_class],
        'confidence': confidence,
        'all_probabilities': {cat: float(prob) for cat, prob in zip(categories, predictions[0])}
    }
def visualize_results(image_path, results):
    img = cv2.imread(image_path)
    img_display = img.copy()
    # Draw the category region
    x1, y1, x2, y2 = (1440, 400, 1870, 500)
    cv2.rectangle(img_display, (x1, y1), (x2, y2), (0, 255, 0), 2)
    # Overlay the prediction result (note: cv2.putText cannot render non-ASCII characters)
    text = f"{results['category']}: {results['confidence']:.2%}"
    cv2.putText(img_display, text, (x1, y1 - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.imshow('Detection Results', img_display)
    cv2.waitKey(3000)  # Display for 3 seconds
    cv2.destroyAllWindows()
if __name__ == '__main__':
    image_path = 'image.png'
    results = predict_image(image_path)
    print(f"Detected category: {results['category']}")
    print(f"Confidence: {results['confidence']:.2%}")
    print("\nProbabilities for all categories:")
    for category, prob in results['all_probabilities'].items():
        print(f"{category}: {prob:.2%}")
    visualize_results(image_path, results)
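Since predict_image() wraps loading, preprocessing, and prediction in one call, it can also be reused to classify a whole folder of reports. A minimal sketch, assuming predict.py is importable from the same directory and a hypothetical reports/ folder of report images:

batch_predict.py (hypothetical helper, not part of the original scripts)
import os
from predict import predict_image

INPUT_DIR = 'reports'  # hypothetical folder of report images

for name in sorted(os.listdir(INPUT_DIR)):
    if not name.lower().endswith(('.png', '.jpg', '.jpeg')):
        continue
    path = os.path.join(INPUT_DIR, name)
    try:
        result = predict_image(path)
        print(f"{name}: {result['category']} ({result['confidence']:.2%})")
    except (FileNotFoundError, ValueError) as err:
        print(f"{name}: skipped ({err})")

Note that predict_image() reloads the model with load_model() on every call, so for large batches it would be faster to load the model once and reuse it.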