ValueError: Layer "model" expects 1 input(s), but it received 8 input tensors.
Error in deep learning
I am a beginner doing deep learning with Python.
I built a U-Net aiming at artifact removal and, as a first test, tried training it on a small dataset of 10 images (512*512), at which point the error below occurred.
I have been stuck on this for two weeks now. Any advice, however small, would be appreciated.
Problem / error encountered
Epoch 1/2
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Input In [3], in <cell line: 3>()
1 # training
2 data_valid =(x_test, y_test)
----> 3 history = model.fit(x_train, y_train, batch_size=2, epochs=2,verbose=2, validation_data=data_valid,shuffle=True)
File ~\anaconda3\lib\site-packages\keras\utils\traceback_utils.py:67, in filter_traceback.<locals>.error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
File ~\AppData\Local\Temp\__autograph_generated_fileqpnspcz3.py:15, in outer_factory.<locals>.inner_factory.<locals>.tf__train_function(iterator)
13 try:
14 do_return = True
---> 15 retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
16 except:
17 do_return = False
ValueError: in user code:
File "C:\Users\+\anaconda3\lib\site-packages\keras\engine\training.py", line 1051, in train_function *
return step_function(self, iterator)
File "C:\Users\+\anaconda3\lib\site-packages\keras\engine\training.py", line 1040, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\+\anaconda3\lib\site-packages\keras\engine\training.py", line 1030, in run_step **
outputs = model.train_step(data)
File "C:\Users\+\anaconda3\lib\site-packages\keras\engine\training.py", line 889, in train_step
y_pred = self(x, training=True)
File "C:\Users\+\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\+\anaconda3\lib\site-packages\keras\engine\input_spec.py", line 200, in assert_input_compatibility
raise ValueError(f'Layer "{layer_name}" expects {len(input_spec)} input(s),'
ValueError: Layer "model" expects 1 input(s), but it received 8 input tensors. Inputs received: [<tf.Tensor 'IteratorGetNext:0' shape=(2, 512, 1) dtype=float32>, <tf.Tensor 'IteratorGetNext:1' shape=(2, 512, 1) dtype=float32>, <tf.Tensor 'IteratorGetNext:2' shape=(2, 512, 1) dtype=float32>, <tf.Tensor 'IteratorGetNext:3' shape=(2, 512, 1) dtype=float32>, <tf.Tensor 'IteratorGetNext:4' shape=(2, 512, 1) dtype=float32>, <tf.Tensor 'IteratorGetNext:5' shape=(2, 512, 1) dtype=float32>, <tf.Tensor 'IteratorGetNext:6' shape=(2, 512, 1) dtype=float32>, <tf.Tensor 'IteratorGetNext:7' shape=(2, 512, 1) dtype=float32>]
Relevant source code
import os
from sklearn.model_selection import train_test_split

# dataset paths
ORG_DATA = os.path.join(DATASET, 'data4')
SV_DATA = os.path.join(DATASET, 'data5')
# build the U-Net
model = Unet()
# model summary
model.summary()
# load the images as numpy arrays
x = ImageLoder(ORG_DATA).load()
y = ImageLoder(SV_DATA).load()
# split into training and validation sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# training
history = model.fit(x_train, y_train, batch_size=1, epochs=2, verbose=2, validation_data=(x_test, y_test), shuffle=True)
The error occurs at the model.fit call.
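Since the error says the single-input model received 8 separate input tensors, it seems worth checking what type and shape x_train actually has at the point of the fit call. A minimal check, assuming x_train and y_train come straight from the train_test_split above, might look like this:

import numpy as np

# If x_train is still a plain Python list here, Keras treats each list element
# as a separate model input rather than as one batch of samples.
print(type(x_train), len(x_train))   # e.g. <class 'list'> with 8 elements
print(np.asarray(x_train).shape)     # a single 4-D array would be (8, 512, 512, 1)
print(model.input_shape)             # the model itself expects (None, 512, 512, 1)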
Unet code
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Concatenate,BatchNormalization,Activation
# U-Net definition
def Unet(class_num = 2, gpu_num = 1):
    input_layer = Input(shape=(512,512,1))
    # encoder block 1 (32 filters, at 512x512)
    conv11 = Conv2D(32,(3,3),padding='same')(input_layer)
    batch11 = BatchNormalization()(conv11)
    acti11 = Activation('relu')(batch11)
    conv12 = Conv2D(32,(3,3), padding='same')(acti11)
    batch12 = BatchNormalization()(conv12)
    acti12 = Activation('relu')(batch12)
    conv13 = Conv2D(32,(3,3),padding='same')(acti12)
    batch13 = BatchNormalization()(conv13)
    acti13 = Activation('relu')(batch13)
    conv14 = Conv2D(32,(3,3), padding='same')(acti13)
    batch14 = BatchNormalization()(conv14)
    acti14 = Activation('relu')(batch14)
    pool1 = MaxPooling2D()(acti14)
    # encoder block 2 (64 filters, at 256x256)
    conv21 = Conv2D(64, (3,3), padding = 'same')(pool1)
    batch21 = BatchNormalization()(conv21)
    acti21 = Activation('relu')(batch21)
    conv22 = Conv2D(64, (3,3), padding = 'same')(acti21)
    batch22 = BatchNormalization()(conv22)
    acti22 = Activation('relu')(batch22)
    conv23 = Conv2D(64, (3,3), padding = 'same')(acti22)
    batch23 = BatchNormalization()(conv23)
    acti23 = Activation('relu')(batch23)
    conv24 = Conv2D(64, (3,3), padding = 'same')(acti23)
    batch24 = BatchNormalization()(conv24)
    acti24 = Activation('relu')(batch24)
    pool2 = MaxPooling2D()(acti24)
    # encoder block 3 (128 filters, at 128x128)
    conv31 = Conv2D(128, (3,3), padding = 'same')(pool2)
    batch31 = BatchNormalization()(conv31)
    acti31 = Activation('relu')(batch31)
    conv32 = Conv2D(128, (3,3), padding = 'same')(acti31)
    batch32 = BatchNormalization()(conv32)
    acti32 = Activation('relu')(batch32)
    conv33 = Conv2D(128, (3,3), padding = 'same')(acti32)
    batch33 = BatchNormalization()(conv33)
    acti33 = Activation('relu')(batch33)
    conv34 = Conv2D(128, (3,3), padding = 'same')(acti33)
    batch34 = BatchNormalization()(conv34)
    acti34 = Activation('relu')(batch34)
    pool3 = MaxPooling2D()(acti34)
    # encoder block 4 (256 filters, at 64x64)
    conv41 = Conv2D(256, (3,3), padding = 'same')(pool3)
    batch41 = BatchNormalization()(conv41)
    acti41 = Activation('relu')(batch41)
    conv42 = Conv2D(256, (3,3), padding = 'same')(acti41)
    batch42 = BatchNormalization()(conv42)
    acti42 = Activation('relu')(batch42)
    conv43 = Conv2D(256, (3,3), padding = 'same')(acti42)
    batch43 = BatchNormalization()(conv43)
    acti43 = Activation('relu')(batch43)
    conv44 = Conv2D(256, (3,3), padding = 'same')(acti43)
    batch44 = BatchNormalization()(conv44)
    acti44 = Activation('relu')(batch44)
    pool4 = MaxPooling2D()(acti44)
    # encoder block 5 (512 filters, at 32x32)
    conv01 = Conv2D(512, (3,3), padding = 'same')(pool4)
    batch01 = BatchNormalization()(conv01)
    acti01 = Activation('relu')(batch01)
    conv02 = Conv2D(512, (3,3), padding = 'same')(acti01)
    batch02 = BatchNormalization()(conv02)
    acti02 = Activation('relu')(batch02)
    conv03 = Conv2D(512, (3,3), padding = 'same')(acti02)
    batch03 = BatchNormalization()(conv03)
    acti03 = Activation('relu')(batch03)
    conv04 = Conv2D(512, (3,3), padding = 'same')(acti03)
    batch04 = BatchNormalization()(conv04)
    acti04 = Activation('relu')(batch04)
    pool0 = MaxPooling2D()(acti04)
    # bottleneck (1024 filters, at 16x16)
    conv51 = Conv2D(1024, (3,3),padding = 'same')(pool0)
    batch51 = BatchNormalization()(conv51)
    acti51 = Activation('relu')(batch51)
    conv52 = Conv2D(1024, (3,3), padding = 'same')(acti51)
    batch52 = BatchNormalization()(conv52)
    acti52 = Activation('relu')(batch52)
    # decoder: upsample and concatenate with the matching encoder output
    up6 = UpSampling2D()(acti52)
    concat6 = Concatenate()([up6,acti04])
    conv61 = Conv2D(512, (3,3), padding = 'same')(concat6)
    batch61 = BatchNormalization()(conv61)
    acti61 = Activation('relu')(batch61)
    conv62 = Conv2D(512, (3,3), padding = 'same')(acti61)
    batch62 = BatchNormalization()(conv62)
    acti62 = Activation('relu')(batch62)
    conv63 = Conv2D(512, (3,3), padding = 'same')(acti62)
    batch63 = BatchNormalization()(conv63)
    acti63 = Activation('relu')(batch63)
    conv64 = Conv2D(512, (3,3), padding = 'same')(acti63)
    batch64 = BatchNormalization()(conv64)
    acti64 = Activation('relu')(batch64)
    up10 = UpSampling2D()(acti64)
    concat10 = Concatenate()([up10,acti44])
    conv101 = Conv2D(256, (3,3), padding = 'same')(concat10)
    batch101 = BatchNormalization()(conv101)
    acti101 = Activation('relu')(batch101)
    conv102 = Conv2D(256, (3,3), padding = 'same')(acti101)
    batch102 = BatchNormalization()(conv102)
    acti102 = Activation('relu')(batch102)
    up7 = UpSampling2D()(acti102)
    concat7 = Concatenate()([up7,acti34])
    conv71 = Conv2D(128, (3,3), padding = 'same')(concat7)
    batch71 = BatchNormalization()(conv71)
    acti71 = Activation('relu')(batch71)
    conv72 = Conv2D(128, (3,3), padding = 'same')(acti71)
    batch72 = BatchNormalization()(conv72)
    acti72 = Activation('relu')(batch72)
    up8 = UpSampling2D()(acti72)
    concat8 = Concatenate()([up8,acti24])
    conv81 = Conv2D(64, (3,3), padding = 'same')(concat8)
    batch81 = BatchNormalization()(conv81)
    acti81 = Activation('relu')(batch81)
    conv82 = Conv2D(64, (3,3), padding = 'same')(acti81)
    batch82 = BatchNormalization()(conv82)
    acti82 = Activation('relu')(batch82)
    up9 = UpSampling2D()(acti82)
    concat9 = Concatenate()([up9,acti14])
    conv91 = Conv2D(32, (3,3), padding = 'same')(concat9)
    batch91 = BatchNormalization()(conv91)
    acti91 = Activation('relu')(batch91)
    conv92 = Conv2D(32, (3,3), padding = 'same')(acti91)
    batch92 = BatchNormalization()(conv92)
    acti92 = Activation('relu')(batch92)
    # single-channel sigmoid output
    output_layer = Conv2D(1,(3,3), activation='sigmoid',padding='same')(acti92)
    model = Model(inputs = input_layer, outputs = output_layer)
    if gpu_num >= 2:
        model = multi_model(model,gpus=gpu_num)
    adam = Adam(lr=0.0001)
    model.compile(optimizer=adam, loss='mean_squared_error',metrics=['acc'])
    return model
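For reference, the input specification of the model returned by Unet() can be checked directly; a small sketch assuming the defaults above:

model = Unet()
# a single-input functional model: Keras expects one 4-D array of
# shape (batch, 512, 512, 1), not a list of separate tensors
print(len(model.inputs))   # 1
print(model.input_shape)   # (None, 512, 512, 1)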
ImageLoder code
import numpy as np
import cv2,os,glob
from PIL import Image
class ImageLoder():
    def __init__(self, dl_path, image_size=512*512):
        self.dl_path = dl_path
        self.image_size = image_size

    def load(self):
        image = []
        # collect the PNG files in the directory
        file = glob.glob(os.path.join(self.dl_path, '*.png'))
        for file_path in file:
            # read the image into a numpy array
            img = Image.open(file_path)
            img = np.array(img)
            # normalization: if the image is all zeros, leave it as is,
            # otherwise divide by its maximum value
            if np.max(img) == 0:
                m = 1
            else:
                m = np.max(img)
            img = img / m
            # reshape to 512x512 with a channel axis and cast to float32
            img = img.reshape(512, 512, -1)
            img = img.astype(np.float32)
            # store in the image list
            image.append(img)
        return image
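Since load() returns a plain Python list of (512, 512, 1) arrays, one way to feed the data to a single-input model is to stack that list into a single 4-D numpy array before calling train_test_split; a minimal sketch, assuming every image is a single-channel 512*512 PNG:

import numpy as np

# stack the list of (512, 512, 1) images into one (N, 512, 512, 1) array,
# so model.fit sees a single input tensor instead of N separate ones
x = np.stack(ImageLoder(ORG_DATA).load())
y = np.stack(ImageLoder(SV_DATA).load())
print(x.shape, y.shape)   # (10, 512, 512, 1) (10, 512, 512, 1) for the 10-image dataset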