[DeepLearning] Trying sin-function approximation with Caffe (failed)

Started studying DeepLearning.
My machine runs Windows, so I installed Caffe for a start.
The MNIST sample ran fine, so the environment is OK.
With that out of the way, I tried making it approximate the sin function.

Nonlinear function approximation

Let's try approximating the sin function, which a fair number of people have already done with Chainer.

sinApproximation.cpp
#include <iostream>
#include <array>
#include <memory> 
#include <cassert>
#include <random> 
#include <caffe/caffe.hpp> 
#include <stdio.h>
#include <stdlib.h>
#include <cmath>

using namespace std;
using namespace caffe;

#define SOLVER_PATH "solver.prototxt"
#define DIMENSION 1
#define BATCH_SIZE 256
#define COUNT 10
#define PI 3.141592

int main(int argc, char** argv) { 
	const int kDataSize = BATCH_SIZE * COUNT;
	std::array<float, BATCH_SIZE * COUNT * DIMENSION> input_data;
	std::array<float, BATCH_SIZE * COUNT * DIMENSION> target_data;

	float x = 0;
	
	for (auto i = 0; i < kDataSize; ++i) {
		float target = sin(x);
		input_data[i] = x;
				
		target_data[i] = target;
		x += 0.5;
	} 

	// Regression doesn't use the labels, so prepare dummy data for the MemoryDataLayers
	std::array<float, BATCH_SIZE * COUNT> dummy_data; 
	std::fill(dummy_data.begin(), dummy_data.end(), 0.0); 

	// Run in GPU mode
	Caffe::set_mode(Caffe::GPU);

	// Configure the solver
	caffe::SolverParameter solver_param; 
	caffe::ReadProtoFromTextFileOrDie(SOLVER_PATH, &solver_param); 
	SGDSolver<float> solver(solver_param);
	const auto net = solver.net(); 

	// Input layer: "input"
	const auto input_layer = 
		boost::dynamic_pointer_cast<caffe::MemoryDataLayer<float>>(net->layer_by_name("input")); 
	input_layer->Reset(input_data.data(), dummy_data.data(), kDataSize);

	// Target layer: "target"
	const auto target_layer = 
		boost::dynamic_pointer_cast<caffe::MemoryDataLayer<float>>(net->layer_by_name("target")); 
	target_layer->Reset(target_data.data(), dummy_data.data(), kDataSize); 

	// Run training
	solver.Solve(); 

	// Run inference with the (supposedly) trained model
	input_layer->Reset(input_data.data(), dummy_data.data(), kDataSize); 
	net->ForwardPrefilled(nullptr);
	
	FILE *outputfile; 
	outputfile = fopen("output.txt", "w"); 

	for (auto i = 0; i < BATCH_SIZE; ++i) { 
		// Dump each layer's values to a file (the data_at(i,1,1,1) indexing here may itself be wrong)
		fprintf(outputfile, "%f\t %f\t %f\t %f\t %f\t %f\t %f\t %f\t %f\t %f\t %f\t %f\t %f \n",
			net->blob_by_name("ip1")->cpu_data()[i],
			net->blob_by_name("ip1")->data_at(i,1,1,1),
			net->blob_by_name("relu1")->cpu_data()[i],
			net->blob_by_name("ip2")->cpu_data()[i],
			net->blob_by_name("ip2")->data_at(i,1,1,1),
			net->blob_by_name("relu2")->cpu_data()[i],
			net->blob_by_name("ip3")->cpu_data()[i],
			net->blob_by_name("ip3")->data_at(i,1,1,1),
			net->blob_by_name("loss")->cpu_data()[i],
			net->blob_by_name("loss")->data_at(i,1,1,1),
			net->blob_by_name("target")->cpu_data()[i],
			input_data[i],
			target_data[i]);
	} 
	fclose(outputfile); 
}

The configuration files look like this.

solver.prototxt
train_net:"layers.prototxt"
base_lr:0.0001
momentum:0.9
lr_policy:"inv
gamma:0.01
stepsize:1
max_iter:256
display:10
solver_mode: GPU
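
For reference, Caffe's "inv" learning-rate policy decays the rate as base_lr * (1 + gamma * iter)^(-power). Note that the solver.prototxt above never sets power, so the exponent falls back to Caffe's default; the value in this little sketch is an assumption purely to show the shape of the schedule.

// Hedged sketch (plain C++, not Caffe code) of the "inv" schedule:
//   lr = base_lr * pow(1 + gamma * iter, -power)
#include <cmath>
#include <cstdio>

int main() {
	const float base_lr = 0.0001f, gamma = 0.01f; // values from solver.prototxt
	const float power = 0.75f;                    // assumed; not set above
	for (int iter = 0; iter <= 256; iter += 64)
		std::printf("iter %3d -> lr %.6g\n", iter,
		            base_lr * std::pow(1.0f + gamma * iter, -power));
	return 0;
}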
layers.prototxt
name:"STUDY"
layers{
	name:"input"
	type:MEMORY_DATA
	top:"ip1"
	top:"dummy_label1"
	memory_data_param{
		batch_size:256
		channels:1
		height:1
		width:1
	}
}
layers{
	name:"ip1"
	type:INNER_PRODUCT
	bottom:"ip1"
	top:"relu1"
	inner_product_param{
		num_output:256
	weight_filler{
		type: "xavier"
	}
	bias_filler{
		type: "constant"
		value:0
	}
	}
}
layers{
	name:"relu1"
	type:RELU
	bottom:"relu1"
	top:"ip2"
}
layers{
	name:"ip2"
	type:INNER_PRODUCT
	bottom:"ip2"
	top:"relu2"
	inner_product_param{
		num_output:256
	weight_filler{
		type: "xavier"
	}
	bias_filler{
		type: "constant"
		value:0
	}
	}
}
layers{
	name:"relu2"
	type:RELU
	bottom:"relu2"
	top:"ip3"
}
layers{
	name:"ip3"
	type:INNER_PRODUCT
	bottom:"ip3"
	top:"loss"
	inner_product_param{
		num_output:1
	weight_filler{
		type: "xavier"
	}
	bias_filler{
		type: "constant"
		value:0
	}
	}
}
layers{
	name:"target"
	type:MEMORY_DATA
	top:"target"
	top:"dummy_label2"
	memory_data_param{
		batch_size:256
		channels:1
		height:1
		width:1
	}
}
layers{
	name:"loss"
	type:EUCLIDEAN_LOSS
	bottom:"target"
	bottom:"loss"
}

For the activation I was torn between SIGMOID and ReLU, but decided to just go with ReLU for now.

InnerProduct → ReLU → InnerProduct → ReLU → InnerProduct ← this last value is what I want

I heard somewhere that when you use ReLU, you can approximate pretty much anything as long as the inner product layers have plenty of units, so I went with 256 for now.
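
As a sanity check on that claim, here's a toy sketch (plain C++, no Caffe) of why it holds: a weighted sum of shifted ReLUs is a piecewise-linear function, and with enough hinges it can trace sin as closely as you like. All the knots and weights below are hand-derived for illustration, not learned.

// Toy sketch: piecewise-linear interpolation of sin(x) on [0, 2*pi]
// built from K shifted ReLU "units".
#include <cmath>
#include <cstdio>

static float relu(float v) { return v > 0.0f ? v : 0.0f; }

int main() {
	const int K = 16;                         // number of hinge "units"
	const float PI = 3.141592f;
	const float step = 2.0f * PI / K;         // knot spacing
	float w[K];
	float prev_slope = 0.0f;
	for (int k = 0; k < K; ++k) {
		float xk = k * step;
		// slope of the interpolating segment starting at this knot
		float slope = (std::sin(xk + step) - std::sin(xk)) / step;
		w[k] = slope - prev_slope;            // each unit adds a slope change
		prev_slope = slope;
	}
	// Compare the hinge sum against sin(x) at a few points.
	for (float x = 0.25f; x < 2.0f * PI; x += PI / 4.0f) {
		float y = 0.0f;                       // sin(0), the left endpoint
		for (int k = 0; k < K; ++k)
			y += w[k] * relu(x - k * step);
		std::printf("x=%5.2f  relu-sum=%8.4f  sin=%8.4f\n", x, y, std::sin(x));
	}
	return 0;
}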

Time to run it.
.....

Not even close.
Come to think of it, is this even the right way to read out the results...?
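
For what it's worth, this is how I'd expect the readout to work if the wiring above is right; it's a guess, not a verified fix. With the prototxt above, the prediction blob is ip3's top, which is (confusingly) named "loss" — the Euclidean loss layer itself declares no top here.

// Hedged sketch: read the n scalar predictions after a forward pass,
// assuming the net and blob names from the listing above. Call it right
// after ForwardPrefilled(), e.g.:
//   DumpPredictions(net, input_data.data(), target_data.data(), BATCH_SIZE);
#include <cstdio>
#include <caffe/caffe.hpp>

void DumpPredictions(const boost::shared_ptr<caffe::Net<float>>& net,
                     const float* inputs, const float* targets, int n) {
	const float* pred = net->blob_by_name("loss")->cpu_data(); // ip3's top
	for (int i = 0; i < n; ++i)
		printf("x=%f\tpredicted=%f\tsin(x)=%f\n", inputs[i], pred[i], targets[i]);
}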

Caffe material on the Web seems to be mostly about image classification, with little information about regression.

If anyone has experience with this, any advice would be much appreciated...
