Nengo PES learning
nengo_gui
my model schematic
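For reference, as I understand it from the Nengo docs, PES (Prescribed Error Sensitivity) adjusts the decoders of the learned connection along the error signal, roughly:

$$\Delta d_i = -\frac{\kappa}{n}\, E\, a_i$$

where $d_i$ are the decoders of `conn`, $\kappa$ the learning rate, $n$ the number of neurons in `pre`, $a_i$ neuron $i$'s activity, and $E$ the error fed into `conn.learning_rule` (here `post` minus `y_train`, so learning drives `post` toward the label).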

code
```python
import nengo
import numpy as np
import matplotlib.pyplot as plt

# 1 neuron
X_small = 0.2*np.random.rand(401)
Y_small = 3*X_small + 0.02*np.random.randn(len(X_small))
# plt.scatter(X_small[:160], Y_small[:160])
# plt.title("train data")
# plt.show()
# plt.scatter(X_small[160:], Y_small[160:])
# plt.title("test data")
# plt.show()

# many neurons
def inhibit(t):
    # 1.0 inhibits the error population, i.e. stops learning after t = 32 s
    return 1.0 if t > 32 else 0.0

model = nengo.Network()
with model:
    # present one (x, y) sample every 0.1 s
    process_x = nengo.processes.PresentInput(X_small, presentation_time=0.1)
    process_y = nengo.processes.PresentInput(Y_small, presentation_time=0.1)
    x_train = nengo.Node(process_x)
    y_train = nengo.Node(process_y)

    pre = nengo.Ensemble(1000, dimensions=1)
    nengo.Connection(x_train, pre)
    post = nengo.Ensemble(1000, dimensions=1)
    conn = nengo.Connection(pre, post)  # the connection whose decoders are learned

    input_data = nengo.Probe(x_train)
    label_data = nengo.Probe(y_train)
    pre_p = nengo.Probe(pre, synapse=0.005)
    post_p = nengo.Probe(post, synapse=0.005)
    # pre_p = nengo.Probe(pre)
    # post_p = nengo.Probe(post)

    # error = post - y_train
    error = nengo.Ensemble(1000, dimensions=1)
    nengo.Connection(post, error)
    nengo.Connection(y_train, error, transform=-1)

    conn.learning_rule_type = nengo.PES()
    nengo.Connection(error, conn.learning_rule)

    inhib = nengo.Node(inhibit)
    nengo.Connection(inhib, error.neurons, transform=[[-1]] * error.n_neurons)

# dt = 0.001 (default)
# with nengo.Simulator(model) as sim:
#     sim.run(20.0)
```
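As a side note (my own sketch, not part of the script above): the decoders that PES adapts can themselves be probed through the connection's `weights` attribute, which makes the learning progress directly visible. A minimal self-contained example:

```python
import matplotlib.pyplot as plt
import nengo
import numpy as np

with nengo.Network() as net:
    stim = nengo.Node(lambda t: np.sin(t))
    pre = nengo.Ensemble(100, dimensions=1)
    post = nengo.Ensemble(100, dimensions=1)
    nengo.Connection(stim, pre)
    conn = nengo.Connection(pre, post, learning_rule_type=nengo.PES())
    # error = post - stim, so the target is the identity function
    error = nengo.Ensemble(100, dimensions=1)
    nengo.Connection(post, error)
    nengo.Connection(stim, error, transform=-1)
    nengo.Connection(error, conn.learning_rule)
    # probe the learned decoders every 0.1 s
    weights_p = nengo.Probe(conn, "weights", sample_every=0.1)

with nengo.Simulator(net) as sim:
    sim.run(10.0)

# each trace is one neuron's decoder, drifting as PES reduces the error
plt.plot(sim.trange(sample_every=0.1), sim.data[weights_p][:, 0, :5])
plt.xlabel("time (s)")
plt.ylabel("decoder value")
plt.show()
```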
Regression results
- failed
Results (linear, y = 3x + noise)
neuron ensemble (n=1000), data input duration = 0.2 s
- input data
- train and test result 1
- train and test result 2
code
```python
import nengo
import numpy as np
import matplotlib.pyplot as plt

# data
X_small = 0.2*np.random.rand(201)
Y_small = 3*X_small + 0.02*np.random.randn(len(X_small))

########## model ##########
# inhibit learning (the error population) during the test phase
def inhibit(t):
    return 1.0 if t > 32 else 0.0

model = nengo.Network()
with model:
    process_x = nengo.processes.PresentInput(X_small, presentation_time=0.2)
    process_y = nengo.processes.PresentInput(Y_small, presentation_time=0.2)
    x_train = nengo.Node(process_x)
    y_train = nengo.Node(process_y)

    pre = nengo.Ensemble(1000, dimensions=1)
    nengo.Connection(x_train, pre)
    post = nengo.Ensemble(1000, dimensions=1)
    conn = nengo.Connection(pre, post)

    input_data = nengo.Probe(x_train)
    label_data = nengo.Probe(y_train)
    pre_p = nengo.Probe(pre, synapse=0.01)
    post_p = nengo.Probe(post, synapse=0.01)
    # pre_p = nengo.Probe(pre)
    # post_p = nengo.Probe(post)

    # error = post - y_train
    error = nengo.Ensemble(1000, dimensions=1)
    nengo.Connection(post, error)
    nengo.Connection(y_train, error, transform=-1)

    conn.learning_rule_type = nengo.PES()
    nengo.Connection(error, conn.learning_rule)

    inhib = nengo.Node(inhibit)
    nengo.Connection(inhib, error.neurons, transform=[[-1]] * error.n_neurons)

# dt = 0.001 (default)
with nengo.Simulator(model) as sim:
    sim.run(40.0)

########## result ##########
plt.figure(figsize=(12, 6))
end_t = sim.trange().shape[0]
# one index per presented sample (every 0.2 s = 200 steps)
train_index = range(200, int(end_t*0.75), 200)
test_index = range(int(end_t*0.75) + 200, end_t, 200)

plt.subplot(1, 2, 1)
plt.scatter(sim.data[input_data].T[0][train_index], sim.data[label_data].T[0][train_index], c='k', label='train_data')
plt.plot([0, 0.2], [0, 0.6], c='r', linestyle='dashed', linewidth=2, label='ground truth')
plt.scatter(sim.data[pre_p].T[0][train_index], sim.data[post_p].T[0][train_index], c='b', label='model_train')
plt.ylabel("training process")
plt.legend(loc='best')

plt.subplot(1, 2, 2)
plt.scatter(sim.data[input_data].T[0][test_index], sim.data[label_data].T[0][test_index], c='k', label='test_data')
plt.plot([0, 0.2], [0, 0.6], c='r', linestyle='dashed', linewidth=2, label='ground truth')
plt.scatter(sim.data[pre_p].T[0][test_index], sim.data[post_p].T[0][test_index], c='b', label='model_test')
plt.ylabel("test process")
plt.legend(loc='best')
```
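Continuing from the script above, a quick quantitative check that could be added (a sketch of mine, not part of the original run): mean squared error on the sampled test points, comparing the decoded `post` value against the presented label.

```python
# Rough test-set MSE, reusing sim, post_p, label_data and test_index
# from the script above (variable names pred/target/mse are mine).
pred = sim.data[post_p].T[0][test_index]
target = sim.data[label_data].T[0][test_index]
mse = np.mean((pred - target) ** 2)
print(f"test MSE: {mse:.5f}")
```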
Results (quadratic, y = x² + noise)
neuron ensemble (n=1000), data input duration = 0.2 s
- input data
- train and test result
code
```python
import nengo
import numpy as np
import matplotlib.pyplot as plt

# data
X_small = np.random.rand(401) - 0.5
Y_small = X_small**2 + 0.01*np.random.randn(len(X_small))

########## model ##########
# inhibit learning (the error population) during the test phase
def inhibit(t):
    return 1.0 if t > 64 else 0.0

model = nengo.Network()
with model:
    process_x = nengo.processes.PresentInput(X_small, presentation_time=0.2)
    process_y = nengo.processes.PresentInput(Y_small, presentation_time=0.2)
    x_train = nengo.Node(process_x)
    y_train = nengo.Node(process_y)

    pre = nengo.Ensemble(1000, dimensions=1)
    nengo.Connection(x_train, pre)
    post = nengo.Ensemble(1000, dimensions=1)
    conn = nengo.Connection(pre, post)

    input_data = nengo.Probe(x_train)
    label_data = nengo.Probe(y_train)
    pre_p = nengo.Probe(pre, synapse=0.01)
    post_p = nengo.Probe(post, synapse=0.01)
    # pre_p = nengo.Probe(pre)
    # post_p = nengo.Probe(post)

    # error = post - y_train
    error = nengo.Ensemble(1000, dimensions=1)
    nengo.Connection(post, error)
    nengo.Connection(y_train, error, transform=-1)

    conn.learning_rule_type = nengo.PES()
    nengo.Connection(error, conn.learning_rule)

    inhib = nengo.Node(inhibit)
    nengo.Connection(inhib, error.neurons, transform=[[-1]] * error.n_neurons)

# dt = 0.001 (default)
with nengo.Simulator(model) as sim:
    sim.run(80.0)

########## result ##########
plt.figure(figsize=(12, 6))
end_t = sim.trange().shape[0]
train_index = range(200, int(end_t*0.75), 200)
test_index = range(int(end_t*0.75) + 200, end_t, 200)
gt_x = np.arange(-0.5, 0.5, 0.01)
gt_y = gt_x**2

plt.subplot(1, 2, 1)
plt.scatter(sim.data[input_data].T[0][train_index], sim.data[label_data].T[0][train_index], c='k', label='train_data')
plt.plot(gt_x, gt_y, c='r', linestyle='dashed', linewidth=3, label='ground truth')
plt.scatter(sim.data[pre_p].T[0][train_index], sim.data[post_p].T[0][train_index], c='b', label='model_train')
plt.ylabel("training process")
plt.legend(loc='best')
plt.ylim([-0.2, 0.5])

plt.subplot(1, 2, 2)
plt.scatter(sim.data[input_data].T[0][test_index], sim.data[label_data].T[0][test_index], c='k', label='test_data')
plt.plot(gt_x, gt_y, c='r', linestyle='dashed', linewidth=3, label='ground truth')
plt.scatter(sim.data[pre_p].T[0][test_index], sim.data[post_p].T[0][test_index], c='b', label='model_test')
plt.ylabel("test process")
plt.legend(loc='best')
plt.ylim([-0.2, 0.5])
```
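For comparison (a sketch of mine, not from the original post): Nengo can also solve for the x² decoders directly at build time, without any learning, by passing a function to the connection. This is roughly the solution PES converges to here, which fits the observation below about many neurons + rate coding being enough for a single layer.

```python
import nengo
import numpy as np

# Non-learned baseline: decoders for x**2 are computed by least squares
# from pre's tuning curves when the model is built (no PES, no error signal).
with nengo.Network() as static_model:
    stim = nengo.Node(lambda t: np.sin(t) / 2)   # any input in [-0.5, 0.5]
    pre = nengo.Ensemble(1000, dimensions=1)
    post = nengo.Ensemble(1000, dimensions=1)
    nengo.Connection(stim, pre)
    nengo.Connection(pre, post, function=lambda x: x ** 2)
```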
Other observations
- Even a simple regression takes a lot of neurons to implement... how efficient is that...?!
- I expected to need stacked layers, but it turned out unnecessary. On reflection, this probably worked because of the many neurons + LIF rate coding; a more complex function would probably require stacking layers.
- The post probe covers roughly 0~1, and the fit seems better at small values.
- The input data needs some presentation duration, but how long is best is unclear...
- Parameters worth tuning: data_duration, neuron_number, learning_rate (see the sketch below)
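A sketch of how those knobs slot into the scripts above (the variable names are mine; `nengo.PES`'s default `learning_rate` is 1e-4):

```python
data_duration = 0.2    # PresentInput presentation_time, seconds per sample
n_neurons = 1000       # neurons per ensemble
learning_rate = 1e-4   # nengo.PES default

# ...the corresponding lines inside `with model:` become:
#   process_x = nengo.processes.PresentInput(X_small, presentation_time=data_duration)
#   pre = nengo.Ensemble(n_neurons, dimensions=1)
#   conn.learning_rule_type = nengo.PES(learning_rate=learning_rate)
```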
To do next
- Read chapters 3 and 4 of Bekolay's master's thesis
- Stack PES learning layers
- MNIST classification