# Baby steps from
# http://deeplearning.net/software/theano/tutorial/adding.html
import theano.tensor as T
from theano import function

# build a symbolic expression for adding two scalars,
# then compile it into a callable function
x = T.dscalar('x')
y = T.dscalar('y')
z = x + y
f = function([x, y], z)
print(f(2, 4))
print(f(16.2, 9.1))

############################################################

import theano
import theano.tensor as T
import numpy as np

#dtype = theano.config.floatX

# training data: y = a + b*x plus Gaussian noise
train_x = np.arange(-5., 5., 0.1)

def linear_noisy_y_data(x, a, b, sigma):
    return a + b * x + sigma * np.random.randn(x.shape[0])

train_y = linear_noisy_y_data(train_x, 1., 3., 1.2)

# theano symbolic variables
x = T.scalar()
target = T.scalar()

def linear_model(x, p0, p1):
    return p0 + x * p1

# the parameters live in shared variables so the compiled
# training function can update them in place
param0 = theano.shared(0.)
param1 = theano.shared(0.)
prediction = linear_model(x, param0, param1)

# squared-error cost and its gradients w.r.t. the parameters
cost = T.mean(T.sqr(target - prediction))
g_param0 = T.grad(cost=cost, wrt=param0)
g_param1 = T.grad(cost=cost, wrt=param1)

# learning rate
alpha = 0.0005

# one gradient-descent step per call
updates = [[param0, param0 - alpha * g_param0],
           [param1, param1 - alpha * g_param1]]

train_func = theano.function(inputs=[x, target],
                             outputs=cost,
                             updates=updates,
                             allow_input_downcast=True)

# stochastic gradient descent, one sample at a time;
# record the cost of the last sample in each epoch
cost_over_epochs = np.empty(20)
for epoch in range(20):
    for a_x, a_y in zip(train_x, train_y):
        c = train_func(a_x, a_y)
    cost_over_epochs[epoch] = c

import matplotlib.pyplot as plt

#plt.plot(np.arange(20), cost_over_epochs)

# scatter the training data and overlay the fitted line
plt.plot(train_x, train_y, '.')
p0 = param0.eval()
p1 = param1.eval()
plt.plot(np.array([-5., 5.]), np.array([p0 - 5. * p1, p0 + 5. * p1]), '-')
plt.show()
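
############################################################
# Optional sanity check (a minimal sketch, assuming the training
# loop above has already run): compare the parameters learned by
# gradient descent with an ordinary least-squares fit from numpy.
# np.polyfit returns coefficients highest degree first, so the
# slope comes before the intercept; ls_slope and ls_intercept are
# names introduced here. Both fits should land near the true
# values a=1., b=3. used to generate train_y.
ls_slope, ls_intercept = np.polyfit(train_x, train_y, 1)
print('gradient descent: intercept=%.3f slope=%.3f'
      % (param0.eval(), param1.eval()))
print('least squares:    intercept=%.3f slope=%.3f'
      % (ls_intercept, ls_slope))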