```python
# Approach using finite difference approximation.

# Import the necessary libraries
import matplotlib.pyplot as plt
import numpy as np

# An array ranging from -0.5 to 0.5 with a step of 0.01
inputX = np.arange(-0.5, 0.5, 0.01)

# Random values from a normal distribution to add noise to the input
noise = np.random.normal(0, 0.2, inputX.shape)

# Output values with noise
outputY = inputX + noise
print("Input Values : ", inputX[:2], " Output Values : ", outputY[:2])

# The input dataset is ready: inputX, outputY
plt.scatter(inputX, outputY, c="blue", label="Dataset")

# Initial weight and bias
weight = 0.1  # any value
bias = 1      # any value


# Calculates the predicted values based on the input, weight, and bias.
def linear_regression_equation(input_value, weight, bias):
    # X = independent value, Y = dependent value
    # M = slope, B = intercept/bias
    # y = mx + b
    predicted_value = (weight * input_value) + bias
    return predicted_value


# Calculates the mean squared error (MSE)
def cost_function(input_value, weight, bias, target_value):
    predicted_value = linear_regression_equation(input_value, weight, bias)
    difference = (target_value - predicted_value) ** 2
    return sum(difference) / len(difference)


# Calculates gradients using finite difference approximation.
def gradient_value_using_approx(inputX, outputY, weight, bias):
    # This approach is easy to implement, but it takes
    # more computation power and time.
    f = cost_function
    h = 0.001
    w_grad_val = (f(inputX, weight + h, bias, outputY) - f(inputX, weight, bias, outputY)) / h
    b_grad_val = (f(inputX, weight, bias + h, outputY) - f(inputX, weight, bias, outputY)) / h
    return (w_grad_val, b_grad_val)


# The loss (MSE) before any learning
before_loss_value = cost_function(inputX, weight, bias, outputY)
plt.plot(inputX, linear_regression_equation(inputX, weight, bias),
         c="orange", label="Before Learning")

# Training parameters
epochs = 300
learning_rate = 0.08

# The weight and bias are updated using the learning rate
# and gradients to minimize the loss.
for _ in range(epochs):
    (w_grad_val, b_grad_val) = gradient_value_using_approx(inputX, outputY, weight, bias)
    weight = weight - (learning_rate * w_grad_val)
    bias = bias - (learning_rate * b_grad_val)

# The loss (MSE) after the specified number of epochs
after_loss_value = cost_function(inputX, weight, bias, outputY)
print(f"Loss Value (Before Learning) : {before_loss_value}, "
      f"Loss Value (After Learning) : {after_loss_value}")

# Plot the fitted regression line in green
plt.plot(inputX, linear_regression_equation(inputX, weight, bias),
         c="green", label=f"After {epochs} epochs learning")
plt.legend()
plt.grid(True)
```
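The gradient function above uses a forward finite difference: each partial derivative of the loss is approximated by nudging one parameter by a small step $h$ and measuring how much the cost changes,

$$
\frac{\partial L}{\partial w} \approx \frac{L(w + h,\, b) - L(w,\, b)}{h}, \qquad
\frac{\partial L}{\partial b} \approx \frac{L(w,\, b + h) - L(w,\, b)}{h}.
$$

Every update therefore requires several extra evaluations of the cost function, which is why this approach needs more computation and time than using the derivative equations directly.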
Output:

[Jupyter notebook output: the scatter plot of the dataset with the orange "Before Learning" line and the green "After 300 epochs learning" line, along with the printed loss values before and after learning.]
Derivative Equation
The same code, but this time using the derivative equation approach.
```python
# .... Same code as given in the previous example ....

# Calculates gradients using the derivative equations (recommended way).
def gradient_value_using_rules(inputX, outputY, weight, bias):
    # Using the chain rule to get the derivative of
    # cost_function(linear_regression_equation(input))
    w_grad_val = sum((-2 * inputX) * (outputY - ((weight * inputX) + bias))) / len(inputX)
    b_grad_val = sum(-2 * (outputY - ((weight * inputX) + bias))) / len(inputX)
    return (w_grad_val, b_grad_val)

# .... Just replace gradient_value_using_approx with gradient_value_using_rules ....
```
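These expressions follow from differentiating the MSE loss directly. With predictions $\hat{y}_i = w x_i + b$ and loss $L = \frac{1}{n}\sum_{i}(y_i - \hat{y}_i)^2$, the chain rule gives

$$
\frac{\partial L}{\partial w} = \frac{1}{n}\sum_{i} -2\,x_i\,\bigl(y_i - (w x_i + b)\bigr), \qquad
\frac{\partial L}{\partial b} = \frac{1}{n}\sum_{i} -2\,\bigl(y_i - (w x_i + b)\bigr),
$$

which is exactly what `gradient_value_using_rules` computes in a single pass, without the extra cost-function evaluations required by the finite difference approach.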