Learn Python to A.I. Programming – Lesson 4
This lesson builds a single perceptron from scratch: it starts from random weights, trains on the AND gate with the perceptron learning rule, and plots how the weights, bias, and total error evolve over 130 epochs.

import random
import matplotlib.pyplot as plt

# ============================================================
# 1. INITIALIZATION
# Create random starting values for the perceptron
# ============================================================
w1 = random.uniform(-1, 1)   # weight for input x1
w2 = random.uniform(-1, 1)   # weight for input x2
b = random.uniform(-1, 1)    # bias term (shift of decision boundary)
lr = 0.01                    # learning rate (how fast weights change during training)

print("Before training parameters:")
print(f"w1 = {w1:.2f} w2 = {w2:.2f} b = {b:.2f}\n\n")

# ============================================================
# 2. ACTIVATION FUNCTION (STEP FUNCTION)
# Converts numeric result into 0 or 1
# ============================================================
def activation(z):
    return 1 if z >= 0 else 0   # threshold at 0

# ============================================================
# 3. FORWARD PASS (PERCEPTRON OUTPUT)
# Computes output y = step(w1*x1 + w2*x2 + b)
# ============================================================
def predict(x1, x2):
    return activation(w1 * x1 + w2 * x2 + b)

# ============================================================
# 4. TRAINING DATA (AND LOGIC GATE)
# Inputs (X) and expected outputs (y)
# ============================================================
X = [(0, 0), (0, 1), (1, 0), (1, 1)]   # all input combinations of the AND gate
y = [0, 0, 0, 1]                       # correct outputs of AND

# ============================================================
# 5. STORAGE FOR PLOTTING (track weight changes)
# ============================================================
history_w1 = []
history_w2 = []
history_b = []
history_err = []

# ============================================================
# 6. TRAINING LOOP (PERCEPTRON LEARNING RULE)
# Runs for 130 epochs (full passes through the data)
# ============================================================
for epoch in range(130):
    total_error = 0   # count mistakes in this epoch

    # zip() lets us loop over inputs and targets together
    for (x1, x2), target in zip(X, y):
        out = predict(x1, x2)       # model prediction (0 or 1)
        error = target - out        # difference from expected value
        total_error += abs(error)   # count errors (0 or 1)

        # Update rule: w = w + lr * error * input
        # Only changes the parameters when the prediction is wrong
        w1 = w1 + lr * error * x1
        w2 = w2 + lr * error * x2
        b = b + lr * error

    # Save values for the graph (one point per epoch)
    history_w1.append(w1)
    history_w2.append(w2)
    history_b.append(b)
    history_err.append(total_error)

# ============================================================
# 7. TEST AFTER TRAINING
# ============================================================
print("After training:")
for x1, x2 in X:
    print((x1, x2), "->", predict(x1, x2))

print("\nFinal parameters:")
print(f"w1 = {w1:.2f} w2 = {w2:.2f} b = {b:.2f}")

# ============================================================
# 8. PLOT TRAINING PROGRESS (weights + errors)
# ============================================================
plt.figure(figsize=(10, 6))

# --- plot weight & bias changes ---
plt.subplot(2, 1, 1)
plt.plot(history_w1, label='w1')
plt.plot(history_w2, label='w2')
plt.plot(history_b, label='b')
plt.title("Weights and Bias During Training")
plt.xlabel("Epoch")
plt.ylabel("Value")
plt.legend()

# --- plot error per epoch ---
plt.subplot(2, 1, 2)
plt.plot(history_err, label='Total Error', color='red')
plt.title("Total Error Per Epoch")
plt.xlabel("Epoch")
plt.ylabel("Errors")
plt.legend()

plt.tight_layout()
plt.show()
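To see the update rule in isolation, here is one training step worked by hand. The starting values below are made-up illustrative numbers, not the random ones the script actually draws:

# One perceptron update, worked with illustrative (made-up) values.
w1, w2, b, lr = 0.2, 0.2, -0.5, 0.01
x1, x2, target = 1, 1, 1            # the (1, 1) -> 1 row of the AND gate

z = w1 * x1 + w2 * x2 + b           # 0.2 + 0.2 - 0.5 = -0.1
out = 1 if z >= 0 else 0            # step(-0.1) = 0, a wrong prediction
error = target - out                # 1 - 0 = 1, so an update happens

w1 += lr * error * x1               # 0.2  -> 0.21
w2 += lr * error * x2               # 0.2  -> 0.21
b += lr * error                     # -0.5 -> -0.49

Each such correction nudges the decision boundary a little toward the misclassified point; because lr = 0.01 keeps the nudges small, the error curve in the plot tends to fall gradually rather than in one jump.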
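The script is not tied to AND: the same loop learns any target that is linearly separable. As a sketch of how you might experiment further (not part of the lesson's code), changing only the target list y trains a different gate:

# OR gate: output is 1 whenever at least one input is 1
y = [0, 1, 1, 1]

# NAND gate: the opposite of AND, also linearly separable
# y = [1, 1, 1, 0]

# XOR ([0, 1, 1, 0]) is NOT linearly separable, so a single
# perceptron can never drive total_error to 0 on it, no matter
# how many epochs it trains.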