-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathperceptron.py
More file actions
113 lines (82 loc) · 3.19 KB
/
perceptron.py
File metadata and controls
113 lines (82 loc) · 3.19 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
#!/usr/bin/env python3
# @file: perceptron.py
# @auth: Stephen Marsland (http://stephenmonika.net)
# @date: 2017-09-25 23:11:27 Mon 25 Sep
# Code from Chapter 3 of Machine Learning: An Algorithmic Perspective (2nd
# Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008, 2014
import numpy as np
class pcn:
    """A basic perceptron with threshold (step) activations.

    Weights have shape (nIn + 1, nOut); the extra row is the bias,
    implemented by appending a constant -1 column to every input batch.
    """

    def __init__(self, inputs, targets):
        """Size the network from the training data and initialise weights.

        inputs  -- array of shape (nData, nIn); a 1-D array is treated as nIn == 1
        targets -- array of shape (nData, nOut); a 1-D array is treated as nOut == 1
        """
        # Set up network size from the shapes of the training data.
        self.nIn = np.shape(inputs)[1] if np.ndim(inputs) > 1 else 1
        self.nOut = np.shape(targets)[1] if np.ndim(targets) > 1 else 1
        self.nData = np.shape(inputs)[0]
        # Small random weights in [-0.05, 0.05); one extra row for the bias.
        self.weights = np.random.rand(self.nIn + 1, self.nOut) * 0.1 - 0.05

    def pcntrain(self, inputs, targets, eta, nIterations):
        """Train for nIterations epochs with learning rate eta (batch updates)."""
        # BUGFIX: use the row count of *these* inputs rather than the
        # constructor's self.nData, so training on a dataset of a different
        # size no longer raises a concatenation shape error.
        nData = np.shape(inputs)[0]
        # Append the constant -1 input that feeds the bias weight row.
        inputs = np.concatenate((inputs, -np.ones((nData, 1))), axis=1)
        for n in range(nIterations):
            self.activations = self.pcnfwd(inputs)
            # Batch perceptron rule: w <- w - eta * X^T (y - t)
            self.weights -= eta * \
                np.dot(np.transpose(inputs), self.activations - targets)

    def pcnfwd(self, inputs):
        """Run the network forward.

        inputs must already include the -1 bias column (nIn + 1 columns).
        Returns the thresholded activations: 1 where the weighted sum is
        strictly positive, else 0.
        """
        activations = np.dot(inputs, self.weights)
        return np.where(activations > 0, 1, 0)

    def confmat(self, inputs, targets):
        """Print the confusion matrix and overall accuracy on (inputs, targets)."""
        # BUGFIX: same self.nData issue as pcntrain — size the bias column
        # from the data actually being evaluated.
        nData = np.shape(inputs)[0]
        inputs = np.concatenate((inputs, -np.ones((nData, 1))), axis=1)
        outputs = np.dot(inputs, self.weights)

        nClasses = np.shape(targets)[1]
        if nClasses == 1:
            # Single output node: two classes, thresholded at 0.
            nClasses = 2
            outputs = np.where(outputs > 0, 1, 0)
        else:
            # 1-of-N encoding: predicted class is the largest activation.
            outputs = np.argmax(outputs, 1)
            targets = np.argmax(targets, 1)

        cm = np.zeros((nClasses, nClasses))
        for i in range(nClasses):
            for j in range(nClasses):
                cm[i, j] = np.sum(np.where(outputs == i, 1, 0)
                                  * np.where(targets == j, 1, 0))
        print(cm)
        print(np.trace(cm) / np.sum(cm))
def logic():
    """Demonstrate the perceptron on the AND and XOR logic functions."""
    # Truth tables: columns 0-1 are the inputs, column 2 is the target.
    and_table = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]])
    xor_table = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]])

    # AND first, then XOR — same construction order as before, so the
    # random weight draws are consumed identically.
    for table in (and_table, xor_table):
        features, labels = table[:, 0:2], table[:, 2:]
        net = pcn(features, labels)
        net.pcntrain(features, labels, 0.25, 10)
        net.confmat(features, labels)
# Entry point: run the AND/XOR demo only when executed as a script,
# not when imported as a module.
if __name__ == '__main__':
    logic()