Python numpy ValueError shapes not aligned
Disclaimer: this page is a translation of a popular StackOverflow question, provided under the CC BY-SA 4.0 license. You are free to use or share it, but you must do so under the same license, cite the original URL, and attribute it to the original authors (not me): StackOverflow
Original URL: http://stackoverflow.com/questions/35032685/
numpy ValueError shapes not aligned
Asked by maazza
So I am trying to adapt the neural network from Michael Nielsen's http://neuralnetworksanddeeplearning.com/chap1.html
I modified network.py to work on Python 3 and made a small script to test it with a few 15x10 pictures of digits.
import os
import numpy as np
from network import Network
from PIL import Image

BLACK = 0
WHITE = 255

cdir = "cells"
cells = []
for cell in os.listdir(cdir):
    img = Image.open(os.path.join(cdir, cell))
    number = cell.split(".")[0][-1]
    pixels = img.load()
    pdata = []
    for x in range(img.width):
        for y in range(img.height):
            pdata.append(1 if pixels[x, y] == WHITE else 0)
    cells.append((np.array(pdata), int(number)))

net = Network([150, 30, 10])
net.SGD(cells, 100, 1, 3.0, cells)
However I get this error:
File "network.py", line 117, in backprop
    nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
ValueError: shapes (30,30) and (150,) not aligned: 30 (dim 1) != 150 (dim 0)
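The failing call can be reproduced in isolation with the two shapes from the traceback; a minimal sketch:

import numpy as np

delta = np.zeros((30, 30))    # shape of the first operand in the traceback
activation = np.zeros(150)    # flat 1-D array, shape (150,)

# transpose() is a no-op on a 1-D array, so np.dot still sees (30,30) x (150,)
# and raises: shapes (30,30) and (150,) not aligned: 30 (dim 1) != 150 (dim 0)
np.dot(delta, activation.transpose())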
I tried this with a boolean AND without a problem; it seems like an issue with numpy on Python 3 that is incompatible with Python 2.7?
EDIT: tried the boolean AND with a different number of neurons for the hidden layer and the input layer and it fails.
EDIT2: it does not work on Python 2.7 either. Here is the modified network.py:
import random
import numpy as np

class Network(object):

    def __init__(self, sizes):
        """The list ``sizes`` contains the number of neurons in the
        respective layers of the network. For example, if the list
        was [2, 3, 1] then it would be a three-layer network, with the
        first layer containing 2 neurons, the second layer 3 neurons,
        and the third layer 1 neuron. The biases and weights for the
        network are initialized randomly, using a Gaussian
        distribution with mean 0, and variance 1. Note that the first
        layer is assumed to be an input layer, and by convention we
        won't set any biases for those neurons, since biases are only
        ever used in computing the outputs from later layers."""
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward(self, a):
        """Return the output of the network if ``a`` is input."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            test_data=None):
        """Train the neural network using mini-batch stochastic
        gradient descent. The ``training_data`` is a list of tuples
        ``(x, y)`` representing the training inputs and the desired
        outputs. The other non-optional parameters are
        self-explanatory. If ``test_data`` is provided then the
        network will be evaluated against the test data after each
        epoch, and partial progress printed out. This is useful for
        tracking progress, but slows things down substantially."""
        if test_data: n_test = len(test_data)
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print("Epoch {0}: {1} / {2}".format(
                    j, self.evaluate(test_data), n_test))
            else:
                print("Epoch {0} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
        """Update the network's weights and biases by applying
        gradient descent using backpropagation to a single mini batch.
        The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``
        is the learning rate."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x. ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * \
            sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Note that the variable l in the loop below is used a little
        # differently to the notation in Chapter 2 of the book. Here,
        # l = 1 means the last layer of neurons, l = 2 is the
        # second-last layer, and so on. It's a renumbering of the
        # scheme in the book, used here to take advantage of the fact
        # that Python can use negative indices in lists.
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)

    def evaluate(self, test_data):
        """Return the number of test inputs for which the neural
        network outputs the correct result. Note that the neural
        network's output is assumed to be the index of whichever
        neuron in the final layer has the highest activation."""
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
        """Return the vector of partial derivatives \partial C_x /
        \partial a for the output activations."""
        return (output_activations-y)

#### Miscellaneous functions
def sigmoid(z):
    """The sigmoid function."""
    return 1.0/(1.0+np.exp(-z))

def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z)*(1-sigmoid(z))
Accepted answer by maazza
Ok, found the bug, I had to reshape the data:
cells.append((np.reshape(np.array(pdata), (150, 1)), int(number)))  # store a (150, 1) column vector together with its label
It seems like arrays with dimensions (x,1) and (x,) are treated differently by numpy during computations.
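A quick way to see the difference, assuming the 150-30-10 layout from the question:

import numpy as np

w = np.random.randn(30, 150)    # first weight matrix of Network([150, 30, 10])
b = np.random.randn(30, 1)      # bias column vector

x_flat = np.random.randn(150)        # shape (150,)
x_col = x_flat.reshape(150, 1)       # shape (150, 1)

print((np.dot(w, x_flat) + b).shape)  # (30, 30): the (30,) result broadcasts against (30, 1)
print((np.dot(w, x_col) + b).shape)   # (30, 1): stays a column vector, as the network expects

With the flat input, broadcasting silently turns every activation into a matrix, and backprop later fails with the "shapes not aligned" error; reshaping each input to (150, 1) keeps everything as column vectors.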