使用TensorFlow的“IndexError:list index超出范围”错误

前端之家收集整理的这篇文章主要介绍了使用TensorFlow时出现的“IndexError: list index out of range”错误及其解决方法,希望能给大家提供参考。

我用张量流复制“GoogLeNet”,数据集是牛津花17.

这是我的代码.

# This code is implementation of GoogLeNet,which is proposed in "https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf"
# This code is referred from "https://github.com/tflearn/tflearn/blob/master/examples/images/googlenet.py"

from __future__ import division,print_function,absolute_import

# This code is extracted from "https://github.com/tflearn/tflearn/blob/master/tflearn/datasets/oxflower17.py"
import oxflower17

import tensorflow as tf

import numpy as np

# Load Oxford Flowers 17: images resized to 227x227 RGB, labels one-hot (17 classes).
X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

# Graph inputs.
# NOTE(review): the scraped original read "[None,227,3]" and "tf.float32,17]" --
# tokens were lost; restored to 227x227x3 images and 17 classes to match the
# dataset loaded above.
x = tf.placeholder(tf.float32, [None, 227, 227, 3])
y = tf.placeholder(tf.float32, [None, 17])
keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)

# Divide into training (1224 samples) and test (136 samples) sets.
trainX, trainY, testX, testY = X[0:1224], Y[0:1224], X[1224:1360], Y[1224:1360]
# NOTE(review): original reshape args were truncated to "(-1,3)"; restored to
# the full NHWC image shape so it matches the x placeholder.
trainX = trainX.reshape(-1, 227, 227, 3)
testX = testX.reshape(-1, 227, 227, 3)

print(len(trainX))
print(len(testX))

# Parameters
batch_size = 64
test_size = len(testX)

# Create some wrappers
def conv2d(x, W, b, strides):
    """Conv2D wrapper: convolution + bias + ReLU.

    Args:
        x: input tensor in NHWC layout.
        W: filter weights of shape [kh, kw, in_channels, out_channels].
        b: bias vector of shape [out_channels].
        strides: spatial stride, applied to both height and width.

    Returns:
        The activated feature map (same padding).
    """
    # NOTE(review): the scraped original called "tf.nn.Conv2d" (no such name --
    # the API is lowercase tf.nn.conv2d), dropped the W argument, and lost one
    # stride dimension. Restored to the standard NHWC stride vector.
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def maxpool2d(x, k, strides):
    """MaxPool2D wrapper: k x k window, square stride, SAME padding.

    NOTE(review): the scraped original read "ksize=[1,padding='SAME')" -- the
    window and stride vectors were lost; restored to the standard NHWC form.
    """
    return tf.nn.max_pool(x, ksize=[1, k, k, 1],
                          strides=[1, strides, strides, 1], padding='SAME')


def avgpool2d(x, k, strides):
    """AveragePool2D wrapper: k x k window, square stride, SAME padding.

    NOTE(review): the scraped original signature lost the k parameter, yet the
    call site passes three positional args (avgpool2d(..., 7, 1)); restored the
    window size parameter and the full NHWC ksize/strides vectors.
    """
    return tf.nn.avg_pool(x, ksize=[1, k, k, 1],
                          strides=[1, strides, strides, 1], padding='SAME')


def local_response_normalization(incoming, depth_radius=5, bias=1.0,
                                 alpha=0.0001, beta=0.75,
                                 name="LocalResponseNormalization"):
    """Thin wrapper around tf.nn.lrn with GoogLeNet-style default hyper-parameters."""
    return tf.nn.lrn(incoming,
                     depth_radius=depth_radius,
                     bias=bias,
                     alpha=alpha,
                     beta=beta,
                     name=name)


# Per-layer weight/bias variables. The article elides the actual entries;
# each key referenced below (e.g. 'w_c1_77') must map to a tf.Variable.
weights = {
    ...
}

biases = {
    ...
}

# Create NN
# NOTE(review): the scraped original read "shape=[-1,1]" -- the shape list was
# truncated. The x placeholder is [None, 227, 227, 3], so restore that shape.
x = tf.reshape(x, shape=[-1, 227, 227, 3])

# Stem: 7x7/2 conv -> 3x3/2 max-pool -> LRN
conv1_7_7 = conv2d(x, weights['w_c1_77'], biases['b_c1_77'], strides=2)
pool1_3_3 = maxpool2d(conv1_7_7, k=3, strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)

# 1x1 reduce -> 3x3 conv -> LRN -> 3x3/2 max-pool
conv2_1_1 = conv2d(pool1_3_3, weights['w_c2_11'], biases['b_c2_11'], strides=1)
conv2_3_3 = conv2d(conv2_1_1, weights['w_c2_33'], biases['b_c2_33'], strides=1)
conv2_3_3_lrn = local_response_normalization(conv2_3_3)
# NOTE(review): scraping dropped the "k=3" window argument here; restored.
pool2_3_3 = maxpool2d(conv2_3_3_lrn, k=3, strides=2)

# Inception module (3a): four parallel branches over the same input.
inception_3a_1_1 = conv2d(pool2_3_3, weights['w_inception_3a_11'], biases['b_inception_3a_11'], strides=1)
inception_3a_3_3_reduce = conv2d(pool2_3_3, weights['w_inception_3a_33_reduce'], biases['b_inception_3a_33_reduce'], strides=1)
inception_3a_3_3 = conv2d(inception_3a_3_3_reduce, weights['w_inception_3a_33'], biases['b_inception_3a_33'], strides=1)
inception_3a_5_5_reduce = conv2d(pool2_3_3, weights['w_inception_3a_55_reduce'], biases['b_inception_3a_55_reduce'], strides=1)
inception_3a_5_5 = conv2d(inception_3a_5_5_reduce, weights['w_inception_3a_55'], biases['b_inception_3a_55'], strides=1)
# NOTE(review): "k=3" was also lost here; a 3x3/1 pool is the GoogLeNet branch.
inception_3a_maxpool = maxpool2d(pool2_3_3, k=3, strides=1)
inception_3a_maxpool_reduce = conv2d(inception_3a_maxpool, weights['w_inception_3a_mp_reduce'], biases['b_inception_3a_mp_reduce'], strides=1)

# Concatenate the branches along the channel axis (pre-1.0 tf.concat takes the
# axis as the first argument).
inception_3a_concat = tf.concat(3, [inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_maxpool_reduce])

...

# Inception module (5b): same four-branch structure as module 3a.
inception_5b_1_1 = conv2d(inception_5a_concat, weights['w_inception_5b_11'], biases['b_inception_5b_11'], strides=1)
inception_5b_3_3_reduce = conv2d(inception_5a_concat, weights['w_inception_5b_33_reduce'], biases['b_inception_5b_33_reduce'], strides=1)
inception_5b_3_3 = conv2d(inception_5b_3_3_reduce, weights['w_inception_5b_33'], biases['b_inception_5b_33'], strides=1)
inception_5b_5_5_reduce = conv2d(inception_5a_concat, weights['w_inception_5b_55_reduce'], biases['b_inception_5b_55_reduce'], strides=1)
inception_5b_5_5 = conv2d(inception_5b_5_5_reduce, weights['w_inception_5b_55'], biases['b_inception_5b_55'], strides=1)
# NOTE(review): "k=3" was lost in scraping; restored (3x3/1 pooling branch).
inception_5b_maxpool = maxpool2d(inception_5a_concat, k=3, strides=1)
# NOTE(review): the original indexed 'w_inception_5a_mp_reduce' /
# 'b_inception_5a_mp_reduce' here -- every other layer uses keys matching its
# own name, so this looks like a 5a/5b typo. Fixed to 5b; confirm against the
# (elided) weights dict.
inception_5b_maxpool_reduce = conv2d(inception_5b_maxpool, weights['w_inception_5b_mp_reduce'], biases['b_inception_5b_mp_reduce'], strides=1)

inception_5b_concat = tf.concat(3, [inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_maxpool_reduce])

# Head: 7x7 average pool -> dropout -> fully connected logits.
pool5_7_7 = avgpool2d(inception_5b_concat, 7, 1)

pool5_7_7_dropout = tf.nn.dropout(pool5_7_7, 0.4)

fc = tf.reshape(pool5_7_7_dropout, [-1, weights['w_fc'].get_shape().as_list()[0]])
fc = tf.add(tf.matmul(fc, weights['w_fc']), biases['b_fc'])

#### Network design is finished.

# Pre-1.0 positional order is (logits, labels).
cost_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(fc, y))

# BUG FIX: the original never called .minimize(), so sess.run(optimizer, ...)
# below would fail (a MomentumOptimizer object is not a fetchable op) and no
# training would happen. Minimize the cost to get a trainable op.
optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9).minimize(cost_function)

predict = tf.argmax(fc, 1)

init = tf.initialize_all_variables()

# Launch the graph
# This code is extracted from "http://pythonkim.tistory.com/56"
# Some variables are changed
# Launch the graph.
with tf.Session() as sess:
    sess.run(init)

    for i in range(1):

        # Pairs of (start, end) indices for successive mini-batches.
        training_batch = zip(range(0, len(trainX), batch_size),
                             range(batch_size, len(trainX) + 1, batch_size))
        # BUG FIX: the original called tf.reset_default_graph() HERE, inside an
        # active Session. Resetting the default graph empties the graph stack
        # that the Session's context manager pops on exit -- which is exactly
        # the reported "IndexError: list index out of range". If a reset is
        # needed at all, it must run BEFORE any graph is built, never inside a
        # with tf.Session() block.
        for start, end in training_batch:
            # BUG FIX: keyword is feed_dict (lowercase); "Feed_dict" raises a
            # TypeError.
            sess.run(optimizer, feed_dict={x: trainX[start:end],
                                           y: trainY[start:end],
                                           keep_prob: 1.0})

        test_indices = np.arange(len(testX))  # Get a test batch
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:test_size]

        print(len(testX[test_indices]))

        # Test-set accuracy: fraction of argmax predictions matching labels.
        print(i, np.mean(np.argmax(testY[test_indices], axis=1) ==
                         sess.run(predict, feed_dict={x: testX[test_indices],
                                                      y: testY[test_indices],
                                                      keep_prob: 1.0})))

这是一个错误日志.

File “/home/mh0205/GoogLeNet/googlenet.py”,line 443,in
sess.run(predict,keep_prob: 1.0}))) # modified by minho File
“/home/mh0205/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py”,
line 1159,in exit
self._default_graph_context_manager.exit(exec_type,exec_value,exec_tb) File
“/home/mh0205/anaconda2/lib/python2.7/contextlib.py”,line 35,in
exit
self.gen.throw(type,value,traceback) File “/home/mh0205/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py”,
line 3671,in get_controller
if self.stack[-1] is not default: IndexError: list index out of range

我无法修复错误.请帮我.

最佳答案
在**构建计算图的代码之前**添加

tf.reset_default_graph()

注意:绝不能在 with tf.Session() 代码块内部调用它——在活动会话中重置默认图会清空图栈,退出会话时就会触发这里的“IndexError: list index out of range”。同时请把 sess.run 中的 Feed_dict 改为小写的 feed_dict。

如果尚未导入tensorflow,请添加

import tensorflow as tf

猜你在找的Python相关文章