Need help solving this problem (ValueError: Cannot feed value of shape (15,) for Tensor u'Softmax_2:0', which has shape '(?, 54)')

Posted: 2022-06-05 13:48:28

Below is my code for a neural network on my own dataset, and I get the following error: ValueError: Cannot feed value of shape (15,) for Tensor u'Softmax_2:0', which has shape '(?, 54)'

I am quite new.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf

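# 'data' is assumed to be a pandas DataFrame loaded earlier; that step is not shown in the question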
inputX = data.drop(['Name', 'md5', 'legitimate'], axis=1).as_matrix()
inputY = data['legitimate'].as_matrix()
n_samples = inputY.size
n_feature = inputX.shape[1]

inputX = np.array([[  3.32000000e+02,   2.24000000e+02,   8.45000000e+03,
          8.00000000e+00,   0.00000000e+00,   5.32480000e+04,
          1.63840000e+04,   0.00000000e+00,   5.40480000e+04,
          4.09600000e+03,   5.73440000e+04,   2.08594534e+09,
          4.09600000e+03,   4.09600000e+03,   4.00000000e+00,
          0.00000000e+00,   8.00000000e+00,   0.00000000e+00,
          4.00000000e+00,   0.00000000e+00,   7.37280000e+04,
          4.09600000e+03,   1.20607000e+05,   2.00000000e+00,
          3.20000000e+02,   1.04857600e+06,   4.09600000e+03,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   4.00000000e+00,   2.70373594e+00,
          1.05637876e+00,   6.22819008e+00,   1.63840000e+04,
          4.09600000e+03,   5.32480000e+04,   1.59390000e+04,
          9.92000000e+02,   5.28640000e+04,   6.00000000e+00,
          1.37000000e+02,   8.10000000e+01,   2.50000000e+01,
          1.00000000e+00,   3.52426821e+00,   3.52426821e+00,
          3.52426821e+00,   8.92000000e+02,   8.92000000e+02,
          8.92000000e+02,   7.20000000e+01,   1.60000000e+01], 
       [  3.32000000e+02,   2.24000000e+02,   8.45000000e+03,
          8.00000000e+00,   0.00000000e+00,   5.27360000e+04,
          1.12640000e+04,   0.00000000e+00,   5.35300000e+04,
          4.09600000e+03,   5.73440000e+04,   2.08699392e+09,
          4.09600000e+03,   5.12000000e+02,   4.00000000e+00,
          0.00000000e+00,   8.00000000e+00,   0.00000000e+00,
          4.00000000e+00,   0.00000000e+00,   7.37280000e+04,
          1.02400000e+03,   8.92300000e+04,   2.00000000e+00,
          3.20000000e+02,   1.04857600e+06,   4.09600000e+03,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   4.00000000e+00,   4.31899422e+00,
          3.30769150e+00,   6.15499505e+00,   1.42080000e+04,
          1.02400000e+03,   5.27360000e+04,   1.57382500e+04,
          9.92000000e+02,   5.22730000e+04,   6.00000000e+00,
          1.33000000e+02,   8.10000000e+01,   2.50000000e+01,
          1.00000000e+00,   3.54207119e+00,   3.54207119e+00,
          3.54207119e+00,   8.92000000e+02,   8.92000000e+02,
          8.92000000e+02,   7.20000000e+01,   1.60000000e+01],
       [  3.32000000e+02,   2.24000000e+02,   8.45000000e+03,
          8.00000000e+00,   0.00000000e+00,   4.09600000e+04,
          2.04800000e+04,   0.00000000e+00,   2.66080000e+04,
          4.09600000e+03,   4.50560000e+04,   1.92151552e+09,
          4.09600000e+03,   4.09600000e+03,   4.00000000e+00,
          0.00000000e+00,   0.00000000e+00,   0.00000000e+00,
          4.00000000e+00,   0.00000000e+00,   6.55360000e+04,
          4.09600000e+03,   1.21734000e+05,   2.00000000e+00,
          3.20000000e+02,   1.04857600e+06,   4.09600000e+03,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   5.00000000e+00,   3.58061262e+00,
          8.04176679e-02,   6.23193618e+00,   1.22880000e+04,
          4.09600000e+03,   4.09600000e+04,   1.04442000e+04,
          9.64000000e+02,   3.76480000e+04,   2.00000000e+00,
          6.80000000e+01,   0.00000000e+00,   1.12000000e+02,
          6.00000000e+00,   3.00438262e+00,   2.40651198e+00,
          3.59262288e+00,   6.10333333e+02,   1.24000000e+02,
          1.41200000e+03,   7.20000000e+01,   1.60000000e+01],
       [  3.32000000e+02,   2.24000000e+02,   2.58000000e+02,
          1.10000000e+01,   0.00000000e+00,   3.54816000e+05,
          2.57024000e+05,   0.00000000e+00,   1.83632000e+05,
          4.09600000e+03,   3.60448000e+05,   4.19430400e+06,
          4.09600000e+03,   5.12000000e+02,   5.00000000e+00,
          1.00000000e+00,   0.00000000e+00,   0.00000000e+00,
          5.00000000e+00,   1.00000000e+00,   6.26688000e+05,
          1.02400000e+03,   0.00000000e+00,   2.00000000e+00,
          3.30880000e+04,   1.04857600e+06,   4.09600000e+03,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   5.00000000e+00,   4.59039653e+00,
          2.37894684e+00,   6.29682587e+00,   1.20524800e+05,
          7.68000000e+03,   3.54816000e+05,   1.22148600e+05,
          1.64680000e+04,   3.54799000e+05,   7.00000000e+00,
          1.38000000e+02,   0.00000000e+00,   0.00000000e+00,
          7.00000000e+00,   3.91441476e+00,   1.44168828e+00,
          7.67709054e+00,   7.29842857e+03,   1.60000000e+01,
          2.84380000e+04,   7.20000000e+01,   0.00000000e+00],
       [  3.32000000e+02,   2.24000000e+02,   2.71000000e+02,
          6.00000000e+00,   0.00000000e+00,   2.40640000e+04,
          1.64864000e+05,   1.02400000e+03,   1.25380000e+04,
          4.09600000e+03,   2.86720000e+04,   4.19430400e+06,
          4.09600000e+03,   5.12000000e+02,   4.00000000e+00,
          0.00000000e+00,   6.00000000e+00,   0.00000000e+00,
          4.00000000e+00,   0.00000000e+00,   2.41664000e+05,
          1.02400000e+03,   0.00000000e+00,   2.00000000e+00,
          3.27680000e+04,   1.04857600e+06,   4.09600000e+03,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   5.00000000e+00,   4.10454072e+00,
          0.00000000e+00,   6.44010555e+00,   6.75840000e+03,
          0.00000000e+00,   2.40640000e+04,   4.62608000e+04,
          3.14400000e+03,   1.54712000e+05,   8.00000000e+00,
          1.55000000e+02,   1.00000000e+00,   0.00000000e+00,
          6.00000000e+00,   3.19910735e+00,   1.97133529e+00,
          5.21481585e+00,   4.52000000e+02,   3.40000000e+01,
          9.58000000e+02,   0.00000000e+00,   1.50000000e+01],
       [  3.32000000e+02,   2.24000000e+02,   2.58000000e+02,
          1.00000000e+01,   0.00000000e+00,   1.18784000e+05,
          3.81952000e+05,   0.00000000e+00,   5.99140000e+04,
          4.09600000e+03,   1.22880000e+05,   4.19430400e+06,
          4.09600000e+03,   5.12000000e+02,   5.00000000e+00,
          1.00000000e+00,   0.00000000e+00,   0.00000000e+00,
          5.00000000e+00,   1.00000000e+00,   5.20192000e+05,
          1.02400000e+03,   5.58287000e+05,   2.00000000e+00,
          3.30880000e+04,   1.04857600e+06,   4.09600000e+03,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   5.00000000e+00,   5.66240790e+00,
          4.18369159e+00,   7.96187140e+00,   1.00147200e+05,
          9.21600000e+03,   3.34848000e+05,   1.01559800e+05,
          9.36800000e+03,   3.34440000e+05,   7.00000000e+00,
          1.14000000e+02,   0.00000000e+00,   0.00000000e+00,
          1.80000000e+01,   6.53094643e+00,   2.45849223e+00,
          7.99268848e+00,   1.85234444e+04,   4.80000000e+01,
          3.39450000e+04,   7.20000000e+01,   1.40000000e+01],
       [  3.32000000e+02,   2.24000000e+02,   2.58000000e+02,
          1.00000000e+01,   0.00000000e+00,   1.74592000e+05,
          3.00032000e+05,   0.00000000e+00,   1.17140000e+05,
          4.09600000e+03,   1.80224000e+05,   4.19430400e+06,
          4.09600000e+03,   5.12000000e+02,   5.00000000e+00,
          1.00000000e+00,   0.00000000e+00,   0.00000000e+00,
          5.00000000e+00,   1.00000000e+00,   4.87424000e+05,
          1.02400000e+03,   5.13173000e+05,   2.00000000e+00,
          3.30880000e+04,   1.04857600e+06,   4.09600000e+03,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   5.00000000e+00,   5.73547047e+00,
          4.75826034e+00,   7.36431335e+00,   9.30816000e+04,
          1.53600000e+04,   1.92000000e+05,   9.46988000e+04,
          2.15000000e+04,   1.91664000e+05,   1.10000000e+01,
          2.54000000e+02,   1.50000000e+01,   0.00000000e+00,
          1.50000000e+01,   5.73239307e+00,   2.85236422e+00,
          7.98772639e+00,   1.27061333e+04,   1.18000000e+02,
          6.05000000e+04,   7.20000000e+01,   1.40000000e+01],
       [  3.32000000e+02,   2.24000000e+02,   2.58000000e+02,
          9.00000000e+00,   0.00000000e+00,   4.75648000e+05,
          3.48672000e+05,   0.00000000e+00,   3.19769000e+05,
          4.09600000e+03,   4.83328000e+05,   4.19430400e+06,
          4.09600000e+03,   5.12000000e+02,   5.00000000e+00,
          0.00000000e+00,   0.00000000e+00,   0.00000000e+00,
          5.00000000e+00,   0.00000000e+00,   8.56064000e+05,
          1.02400000e+03,   1.82072586e+09,   2.00000000e+00,
          3.30880000e+04,   1.04857600e+06,   4.09600000e+03,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   5.00000000e+00,   5.13993423e+00,
          4.48079036e+00,   6.55814891e+00,   1.64864000e+05,
          1.38240000e+04,   4.75648000e+05,   1.68145200e+05,
          3.08400000e+04,   4.75580000e+05,   1.40000000e+01,
          4.21000000e+02,   1.50000000e+01,   0.00000000e+00,
          5.90000000e+01,   2.82782573e+00,   9.60953136e-01,
          7.21232881e+00,   2.63703390e+03,   2.00000000e+01,
          6.76240000e+04,   7.20000000e+01,   0.00000000e+00],
       [  3.32000000e+02,   2.24000000e+02,   2.59000000e+02,
          9.00000000e+00,   0.00000000e+00,   1.57696000e+05,
          6.24640000e+04,   0.00000000e+00,   6.70150000e+04,
          4.09600000e+03,   1.63840000e+05,   4.19430400e+06,
          4.09600000e+03,   5.12000000e+02,   5.00000000e+00,
          0.00000000e+00,   0.00000000e+00,   0.00000000e+00,
          5.00000000e+00,   0.00000000e+00,   2.33472000e+05,
          1.02400000e+03,   2.72988000e+05,   2.00000000e+00,
          3.30240000e+04,   1.04857600e+06,   4.09600000e+03,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   4.00000000e+00,   4.81988481e+00,
          2.97736539e+00,   6.48512410e+00,   5.50400000e+04,
          3.58400000e+03,   1.57696000e+05,   5.56267500e+04,
          6.70000000e+03,   1.57297000e+05,   2.00000000e+00,
          7.60000000e+01,   0.00000000e+00,   0.00000000e+00,
          1.30000000e+01,   3.94329633e+00,   1.81444345e+00,
          6.12204520e+00,   2.70815385e+03,   1.32000000e+02,
          9.64000000e+03,   7.20000000e+01,   1.40000000e+01],
       [  3.32000000e+02,   2.24000000e+02,   2.59000000e+02,
          8.30000000e+01,   8.20000000e+01,   7.24992000e+05,
          2.30604800e+06,   0.00000000e+00,   4.24345600e+06,
          3.52256000e+06,   4.30899200e+06,   4.19430400e+06,
          4.09600000e+03,   4.09600000e+03,   5.00000000e+00,
          0.00000000e+00,   0.00000000e+00,   0.00000000e+00,
          5.00000000e+00,   0.00000000e+00,   6.70924800e+06,
          4.09600000e+03,   3.07704700e+06,   2.00000000e+00,
          3.27680000e+04,   1.04857600e+06,   4.09600000e+03,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   9.00000000e+00,   3.78312500e+00,
          0.00000000e+00,   7.99951830e+00,   3.36782222e+05,
          0.00000000e+00,   1.88416000e+06,   7.44182333e+05,
          2.27200000e+03,   3.06129900e+06,   4.00000000e+00,
          2.43000000e+02,   0.00000000e+00,   0.00000000e+00,
          2.10000000e+01,   3.98746295e+00,   2.64215931e+00,
          6.47369968e+00,   1.42880000e+04,   7.60000000e+01,
          2.70376000e+05,   0.00000000e+00,   0.00000000e+00],
       [  3.32000000e+02,   2.24000000e+02,   2.58000000e+02,
          1.00000000e+01,   0.00000000e+00,   1.20320000e+05,
          3.85024000e+05,   0.00000000e+00,   6.15780000e+04,
          4.09600000e+03,   1.26976000e+05,   4.19430400e+06,
          4.09600000e+03,   5.12000000e+02,   5.00000000e+00,
          1.00000000e+00,   0.00000000e+00,   0.00000000e+00,
          5.00000000e+00,   1.00000000e+00,   5.28384000e+05,
          1.02400000e+03,   5.66330000e+05,   2.00000000e+00,
          3.30880000e+04,   1.04857600e+06,   4.09600000e+03,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   5.00000000e+00,   5.64644365e+00,
          4.11726412e+00,   7.96277585e+00,   1.01068800e+05,
          9.72800000e+03,   3.30752000e+05,   1.02623800e+05,
          9.40400000e+03,   3.39652000e+05,   3.00000000e+00,
          8.90000000e+01,   0.00000000e+00,   0.00000000e+00,
          6.00000000e+00,   3.72982391e+00,   2.45849223e+00,
          5.31755236e+00,   2.73950000e+03,   4.80000000e+01,
          9.64000000e+03,   7.20000000e+01,   1.50000000e+01],
       [  3.32000000e+02,   2.24000000e+02,   2.59000000e+02,
          1.00000000e+01,   0.00000000e+00,   2.33984000e+05,
          1.37779200e+06,   0.00000000e+00,   9.31200000e+04,
          4.09600000e+03,   2.41664000e+05,   4.19430400e+06,
          4.09600000e+03,   5.12000000e+02,   5.00000000e+00,
          1.00000000e+00,   0.00000000e+00,   0.00000000e+00,
          5.00000000e+00,   1.00000000e+00,   1.63020800e+06,
          1.02400000e+03,   1.66150900e+06,   2.00000000e+00,
          3.32800000e+04,   1.04857600e+06,   4.09600000e+03,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   3.00000000e+00,   5.46068132e+00,
          3.13962777e+00,   7.09009944e+00,   5.37258667e+05,
          5.63200000e+03,   1.37216000e+06,   5.39602667e+05,
          1.33160000e+04,   1.37185600e+06,   1.00000000e+00,
          8.00000000e+01,   0.00000000e+00,   0.00000000e+00,
          1.80000000e+01,   4.32832189e+00,   2.32321967e+00,
          7.06841290e+00,   7.61582778e+04,   9.00000000e+00,
          1.34273500e+06,   7.20000000e+01,   1.90000000e+01],
       [  3.32000000e+02,   2.24000000e+02,   2.71000000e+02,
          6.00000000e+00,   0.00000000e+00,   4.91520000e+04,
          5.61152000e+05,   0.00000000e+00,   3.38800000e+04,
          4.09600000e+03,   5.32480000e+04,   4.19430400e+06,
          4.09600000e+03,   4.09600000e+03,   4.00000000e+00,
          0.00000000e+00,   0.00000000e+00,   0.00000000e+00,
          4.00000000e+00,   0.00000000e+00,   6.14400000e+05,
          4.09600000e+03,   0.00000000e+00,   2.00000000e+00,
          0.00000000e+00,   1.04857600e+06,   4.09600000e+03,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   4.00000000e+00,   3.69925758e+00,
          0.00000000e+00,   6.48297395e+00,   1.94600000e+04,
          1.60000000e+01,   4.91520000e+04,   1.50074000e+05,
          1.60000000e+01,   5.48460000e+05,   4.00000000e+00,
          1.19000000e+02,   1.00000000e+01,   0.00000000e+00,
          0.00000000e+00,   0.00000000e+00,   0.00000000e+00,
          0.00000000e+00,   0.00000000e+00,   0.00000000e+00,
          0.00000000e+00,   0.00000000e+00,   0.00000000e+00],
       [  3.32000000e+02,   2.24000000e+02,   2.58000000e+02,
          1.00000000e+01,   0.00000000e+00,   2.91840000e+04,
          4.45952000e+05,   1.68960000e+04,   1.48190000e+04,
          4.09600000e+03,   3.68640000e+04,   4.19430400e+06,
          4.09600000e+03,   5.12000000e+02,   5.00000000e+00,
          0.00000000e+00,   6.00000000e+00,   0.00000000e+00,
          5.00000000e+00,   0.00000000e+00,   1.76537600e+06,
          1.02400000e+03,   5.94294000e+05,   2.00000000e+00,
          3.41120000e+04,   1.04857600e+06,   4.09600000e+03,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   6.00000000e+00,   3.76419176e+00,
          0.00000000e+00,   6.47970818e+00,   7.93600000e+03,
          0.00000000e+00,   2.91840000e+04,   2.92339333e+05,
          2.53600000e+03,   1.28204800e+06,   8.00000000e+00,
          1.71000000e+02,   1.00000000e+00,   0.00000000e+00,
          6.00000000e+00,   3.15203588e+00,   2.16096405e+00,
          5.21367450e+00,   3.54333333e+02,   2.00000000e+01,
          7.44000000e+02,   0.00000000e+00,   0.00000000e+00],
       [  3.32000000e+02,   2.24000000e+02,   3.31670000e+04,
          2.00000000e+00,   2.50000000e+01,   3.78880000e+04,
          1.53600000e+04,   0.00000000e+00,   4.00000000e+04,
          4.09600000e+03,   4.50560000e+04,   4.19430400e+06,
          4.09600000e+03,   5.12000000e+02,   1.00000000e+00,
          0.00000000e+00,   0.00000000e+00,   0.00000000e+00,
          4.00000000e+00,   0.00000000e+00,   8.19200000e+04,
          1.02400000e+03,   6.78554440e+07,   2.00000000e+00,
          0.00000000e+00,   1.04857600e+06,   1.63840000e+04,
          1.04857600e+06,   4.09600000e+03,   0.00000000e+00,
          1.60000000e+01,   8.00000000e+00,   2.33301385e+00,
          0.00000000e+00,   6.63664803e+00,   6.65600000e+03,
          0.00000000e+00,   3.78880000e+04,   7.19800000e+03,
          8.00000000e+00,   3.77320000e+04,   8.00000000e+00,
          9.60000000e+01,   0.00000000e+00,   0.00000000e+00,
          1.40000000e+01,   3.42918455e+00,   2.41356665e+00,
          5.05007355e+00,   7.17142857e+02,   4.40000000e+01,
          2.21600000e+03,   0.00000000e+00,   1.50000000e+01]])

inputY = np.array([ 1.,  1.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  0.])

# Parameters
learning_rate = 0.000001
training_epochs = 2000
display_step = 50

#define placeholder
x = tf.placeholder(tf.float32, [None, n_feature])
y = tf.placeholder(tf.float32, [None, 1])

#For hidden layer 1
W1 = tf.Variable(tf.zeros([n_feature, n_feature]))   
b1 = tf.Variable(tf.zeros([n_feature]))              
y_values1 = tf.add(tf.matmul(x, W1), b1)       
layer_1_output = tf.nn.softmax(y_values1)  

#For output layer

W2 = tf.Variable(tf.zeros([n_feature, n_feature]))        
b2 = tf.Variable(tf.zeros([n_feature]))              
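# note: the next line reuses W1 and b1 instead of W2 and b2; the answer below corrects this layer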
y_values = tf.add(tf.matmul(layer_1_output, W1), b1)     
output = tf.nn.softmax(y_values)

# Cost function: Mean squared error
cost = tf.reduce_sum(tf.pow(y - output, 2))/(2*n_samples)
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initialize variables and tensorflow session
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for i in range(training_epochs):  

    sess.run(optimizer, feed_dict={x: inputX, y: inputY})

    if (i) % display_step == 0:
        cc = sess.run(cost, feed_dict={x: inputX, y:inputY})
        print "Training step:", '%04d' % (i), "cost=", "{:.9f}".format(cc) #, \"W=", sess.run(W), "b=", sess.run(b)

print "Optimization Finished!"
training_cost = sess.run(cost, feed_dict={x: inputX, y: inputY})
print "Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n'

ValueError                                Traceback (most recent call last)
<ipython-input-31-017827906dfe> in <module>()
      1 for i in range(training_epochs):
----> 2     sess.run(optimizer, feed_dict={x: inputX, y: inputY}) # Take a gradient descent step using our inputs and labels
      3 
      4     # That's all! The rest of the cell just outputs debug messages.
      5     # Display logs per epoch step

/home/allan/.local/lib/python2.7/site-packages/tensorflow/python/client/session.pyc in run(self, fetches, feed_dict, options, run_metadata)
    765     try:
    766       result = self._run(None, fetches, feed_dict, options_ptr,
--> 767                          run_metadata_ptr)
    768       if run_metadata:
    769         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/home/allan/.local/lib/python2.7/site-packages/tensorflow/python/client/session.pyc in _run(self, handle, fetches, feed_dict, options, run_metadata)
    942                 'Cannot feed value of shape %r for Tensor %r, '
    943                 'which has shape %r'
--> 944                 % (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
    945           if not self.graph.is_feedable(subfeed_t):
    946             raise ValueError('Tensor %s may not be fed.' % subfeed_t)

ValueError: Cannot feed value of shape (15,) for Tensor u'Softmax_2:0', which has shape '(?, 54)'
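
One observation on the traceback (my reading, not stated in the post): the error names u'Softmax_2:0' with shape (?, 54) rather than a Placeholder. In a notebook this usually means the Python name y was rebound to a softmax output by re-running cells, so the feed dict targets the wrong tensor. Printing the tensors makes this easy to check:

print(x)  # should show a Placeholder with shape (?, 54)
print(y)  # should show a Placeholder with shape (?, 1); if it shows Softmax_...:0, re-run the cell that defines the placeholders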

1 Answer

#1



So your input has shape (15, 54) and your output has shape (15, 1). I wrote an answer before, but I was wrong. Your weights for the output layer should look like this:

W2 = tf.Variable(tf.zeros([n_feature, 1]))
b2 = tf.Variable(tf.zeros([1]))
y_values = tf.add(tf.matmul(layer_1_output, W2), b2)

That is because I assume you have one output node.

I ran your code and I reshaped inputY:

inputY = inputY.reshape([-1,1])
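
To see what the reshape does, here is a minimal sketch with numpy, using the 15 labels from the question:

import numpy as np

inputY = np.array([1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
print(inputY.shape)                   # (15,)  -- a flat vector, which cannot be fed to a 2-D tensor
print(inputY.reshape([-1, 1]).shape)  # (15, 1) -- one column per label, matching the y placeholder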

It works well for me. Don't think too much about the -1; it holds the place for the number of samples. And the 1 is the number of columns in inputY, because you have one column there. Although, the cost is always 1.0, so I don't know if that is what you want. Also, I changed the

learning_rate = 0.1 

because I think yours is too small. And I changed the activation function:

layer_1_output = tf.nn.sigmoid(y_values1)  

output = tf.nn.sigmoid(y_values)

With sigmoid I got a decreasing cost, which I assume is better. I think 2000 training steps are too many for this amount of data; after 1000 steps it doesn't learn much more. (Use 2000 if you have thousands of samples.) For me, with 1000 steps, the cost is 0.609 at the first step and 0.421 after 1000 steps.

So your code with my modifications looks like this (my csv file has 10 input columns, and the 11th column is the output, which contains 1 in the first three rows and 0 in the others... just like yours):

import numpy as np
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler

csv = np.loadtxt('/Users.../table.csv', delimiter = ',')
inputX = csv[:,0:10]
inputY = csv[:,10:]

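# scale every feature into [0, 1]; without scaling, very large raw values (the question's features go up to ~1e9) would dominate the gradients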
inputX = MinMaxScaler().fit_transform(inputX)

n_samples = inputY.size
n_feature = inputX.shape[1]

# Parameters
learning_rate = 0.3
training_epochs = 1000
display_step = 50

#define placeholder
x = tf.placeholder(tf.float32, [None, n_feature])
y = tf.placeholder(tf.float32, [None, 1])

#For hidden layer 1
W1 = tf.Variable(tf.random_normal([n_feature, 20]))   
b1 = tf.Variable(tf.random_normal([20]))              
y_values1 = tf.add(tf.matmul(x, W1), b1)       
layer_1_output = tf.nn.sigmoid(y_values1)  

#For output layer

W2 = tf.Variable(tf.random_normal([20, 1]))        
b2 = tf.Variable(tf.random_normal([1]))              
y_values = tf.add(tf.matmul(layer_1_output, W2), b2)     
output = tf.nn.sigmoid(y_values)

# Cost function: Mean squared error
cost = tf.reduce_sum(tf.pow(y - output, 2))/(2*n_samples)
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)


# Initialize variables and tensorflow session
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for i in range(training_epochs):  

    sess.run(optimizer, feed_dict={x: inputX, y: inputY})

    if (i) % display_step == 0:
        cc = sess.run(cost, feed_dict={x: inputX, y:inputY})
        print("Training step:", '%04d' % (i), "cost=","{:.9f}".format(cc)) 

print("Optimization Finished!")

training_cost = sess.run(cost, feed_dict={x: inputX, y: inputY})
print("Training cost=", training_cost, '\n', "W1=",'\n', sess.run(W1),'\n', "b1=", '\n', sess.run(b1), '\n', "W2=",'\n', sess.run(W2),'\n', "b2=", '\n', sess.run(b2))

prediction = sess.run(output, feed_dict={x: inputX, y: inputY})
print("Output:\n", prediction)

acc = 1-np.sqrt(training_cost)
print("Acc = ", acc)
