Comment list
99 Posted by:
Anonymous 2020/01/22 10:21:37
100 Posted by:
Anonymous 2020/01/22 10:21:48
import os, sys, math
from sklearn import datasets, svm
import joblib  # sklearn.externals.joblib is deprecated; use the standalone joblib package

DIGITS_PKL = "digit-clf.pkl"

# Train an SVM classifier on the sklearn digits dataset and save it to disk
def train_digits():
    digits = datasets.load_digits()
    data_train = digits.data
    label_train = digits.target
    clf = svm.SVC(gamma=0.001)
    clf.fit(data_train, label_train)
    joblib.dump(clf, DIGITS_PKL)
    print("Saved the trained model =", DIGITS_PKL)
    return clf

# Load the saved model (training it first if necessary) and classify one sample
def predict_digits(data):
    if not os.path.exists(DIGITS_PKL):
        clf = train_digits()
    clf = joblib.load(DIGITS_PKL)
    n = clf.predict([data])
    print("Prediction =", n)

# Convert an image file into an 8x8 feature vector on the 0-16 scale
# used by load_digits() (white background maps to 0)
def image_to_data(imagefile):
    import numpy as np
    from PIL import Image
    image = Image.open(imagefile).convert("L")
    image = image.resize((8, 8), Image.LANCZOS)  # formerly Image.ANTIALIAS
    img = np.asarray(image, dtype=float)
    img = np.floor(16 - 16 * (img / 256))
    import matplotlib.pyplot as plt
    plt.imshow(img)
    plt.gray()
    plt.show()
    img = img.flatten()
    print(img)
    return img

def main():
    if len(sys.argv) <= 1:
        print("USAGE:")
        print("python3 predict_digit.py imagefile")
        return
    imagefile = sys.argv[1]
    data = image_to_data(imagefile)
    predict_digits(data)

if __name__ == '__main__':
    main()
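A quick way to sanity-check the saved model is to feed it samples straight from the sklearn digits dataset; a minimal sketch, assuming digit-clf.pkl from the script above already exists:

from sklearn import datasets
import joblib

digits = datasets.load_digits()
clf = joblib.load("digit-clf.pkl")
print(clf.predict(digits.data[:10]))  # predictions for the first 10 samples
print(digits.target[:10])             # ground-truth labels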
101 Posted by:
Anonymous 2020/01/22 11:51:23
102 Posted by:
Anonymous 2020/01/22 13:05:22
103 Posted by:
Anonymous 2020/01/22 13:07:49
#include <stdio.h>
#include <math.h>
#define N 40

/* Right-hand sides of the coupled ODEs: y' = -y/5 + z/20, z' = y/5 - z/5 */
float f1(float x, float y, float z)
{
    return ((-y / 5.0) + (z / 20.0));
}

float f2(float x, float y, float z)
{
    return ((y / 5.0) - (z / 5.0));
}

int main(void)
{
    int i;
    float x[N+1], y[N+1], z[N+1], a, b, a0, b0, a1, b1, a2, b2, a3, b3, h = 0.2;
    x[0] = 0.0;
    y[0] = 60.0;
    z[0] = 0.0;
    printf(" x , y , z \n");
    /* Classical 4th-order Runge-Kutta with step h = 0.2 */
    for (i = 0; i < 11; i++) {
        a0 = h * f1(x[i], y[i], z[i]);
        b0 = h * f2(x[i], y[i], z[i]);
        a1 = h * f1(x[i] + h / 2.0, y[i] + a0 / 2.0, z[i] + b0 / 2.0);
        b1 = h * f2(x[i] + h / 2.0, y[i] + a0 / 2.0, z[i] + b0 / 2.0);
        a2 = h * f1(x[i] + h / 2.0, y[i] + a1 / 2.0, z[i] + b1 / 2.0);
        b2 = h * f2(x[i] + h / 2.0, y[i] + a1 / 2.0, z[i] + b1 / 2.0);
        a3 = h * f1(x[i] + h, y[i] + a2, z[i] + b2);
        b3 = h * f2(x[i] + h, y[i] + a2, z[i] + b2);
        a = (a0 + 2.0 * a1 + 2.0 * a2 + a3) / 6.0;
        b = (b0 + 2.0 * b1 + 2.0 * b2 + b3) / 6.0;
        x[i+1] = x[i] + h;
        y[i+1] = y[i] + a;
        z[i+1] = z[i] + b;
        printf("%8.5f %8.5f %8.5f\n", x[i], y[i], z[i]);
    }
    return 0;
}
/* Execution result
x , y , z
0.00000 60.00000 0.00000
0.20000 57.65890 2.30605
0.40000 55.43130 4.43214
0.60000 53.31104 6.38966
0.80000 51.29233 8.18931
1.00000 49.36967 9.84115
1.20000 47.53790 11.35464
1.40000 45.79215 12.73868
1.60000 44.12782 14.00162
1.80000 42.54055 15.15131
2.00000 41.02627 16.19514
*/
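Side note: the table can be cross-checked against an adaptive integrator. A minimal sketch in Python with scipy (the function name rhs is mine; the system and initial values are the ones above):

import numpy as np
from scipy.integrate import solve_ivp

# dy/dx = -y/5 + z/20, dz/dx = y/5 - z/5, with y(0) = 60, z(0) = 0
def rhs(x, s):
    y, z = s
    return [-y / 5.0 + z / 20.0, y / 5.0 - z / 5.0]

sol = solve_ivp(rhs, (0.0, 2.0), [60.0, 0.0],
                t_eval=np.linspace(0.0, 2.0, 11), rtol=1e-8)
for x, y, z in zip(sol.t, sol.y[0], sol.y[1]):
    print(f"{x:8.5f} {y:8.5f} {z:8.5f}")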
104 Posted by:
Anonymous 2020/01/29 09:23:37
from sklearn import model_selection, svm, metrics
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use('ggplot')
# Read the wine data (semicolon-delimited CSV)
wine = pd.read_csv("winequality-red.csv", delimiter=";")
names = ["fixed acidity", "volatile acidity", "citric acid",
         "residual sugar", "chlorides", "free sulfur dioxide",
         "total sulfur dioxide", "density", "pH", "sulphates",
         "alcohol"]
y = wine["quality"]  # label
# Scatter-plot each feature against the quality label
fig, axn = plt.subplots(11, sharey=True)
for i, name in enumerate(names):
    axn[i].set_title(name)
    axn[i].scatter(wine[name], y)
plt.show()
106 Posted by:
Anonymous 2020/01/29 09:32:56
from sklearn import model_selection, svm, metrics
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition import PCA
# Read the wine data (semicolon-delimited CSV)
wine = pd.read_csv("winequality-white.csv", delimiter=";")
X = wine[["fixed acidity", "volatile acidity", "citric acid",
          "residual sugar", "chlorides", "free sulfur dioxide",
          "total sulfur dioxide", "density", "pH", "sulphates",
          "alcohol"]]  # data
y = wine["quality"]  # label
# Reduce the 11 features to 2 dimensions
comp = TruncatedSVD(n_components=2)
X_reduced = comp.fit_transform(X)
# Plot the reduced data, sized and shaded by quality
plt.style.use('ggplot')
plt.scatter(X_reduced[:, 0],
            X_reduced[:, 1],
            c=y, s=y*3, cmap="Reds")  # c=y is assumed; cmap has no effect without it
plt.show()
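PCA is imported above but never used; swapping it in for TruncatedSVD is a two-line change. A sketch, using the X defined above:

comp = PCA(n_components=2)  # centers the data before projecting, unlike TruncatedSVD
X_reduced = comp.fit_transform(X)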
107 Posted by:
Anonymous 2020/02/07 23:31:46
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import RMSprop
from keras.datasets import mnist
import matplotlib.pyplot as plt
im_rows = 28   # image height in pixels
im_cols = 28   # image width in pixels
im_color = 1   # one grayscale channel
in_shape = (im_rows, im_cols, im_color)
out_size = 10  # ten digit classes
(X_train, y_train),(X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1, im_rows, im_cols, im_color)
X_train = X_train.astype('float32') / 255
X_test = X_test.reshape(-1, im_rows, im_cols, im_color)
X_test = X_test.astype('float32') / 255
y_train = keras.utils.np_utils.to_categorical(y_train.astype('int32'),10)
y_test = keras.utils.np_utils.to_categorical(y_test.astype('int32'),10)
model = Sequential()
model.add(Conv2D(32,
                 kernel_size=(3,3),
                 activation='relu',
                 input_shape=in_shape))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(out_size,activation='softmax'))
model.compile(
    loss='categorical_crossentropy',
    optimizer=RMSprop(),
    metrics=['accuracy'])
hist = model.fit(X_train, y_train,
                 batch_size=128,
                 epochs=12,
                 verbose=1,
                 validation_data=(X_test, y_test))
score = model.evaluate(X_test, y_test, verbose=1)
print('accuracy=', score[1], 'loss=', score[0])
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.title('Accuracy')
plt.legend(['train','test'], loc='upper left')
plt.show()
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Loss')
plt.legend(['train','test'], loc='upper left')
plt.show()
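If you want to keep the trained CNN around, Keras can save and reload the whole model. A minimal sketch (the filename mnist_cnn.h5 is just an example, not from the post):

from keras.models import load_model

model.save('mnist_cnn.h5')          # persist architecture + weights
model = load_model('mnist_cnn.h5')  # restore later without retraining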
108 Posted by:
Anonymous 2020/02/08 17:59:28
# MLP version of the MNIST classifier (dense layers only)
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.datasets import mnist
import matplotlib.pyplot as plt
# Input and output sizes
in_size = 28 * 28
out_size = 10
# Load MNIST
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Flatten each image to a 784-dim vector and scale to [0, 1]
X_train = X_train.reshape(-1, 784).astype('float32') / 255
X_test = X_test.reshape(-1, 784).astype('float32') / 255
# One-hot encode the labels
y_train = keras.utils.np_utils.to_categorical(y_train.astype('int32'),10)
y_test = keras.utils.np_utils.to_categorical(y_test.astype('int32'),10)
# Build a two-hidden-layer MLP with dropout
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(in_size,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(out_size, activation='softmax'))
# Compile with cross-entropy loss and RMSprop
model.compile(
    loss='categorical_crossentropy',
    optimizer=RMSprop(),
    metrics=['accuracy'])
# Train
hist = model.fit(X_train, y_train,
                 batch_size=128,
                 epochs=50,
                 verbose=1,
                 validation_data=(X_test, y_test))
# Evaluate on the test set
score = model.evaluate(X_test, y_test, verbose=1)
print('accuracy=', score[1], 'loss=', score[0])
# Plot the accuracy history
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.title('Accuracy')
plt.legend(['train','test'], loc='upper left')
plt.show()
# Plot the loss history
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Loss')
plt.legend(['train','test'],loc='upper left')
plt.show()
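As a quick follow-up, individual predictions from the trained MLP can be inspected with model.predict; a minimal sketch, assuming the script above has just run:

import numpy as np

pred = np.argmax(model.predict(X_test[:10]), axis=1)  # predicted digits
true = np.argmax(y_test[:10], axis=1)                 # one-hot labels back to digits
print('predicted:', pred)
print('actual:   ', true)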