In [1]:
from sklearn import datasets
digits = datasets.load_digits()
In [2]:
digits
Out[2]:
{'DESCR': "Optical Recognition of Handwritten Digits Data Set\n===================================================\n\nNotes\n-----\nData Set Characteristics:\n :Number of Instances: 5620\n :Number of Attributes: 64\n :Attribute Information: 8x8 image of integer pixels in the range 0..16.\n :Missing Attribute Values: None\n :Creator: E. Alpaydin (alpaydin '@' boun.edu.tr)\n :Date: July; 1998\n\nThis is a copy of the test set of the UCI ML hand-written digits datasets\nhttp://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits\n\nThe data set contains images of hand-written digits: 10 classes where\neach class refers to a digit.\n\nPreprocessing programs made available by NIST were used to extract\nnormalized bitmaps of handwritten digits from a preprinted form. From a\ntotal of 43 people, 30 contributed to the training set and different 13\nto the test set. 32x32 bitmaps are divided into nonoverlapping blocks of\n4x4 and the number of on pixels are counted in each block. This generates\nan input matrix of 8x8 where each element is an integer in the range\n0..16. This reduces dimensionality and gives invariance to small\ndistortions.\n\nFor info on NIST preprocessing routines, see M. D. Garris, J. L. Blue, G.\nT. Candela, D. L. Dimmick, J. Geist, P. J. Grother, S. A. Janet, and C.\nL. Wilson, NIST Form-Based Handprint Recognition System, NISTIR 5469,\n1994.\n\nReferences\n----------\n - C. Kaynak (1995) Methods of Combining Multiple Classifiers and Their\n Applications to Handwritten Digit Recognition, MSc Thesis, Institute of\n Graduate Studies in Science and Engineering, Bogazici University.\n - E. Alpaydin, C. Kaynak (1998) Cascading Classifiers, Kybernetika.\n - Ken Tang and Ponnuthurai N. Suganthan and Xi Yao and A. Kai Qin.\n Linear dimensionalityreduction using relevance weighted LDA. School of\n Electrical and Electronic Engineering Nanyang Technological University.\n 2005.\n - Claudio Gentile. A New Approximate Maximal Margin Classification\n Algorithm. NIPS. 2000.\n",
 'data': array([[ 0., 0., 5., ..., 0., 0., 0.],
 [ 0., 0., 0., ..., 10., 0., 0.],
 [ 0., 0., 0., ..., 16., 9., 0.],
 ...,
 [ 0., 0., 1., ..., 6., 0., 0.],
 [ 0., 0., 2., ..., 12., 0., 0.],
 [ 0., 0., 10., ..., 12., 1., 0.]]),
 'images': array([[[ 0., 0., 5., ..., 1., 0., 0.],
 [ 0., 0., 13., ..., 15., 5., 0.],
 [ 0., 3., 15., ..., 11., 8., 0.],
 ...,
 [ 0., 4., 11., ..., 12., 7., 0.],
 [ 0., 2., 14., ..., 12., 0., 0.],
 [ 0., 0., 6., ..., 0., 0., 0.]],
 [[ 0., 0., 0., ..., 5., 0., 0.],
 [ 0., 0., 0., ..., 9., 0., 0.],
 [ 0., 0., 3., ..., 6., 0., 0.],
 ...,
 [ 0., 0., 1., ..., 6., 0., 0.],
 [ 0., 0., 1., ..., 6., 0., 0.],
 [ 0., 0., 0., ..., 10., 0., 0.]],
 [[ 0., 0., 0., ..., 12., 0., 0.],
 [ 0., 0., 3., ..., 14., 0., 0.],
 [ 0., 0., 8., ..., 16., 0., 0.],
 ...,
 [ 0., 9., 16., ..., 0., 0., 0.],
 [ 0., 3., 13., ..., 11., 5., 0.],
 [ 0., 0., 0., ..., 16., 9., 0.]],
 ...,
 [[ 0., 0., 1., ..., 1., 0., 0.],
 [ 0., 0., 13., ..., 2., 1., 0.],
 [ 0., 0., 16., ..., 16., 5., 0.],
 ...,
 [ 0., 0., 16., ..., 15., 0., 0.],
 [ 0., 0., 15., ..., 16., 0., 0.],
 [ 0., 0., 2., ..., 6., 0., 0.]],
 [[ 0., 0., 2., ..., 0., 0., 0.],
 [ 0., 0., 14., ..., 15., 1., 0.],
 [ 0., 4., 16., ..., 16., 7., 0.],
 ...,
 [ 0., 0., 0., ..., 16., 2., 0.],
 [ 0., 0., 4., ..., 16., 2., 0.],
 [ 0., 0., 5., ..., 12., 0., 0.]],
 [[ 0., 0., 10., ..., 1., 0., 0.],
 [ 0., 2., 16., ..., 1., 0., 0.],
 [ 0., 0., 15., ..., 15., 0., 0.],
 ...,
 [ 0., 4., 16., ..., 16., 6., 0.],
 [ 0., 8., 16., ..., 16., 8., 0.],
 [ 0., 1., 8., ..., 12., 1., 0.]]]),
 'target': array([0, 1, 2, ..., 8, 9, 8]),
 'target_names': array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}
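The object returned by load_digits is a Bunch, which behaves like a dict. A quick way to list its fields (a sketch, assuming the same digits object as above):

print(digits.keys())  # dict-style view of the Bunch fields: DESCR, data, images, target, target_names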
In [3]:
X = digits.data  # 64 features per sample: the flattened 8x8 pixel grid
y = digits.target
In [4]:
X.shape
Out[4]:
(1797, 64)
In [5]:
y.shape
Out[5]:
(1797,)
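The 64 columns in data are just each 8x8 image flattened row by row. A small check (a sketch, not part of the original run) confirms the two views hold the same pixels:

import numpy as np
assert np.array_equal(digits.images[0].ravel(), digits.data[0])  # flattened image == feature row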
In [6]:
import numpy as np
from collections import Counter
Counter(y)
Out[6]:
Counter({0: 178,
1: 182,
2: 177,
3: 183,
4: 181,
5: 182,
6: 181,
7: 179,
8: 174,
9: 180})
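Since the labels are integers 0-9, np.bincount gives the same per-class counts as an array; with roughly 180 samples per digit, the classes are well balanced:

counts = np.bincount(y)  # index i holds the number of samples labeled i
print(counts)            # array([178, 182, 177, 183, 181, 182, 181, 179, 174, 180])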
In [7]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=52)
In [8]:
X_train.shape
Out[8]:
(1437, 64)
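The split above is purely random, which is fine here because the classes are balanced. For skewed datasets, the stratify argument preserves class proportions in both halves; a variant of the same call:

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.20, random_state=52, stratify=y)  # keep per-digit ratios in train and test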
In [9]:
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
Out[9]:
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
            max_depth=None, max_features='auto', max_leaf_nodes=None,
            min_impurity_decrease=0.0, min_impurity_split=None,
            min_samples_leaf=1, min_samples_split=2,
            min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
            oob_score=False, random_state=None, verbose=0,
            warm_start=False)
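The repr above shows the old default of n_estimators=10 trees. More trees generally improve accuracy at the cost of training time; a sketch with a larger forest and a fixed seed for reproducibility (parameter values are illustrative, not tuned):

clf = RandomForestClassifier(n_estimators=100, random_state=52)  # 100 trees, deterministic run
clf.fit(X_train, y_train)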
In [12]:
y_predict = clf.predict(X_test)
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, y_predict)*100)
95.0
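Accuracy alone hides which digits get confused with which. A per-class breakdown (a sketch using the same predictions) shows where the remaining ~5% of errors land:

from sklearn.metrics import confusion_matrix, classification_report
print(confusion_matrix(y_test, y_predict))       # rows = true digit, columns = predicted digit
print(classification_report(y_test, y_predict))  # precision, recall, and F1 per digit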
In [14]:
import matplotlib.pyplot as plt
plt.imshow(digits.images[0])
plt.show()
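imshow renders the first digit with matplotlib's default colormap. A short sketch (layout is illustrative) that plots the first ten digits in grayscale with their labels:

fig, axes = plt.subplots(2, 5, figsize=(8, 4))
for ax, image, label in zip(axes.ravel(), digits.images, digits.target):
    ax.imshow(image, cmap='gray_r')  # reversed grays: dark ink on light background
    ax.set_title(str(label))
    ax.axis('off')
plt.show()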