-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathMyPreprocessing.py
More file actions
76 lines (65 loc) · 2.69 KB
/
MyPreprocessing.py
File metadata and controls
76 lines (65 loc) · 2.69 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
#from scipy.io.arff import loadarff
from sklearn.base import BaseEstimator, TransformerMixin
import pandas as pd
import numpy as np
from sklearn import preprocessing
import matplotlib.pyplot as plt
class MyPreprocessing(BaseEstimator, TransformerMixin):
    """Preprocess a mixed-type dataset for clustering/classification.

    The last column of the input is treated as the class label and stored
    separately in ``labels_``; every other numeric column is mean-imputed
    and discretized into ``bins_no`` equal-width, labelled bins, and every
    categorical column is mode-imputed and decoded to ``str``.  The
    combined preprocessed frame is stored in ``new_df``.
    """

    def __init__(self, bins_no=5):
        # Number of equal-width bins used to discretize each numeric column.
        self.bins_no = bins_no
        # Widen pandas display limits (debugging convenience; kept from the
        # original implementation even though it is a global side effect).
        pd.set_option('display.max_rows', 500)
        pd.set_option('display.max_columns', 500)
        pd.set_option('display.width', 1000)

    @staticmethod
    def _to_text(value):
        """Decode ``bytes`` to ``str``; stringify anything else.

        BUGFIX: the original called ``.decode('utf-8')`` unconditionally,
        which raises AttributeError for plain-``str`` object columns
        (e.g. data not loaded from an ARFF file).
        """
        if isinstance(value, bytes):
            return value.decode('utf-8')
        return str(value)

    def _discretize(self, series, col):
        """Equal-width-bin *series* into labels ``{col}_1 .. {col}_k``.

        Uses ``retbins`` to learn how many bins survive
        ``duplicates='drop'`` before attaching labels.  BUGFIX: the
        original passed a fixed-length ``labels`` list together with
        ``duplicates='drop'``, which raises ValueError whenever duplicate
        edges are actually dropped (constant / near-constant columns).
        """
        _, edges = pd.cut(series, self.bins_no,
                          retbins=True, duplicates='drop')
        n_bins = len(edges) - 1
        labelled = pd.cut(series, edges,
                          labels=[f'{col}_{i + 1}' for i in range(n_bins)],
                          include_lowest=True)
        return pd.DataFrame(labelled, columns=[col])

    def fit(self, data):
        """Build ``labels_`` and ``new_df`` from *data*.

        Parameters
        ----------
        data : array-like or DataFrame
            Rows of features with the class label in the last column.
            ``'?'`` (str or bytes) marks missing values.

        Returns
        -------
        self : MyPreprocessing
            BUGFIX: the original returned ``None``, breaking the sklearn
            estimator contract (``Pipeline``, ``fit_transform`` chaining).
        """
        df = pd.DataFrame(data)
        # '?' is this dataset family's missing-value marker.
        df = df.replace(b'?', np.nan)
        df = df.replace('?', np.nan)
        # Rename the last column so the class column has a consistent name.
        df.columns = df.columns[:-1].values.tolist() + ['Class']
        labels = df.iloc[:, -1]
        # BUGFIX: ``np.object`` was removed in NumPy 1.24 — compare against
        # the builtin ``object`` instead.
        if labels.dtype == object:
            self.labels_ = pd.DataFrame(labels.apply(self._to_text))
        else:
            self.labels_ = pd.DataFrame(labels.apply(str))
        # Drop the label column from the feature frame.
        df = df.drop(df.columns[len(df.columns) - 1], axis=1)

        # .copy() so the imputation below does not write through a view
        # of ``df`` (SettingWithCopyWarning in the original).
        df_obj = df.select_dtypes(include='object').copy()
        for col in list(df_obj.columns):
            try:
                # Object columns that are really numeric (e.g. numbers
                # stored as bytes): convert and move back to ``df``.
                as_float = df_obj[col].apply(np.float64)
            except (ValueError, TypeError):
                # Genuinely categorical: impute with the mode, then decode.
                df_obj[col] = df_obj[col].fillna(df_obj[col].mode()[0])
                df_obj[col] = df_obj[col].apply(self._to_text)
            else:
                df_obj = df_obj.drop([col], axis=1)
                df.loc[:, col] = as_float

        df_num = df.select_dtypes(exclude='object').copy()
        discretized = []
        for col in df_num.columns:
            # Mean-impute before binning so NaNs get a bin.
            df_num[col] = df_num[col].fillna(df_num[col].mean())
            discretized.append(self._discretize(df_num[col], col))
        # Single concat instead of the original's quadratic loop-concat.
        df_num_process = (pd.concat(discretized, axis=1)
                          if discretized else pd.DataFrame())

        self.new_df = pd.concat(
            [df_obj, df_num_process, self.labels_],
            axis=1, sort=False)
        return self

    def transform(self, data=None):
        """Return the preprocessed frame built by :meth:`fit`.

        Added so ``TransformerMixin.fit_transform`` works; *data* is
        ignored because all work happens in ``fit``.
        """
        return self.new_df
#
#print(df.select_dtypes(exclude='object'))
#print(df.select_dtypes(include='object'))
#plt.interactive(False)
#plt.show(block=True)
##
#print(agg_clustering(df_preprocess, 'Single', 3))
#agg = AgglomerativeClustering(n_clusters=2, linkage='complete')
#print(agg.fit_predict(df_preprocess))
#data, meta = loadarff('datasets/adult-test.arff')
#preprocess = MyPreprocessing(data)
#preprocess.fit()
#print(preprocess.new_df)