# 3. Data Preprocessing
> Author: [Chris Albon](https://chrisalbon.com/)
>
> Translator: [飛龍](https://github.com/wizardforcel)
>
> License: [CC BY-NC-SA 4.0](http://creativecommons.org/licenses/by-nc-sa/4.0/)
## Convert Pandas Categorical Data For Scikit-Learn
```py
# Import required libraries
from sklearn import preprocessing
import pandas as pd
raw_data = {'patient': [1, 1, 1, 2, 2],
            'obs': [1, 2, 3, 1, 2],
            'treatment': [0, 1, 0, 1, 0],
            'score': ['strong', 'weak', 'normal', 'weak', 'strong']}
df = pd.DataFrame(raw_data, columns = ['patient', 'obs', 'treatment', 'score'])
# Create a label (category) encoder object
le = preprocessing.LabelEncoder()
# Fit the encoder to the pandas column
le.fit(df['score'])
# LabelEncoder()
# View the labels (if you wish)
list(le.classes_)
# ['normal', 'strong', 'weak']
# Apply the fitted encoder to the pandas column
le.transform(df['score'])
# array([1, 2, 0, 2, 1])
# Convert some integers back into their category names
list(le.inverse_transform([2, 2, 1]))
# ['weak', 'weak', 'strong']
```
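In practice the fit and transform steps are often combined into a single call. A minimal follow-up sketch that stores the encoded values on the DataFrame (the `score_encoded` column name is just for illustration):

```py
# Fit and transform in one step, keeping the result as a new column
df['score_encoded'] = le.fit_transform(df['score'])
# The new column holds the integer codes: 1, 2, 0, 2, 1
```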
## Drop Observations With Missing Values
```py
# Load libraries
import numpy as np
import pandas as pd
# Create a feature matrix
X = np.array([[1.1, 11.1],
              [2.2, 22.2],
              [3.3, 33.3],
              [4.4, 44.4],
              [np.nan, 55]])
# Remove observations with missing values
X[~np.isnan(X).any(axis=1)]
'''
array([[ 1.1, 11.1],
       [ 2.2, 22.2],
       [ 3.3, 33.3],
       [ 4.4, 44.4]])
'''
```
## Drop Missing Values
```py
# Load libraries
import numpy as np
import pandas as pd
# Create a feature matrix
X = np.array([[1, 2],
              [6, 3],
              [8, 4],
              [9, 5],
              [np.nan, 4]])
# Remove observations with missing values
X[~np.isnan(X).any(axis=1)]
'''
array([[ 1.,  2.],
       [ 6.,  3.],
       [ 8.,  4.],
       [ 9.,  5.]])
'''
# Load the data as a data frame
df = pd.DataFrame(X, columns=['feature_1', 'feature_2'])
# Remove observations with missing values
df.dropna()
```
| | feature_1 | feature_2 |
| --- | --- | --- |
| 0 | 1.0 | 2.0 |
| 1 | 6.0 | 3.0 |
| 2 | 8.0 | 4.0 |
| 3 | 9.0 | 5.0 |
## Detect Outliers
```py
# Load libraries
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.datasets import make_blobs
# Create simulated data
X, _ = make_blobs(n_samples = 10,
                  n_features = 2,
                  centers = 1,
                  random_state = 1)
# Replace the first observation's values with extreme values
X[0,0] = 10000
X[0,1] = 10000
```
`EllipticEnvelope` assumes the data is normally distributed and, based on that assumption, "draws" an ellipse around the data, classifying any observation inside the ellipse as an inlier (labeled `1`) and any observation outside the ellipse as an outlier (labeled `-1`). A major limitation of this approach is that we need to specify a `contamination` parameter, the proportion of observations that are outliers, which is a value we do not know.
```py
# Create the detector
outlier_detector = EllipticEnvelope(contamination=.1)
# Fit the detector
outlier_detector.fit(X)
# Predict outliers
outlier_detector.predict(X)
# array([-1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
```
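Because the true `contamination` is unknown, it can help to look at the raw decision scores instead of the hard `1`/`-1` labels. A small sketch using the detector fit above; more negative scores indicate more anomalous observations:

```py
# View each observation's decision score; the outlier's score is far below the rest
outlier_detector.decision_function(X)
```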
## Discretize Features
```py
# Load libraries
from sklearn.preprocessing import Binarizer
import numpy as np
# Create a feature
age = np.array([[6],
                [12],
                [20],
                [36],
                [65]])
# Create a binarizer
binarizer = Binarizer(threshold=18)
# Transform the feature
binarizer.fit_transform(age)
'''
array([[0],
       [0],
       [1],
       [1],
       [1]])
'''
# Bin the feature
np.digitize(age, bins=[20,30,64])
'''
array([[0],
       [0],
       [1],
       [2],
       [3]])
'''
```
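By default, `np.digitize` treats each bin edge as inclusive on the left (e.g. `20 <= x < 30`). Passing `right=True` makes the edges inclusive on the right instead; a small sketch of the difference (note the observation equal to 20 changes bins):

```py
# Bin the feature with right-inclusive edges; 20 now falls into the first bin
np.digitize(age, bins=[20,30,64], right=True)
'''
array([[0],
       [0],
       [0],
       [2],
       [3]])
'''
```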
## Encode Ordinal Categorical Features
```py
# Load library
import pandas as pd
# Create a feature
df = pd.DataFrame({'Score': ['Low',
                             'Low',
                             'Medium',
                             'Medium',
                             'High']})
# View the data frame
df
```
| | Score |
| --- | --- |
| 0 | Low |
| 1 | Low |
| 2 | Medium |
| 3 | Medium |
| 4 | High |
### Create a Scale Map
```py
# Create a mapper
scale_mapper = {'Low': 1,
                'Medium': 2,
                'High': 3}
# Map the feature values to the scale
df['Scale'] = df['Score'].replace(scale_mapper)
# View the data frame
df
```
| | Score | Scale |
| --- | --- | --- |
| 0 | Low | 1 |
| 1 | Low | 1 |
| 2 | Medium | 2 |
| 3 | Medium | 2 |
| 4 | High | 3 |
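An alternative sketch using pandas' ordered categoricals instead of a hand-written mapper (note that `.codes` starts at 0 rather than 1):

```py
# Encode the ordinal feature as an ordered categorical and take its integer codes
pd.Categorical(df['Score'], categories=['Low', 'Medium', 'High'], ordered=True).codes
# array([0, 0, 1, 1, 2], dtype=int8)
```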
## Handle Imbalanced Classes With Downsampling

In downsampling, we randomly sample without replacement from the majority class (i.e. the class with more observations) to create a new subset of observations equal in size to the minority class.
```py
# Load libraries
import numpy as np
from sklearn.datasets import load_iris
# Load the iris data
iris = load_iris()
# Create the feature matrix
X = iris.data
# Create the target vector
y = iris.target
# Remove the first 40 observations
X = X[40:,:]
y = y[40:]
# Create a binary target vector indicating whether the class is 0
y = np.where((y == 0), 0, 1)
# Look at the imbalanced target vector
y
'''
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
'''
# Indices of each class's observations
i_class0 = np.where(y == 0)[0]
i_class1 = np.where(y == 1)[0]
# Number of observations in each class
n_class0 = len(i_class0)
n_class1 = len(i_class1)
# For every observation of class 0, randomly sample from class 1 without replacement
i_class1_downsampled = np.random.choice(i_class1, size=n_class0, replace=False)
# Join together class 0's target vector with the downsampled class 1's target vector
np.hstack((y[i_class0], y[i_class1_downsampled]))
# array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
```
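The same indices can be reused to build the matching downsampled feature matrix; a minimal sketch:

```py
# Join together class 0's features with the downsampled class 1's features
np.vstack((X[i_class0,:], X[i_class1_downsampled,:]))
```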
## Handle Imbalanced Classes With Upsampling

In upsampling, for every observation in the majority class, we randomly select an observation from the minority class with replacement. The end result is the same number of observations from the minority and majority classes.
```py
# Load libraries
import numpy as np
from sklearn.datasets import load_iris
# Load the iris data
iris = load_iris()
# Create the feature matrix
X = iris.data
# Create the target vector
y = iris.target
# Remove the first 40 observations
X = X[40:,:]
y = y[40:]
# Create a binary target vector indicating whether the class is 0
y = np.where((y == 0), 0, 1)
# Look at the imbalanced target vector
y
'''
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
'''
# Indices of each class's observations
i_class0 = np.where(y == 0)[0]
i_class1 = np.where(y == 1)[0]
# Number of observations in each class
n_class0 = len(i_class0)
n_class1 = len(i_class1)
# For every observation in class 1, randomly sample from class 0 with replacement
i_class0_upsampled = np.random.choice(i_class0, size=n_class1, replace=True)
# Join together class 0's upsampled target vector with class 1's target vector
np.concatenate((y[i_class0_upsampled], y[i_class1]))
'''
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
'''
```
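As with downsampling, the same indices can be reused to build the matching upsampled feature matrix; a minimal sketch:

```py
# Join together class 0's upsampled features with class 1's features
np.vstack((X[i_class0_upsampled,:], X[i_class1,:]))
```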
## Handle Outliers

```py
# Load library
import pandas as pd
# Create a DataFrame
houses = pd.DataFrame()
houses['Price'] = [534433, 392333, 293222, 4322032]
houses['Bathrooms'] = [2, 3.5, 2, 116]
houses['Square_Feet'] = [1500, 2500, 1500, 48000]
houses
```
| | Price | Bathrooms | Square_Feet |
| --- | --- | --- | --- |
| 0 | 534433 | 2.0 | 1500 |
| 1 | 392333 | 3.5 | 2500 |
| 2 | 293222 | 2.0 | 1500 |
| 3 | 4322032 | 116.0 | 48000 |
### Option 1: Drop
```py
# Drop observations greater than some value
houses[houses['Bathrooms'] < 20]
```
| | Price | Bathrooms | Square_Feet |
| --- | --- | --- | --- |
| 0 | 534433 | 2.0 | 1500 |
| 1 | 392333 | 3.5 | 2500 |
| 2 | 293222 | 2.0 | 1500 |
### Option 2: Mark
```py
# Load library
import numpy as np
# Create a feature based on a boolean condition
houses['Outlier'] = np.where(houses['Bathrooms'] < 20, 0, 1)
# Show the data
houses
```
| | Price | Bathrooms | Square_Feet | Outlier |
| --- | --- | --- | --- | --- |
| 0 | 534433 | 2.0 | 1500 | 0 |
| 1 | 392333 | 3.5 | 2500 | 0 |
| 2 | 293222 | 2.0 | 1500 | 0 |
| 3 | 4322032 | 116.0 | 48000 | 1 |
### Option 3: Rescale
```py
# Log-transform the feature
houses['Log_Of_Square_Feet'] = [np.log(x) for x in houses['Square_Feet']]
# Show the data
houses
```
| | Price | Bathrooms | Square_Feet | Outlier | Log_Of_Square_Feet |
| --- | --- | --- | --- | --- | --- |
| 0 | 534433 | 2.0 | 1500 | 0 | 7.313220 |
| 1 | 392333 | 3.5 | 2500 | 0 | 7.824046 |
| 2 | 293222 | 2.0 | 1500 | 0 | 7.313220 |
| 3 | 4322032 | 116.0 | 48000 | 1 | 10.778956 |
## Impute Missing Values With Means
Mean imputation replaces missing values with the mean value of that feature/variable. Mean imputation is one of the most "naive" imputation techniques because, unlike more sophisticated methods such as k-nearest neighbors imputation, it does not use an observation's own information to estimate its value.
```py
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
# Create an empty dataset
df = pd.DataFrame()
# Create two variables called x0 and x1,
# making the first value of x1 a missing value
df['x0'] = [0.3051,0.4949,0.6974,0.3769,0.2231,0.341,0.4436,0.5897,0.6308,0.5]
df['x1'] = [np.nan,0.2654,0.2615,0.5846,0.4615,0.8308,0.4962,0.3269,0.5346,0.6731]
# View the dataset
df
```
| | x0 | x1 |
| --- | --- | --- |
| 0 | 0.3051 | NaN |
| 1 | 0.4949 | 0.2654 |
| 2 | 0.6974 | 0.2615 |
| 3 | 0.3769 | 0.5846 |
| 4 | 0.2231 | 0.4615 |
| 5 | 0.3410 | 0.8308 |
| 6 | 0.4436 | 0.4962 |
| 7 | 0.5897 | 0.3269 |
| 8 | 0.6308 | 0.5346 |
| 9 | 0.5000 | 0.6731 |
### Fit the Imputer
```py
# Create an imputer object that looks for NaN values and replaces them
# column-wise with the feature's mean
mean_imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
# Fit the imputer on the df dataset
mean_imputer = mean_imputer.fit(df)
# Apply the imputer to the df dataset
imputed_df = mean_imputer.transform(df.values)
# View the data
imputed_df
'''
array([[ 0.3051    ,  0.49273333],
       [ 0.4949    ,  0.2654    ],
       [ 0.6974    ,  0.2615    ],
       [ 0.3769    ,  0.5846    ],
       [ 0.2231    ,  0.4615    ],
       [ 0.341     ,  0.8308    ],
       [ 0.4436    ,  0.4962    ],
       [ 0.5897    ,  0.3269    ],
       [ 0.6308    ,  0.5346    ],
       [ 0.5       ,  0.6731    ]])
'''
```
Note that `0.49273333` is the imputed value, replacing the `np.nan` value.
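For a DataFrame, a pandas-only sketch of the same column-wise mean imputation:

```py
# Replace each NaN with the mean of its column
df.fillna(df.mean())
```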
## Impute Missing Class Labels
```py
# Load libraries
import numpy as np
from sklearn.impute import SimpleImputer
# Create a feature matrix with a categorical feature
X = np.array([[0, 2.10, 1.45],
              [1, 1.18, 1.33],
              [0, 1.22, 1.27],
              [0, -0.21, -1.19],
              [np.nan, 0.87, 1.31],
              [np.nan, -0.67, -0.22]])
# Create an imputer object
imputer = SimpleImputer(strategy='most_frequent')
# Fill missing values with the most frequent class
imputer.fit_transform(X)
'''
array([[ 0.  ,  2.1 ,  1.45],
       [ 1.  ,  1.18,  1.33],
       [ 0.  ,  1.22,  1.27],
       [ 0.  , -0.21, -1.19],
       [ 0.  ,  0.87,  1.31],
       [ 0.  , -0.67, -0.22]])
'''
```
## Impute Missing Class Labels Using KNN
```py
# Load libraries
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
# Create a feature matrix with a categorical feature
X = np.array([[0, 2.10, 1.45],
              [1, 1.18, 1.33],
              [0, 1.22, 1.27],
              [1, -0.21, -1.19]])
# Create a feature matrix with missing values in the categorical feature
X_with_nan = np.array([[np.nan, 0.87, 1.31],
                       [np.nan, -0.67, -0.22]])
# Train a KNN learner
clf = KNeighborsClassifier(3, weights='distance')
trained_model = clf.fit(X[:,1:], X[:,0])
# Predict the class of the missing values
imputed_values = trained_model.predict(X_with_nan[:,1:])
# Join the column of predicted classes with their other features
X_with_imputed = np.hstack((imputed_values.reshape(-1,1), X_with_nan[:,1:]))
# Join the two feature matrices
np.vstack((X_with_imputed, X))
'''
array([[ 0.  ,  0.87,  1.31],
       [ 1.  , -0.67, -0.22],
       [ 0.  ,  2.1 ,  1.45],
       [ 1.  ,  1.18,  1.33],
       [ 0.  ,  1.22,  1.27],
       [ 1.  , -0.21, -1.19]])
'''
```
## Normalize Observations

```py
# Load libraries
from sklearn.preprocessing import Normalizer
import numpy as np
# Create a feature matrix
X = np.array([[0.5, 0.5],
              [1.1, 3.4],
              [1.5, 20.2],
              [1.63, 34.4],
              [10.9, 3.3]])
```
`Normalizer` rescales the values of each individual observation so that the observation has unit norm (a total length of 1).
```py
# Create the normalizer
normalizer = Normalizer(norm='l2')
# Transform the feature matrix
normalizer.transform(X)
'''
array([[ 0.70710678,  0.70710678],
       [ 0.30782029,  0.95144452],
       [ 0.07405353,  0.99725427],
       [ 0.04733062,  0.99887928],
       [ 0.95709822,  0.28976368]])
'''
```
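`Normalizer` also supports the L1 (Manhattan) norm, which rescales each observation so that its absolute values sum to 1; a quick sketch:

```py
# Create a normalizer that uses the L1 norm
normalizer_l1 = Normalizer(norm='l1')
# Transform the feature matrix; each transformed row now sums to 1
normalizer_l1.transform(X)
```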
## One-Hot Encode Features With Multiple Labels
```py
# Load libraries
from sklearn.preprocessing import MultiLabelBinarizer
import numpy as np
# Create a list of label tuples
y = [('Texas', 'Florida'),
     ('California', 'Alabama'),
     ('Texas', 'Florida'),
     ('Delaware', 'Florida'),
     ('Texas', 'Alabama')]
# Create a MultiLabelBinarizer object
one_hot = MultiLabelBinarizer()
# One-hot encode the data
one_hot.fit_transform(y)
'''
array([[0, 0, 0, 1, 1],
       [1, 1, 0, 0, 0],
       [0, 0, 0, 1, 1],
       [0, 0, 1, 1, 0],
       [1, 0, 0, 0, 1]])
'''
# View the classes
one_hot.classes_
# array(['Alabama', 'California', 'Delaware', 'Florida', 'Texas'], dtype=object)
```
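The binarizer can also map one-hot rows back to tuples of labels; a small sketch (the tuples come back ordered by `classes_`, not in their original order):

```py
# Convert the one-hot encoded rows back into tuples of labels
one_hot.inverse_transform(one_hot.transform(y))
# [('Florida', 'Texas'), ('Alabama', 'California'), ('Florida', 'Texas'),
#  ('Delaware', 'Florida'), ('Alabama', 'Texas')]
```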
## One-Hot Encode Nominal Categorical Features

```py
# Load libraries
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelBinarizer
# Create a NumPy array
x = np.array([['Texas'],
              ['California'],
              ['Texas'],
              ['Delaware'],
              ['Texas']])
# Create a LabelBinarizer object
one_hot = LabelBinarizer()
# One-hot encode the data
one_hot.fit_transform(x)
'''
array([[0, 0, 1],
       [1, 0, 0],
       [0, 0, 1],
       [0, 1, 0],
       [0, 0, 1]])
'''
# View the classes
one_hot.classes_
'''
array(['California', 'Delaware', 'Texas'],
      dtype='<U10')
'''
# Dummy features
pd.get_dummies(x[:,0])
```
| | California | Delaware | Texas |
| --- | --- | --- | --- |
| 0 | 0 | 0 | 1 |
| 1 | 1 | 0 | 0 |
| 2 | 0 | 0 | 1 |
| 3 | 0 | 1 | 0 |
| 4 | 0 | 0 | 1 |
## Preprocess Categorical Features
Often, machine learning methods (e.g. logistic regression, SVM with a linear kernel, etc.) require that categorical variables be converted into dummy variables (also called one-hot encoding). For example, a single feature `Fruit` would be converted into three features, `Apples`, `Oranges`, and `Bananas`, one for each category of the categorical feature.
There are a couple of common ways to preprocess categorical features: using pandas or scikit-learn.
```py
from sklearn import preprocessing
from sklearn.pipeline import Pipeline
import pandas as pd
raw_data = {'first_name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
            'last_name': ['Miller', 'Jacobson', 'Ali', 'Milner', 'Cooze'],
            'age': [42, 52, 36, 24, 73],
            'city': ['San Francisco', 'Baltimore', 'Miami', 'Douglas', 'Boston']}
df = pd.DataFrame(raw_data, columns = ['first_name', 'last_name', 'age', 'city'])
df
```
| | first_name | last_name | age | city |
| --- | --- | --- | --- | --- |
| 0 | Jason | Miller | 42 | San Francisco |
| 1 | Molly | Jacobson | 52 | Baltimore |
| 2 | Tina | Ali | 36 | Miami |
| 3 | Jake | Milner | 24 | Douglas |
| 4 | Amy | Cooze | 73 | Boston |
```py
# Create dummy variables for every unique category in df.city
pd.get_dummies(df["city"])
```
| | Baltimore | Boston | Douglas | Miami | San Francisco |
| --- | --- | --- | --- | --- | --- |
| 0 | 0.0 | 0.0 | 0.0 | 0.0 | 1.0 |
| 1 | 1.0 | 0.0 | 0.0 | 0.0 | 0.0 |
| 2 | 0.0 | 0.0 | 0.0 | 1.0 | 0.0 |
| 3 | 0.0 | 0.0 | 1.0 | 0.0 | 0.0 |
| 4 | 0.0 | 1.0 | 0.0 | 0.0 | 0.0 |
```py
# Convert the string categorical variable into integers
integerized_data = preprocessing.LabelEncoder().fit_transform(df["city"])
# View the data
integerized_data
# array([4, 0, 3, 2, 1])
# Represent the integer categories as one-hot encodings
preprocessing.OneHotEncoder().fit_transform(integerized_data.reshape(-1,1)).toarray()
'''
array([[ 0.,  0.,  0.,  0.,  1.],
       [ 1.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  1.,  0.],
       [ 0.,  0.,  1.,  0.,  0.],
       [ 0.,  1.,  0.,  0.,  0.]])
'''
```
Note that `pd.get_dummies()` and the scikit-learn approach generate the same output matrix.
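In newer versions of scikit-learn (0.20 and later), `OneHotEncoder` accepts string categories directly, so the intermediate `LabelEncoder` step can be skipped; a hedged sketch:

```py
# One-hot encode the string column directly (requires scikit-learn >= 0.20)
preprocessing.OneHotEncoder().fit_transform(df[['city']]).toarray()
```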
## Preprocess Iris Data
```py
from sklearn import datasets
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Load the iris data
iris = datasets.load_iris()
# Create a variable for the feature data
X = iris.data
# Create a variable for the target data
y = iris.target
# Randomly split the data into four new datasets: training features, training targets,
# test features, and test targets. Set the test data to be 30% of the full dataset.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Load the standard scaler
sc = StandardScaler()
# Compute the mean and standard deviation based on the training data
sc.fit(X_train)
# Scale the training data to zero mean and unit standard deviation
X_train_std = sc.transform(X_train)
# Scale the test data using the same statistics
X_test_std = sc.transform(X_test)
# The test data's features, unstandardized
X_test[0:5]
'''
array([[ 6.1,  2.8,  4.7,  1.2],
       [ 5.7,  3.8,  1.7,  0.3],
       [ 7.7,  2.6,  6.9,  2.3],
       [ 6. ,  2.9,  4.5,  1.5],
       [ 6.8,  2.8,  4.8,  1.4]])
'''
# The test data's features, standardized
X_test_std[0:5]
'''
array([[ 0.3100623 , -0.49582097,  0.48403749, -0.05143998],
       [-0.17225683,  1.92563026, -1.26851205, -1.26670948],
       [ 2.23933883, -0.98011121,  1.76924049,  1.43388941],
       [ 0.18948252, -0.25367584,  0.36720086,  0.35364985],
       [ 1.15412078, -0.49582097,  0.54245581,  0.21861991]])
'''
```
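A quick sanity check: the standardized training features should have a mean of (approximately) zero and unit standard deviation, while the test features will only be close to standardized, since the scaler was fit on the training data alone:

```py
# Column means of the standardized training data are ~0
X_train_std.mean(axis=0)
# Column standard deviations of the standardized training data are ~1
X_train_std.std(axis=0)
```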
## Rescale Features

```py
# Load libraries
from sklearn import preprocessing
import numpy as np
# Create a feature
x = np.array([[-500.5],
              [-100.1],
              [0],
              [100.1],
              [900.9]])
# Create a scaler
minmax_scale = preprocessing.MinMaxScaler(feature_range=(0, 1))
# Scale the feature
x_scale = minmax_scale.fit_transform(x)
# Show the feature
x_scale
'''
array([[ 0.        ],
       [ 0.28571429],
       [ 0.35714286],
       [ 0.42857143],
       [ 1.        ]])
'''
```
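`MinMaxScaler` applies the formula `(x - min) / (max - min)` to each feature; a manual sketch that reproduces the output above:

```py
# Manually apply min-max scaling; this matches minmax_scale's output
(x - x.min()) / (x.max() - x.min())
```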
## Standardize Features

```py
# Load libraries
from sklearn import preprocessing
import numpy as np
# Create a feature
x = np.array([[-500.5],
              [-100.1],
              [0],
              [100.1],
              [900.9]])
# Create a scaler
scaler = preprocessing.StandardScaler()
# Transform the feature
standardized = scaler.fit_transform(x)
# Show the feature
standardized
'''
array([[-1.26687088],
       [-0.39316683],
       [-0.17474081],
       [ 0.0436852 ],
       [ 1.79109332]])
'''
```
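`StandardScaler` computes the z-score `(x - mean) / std` for each feature; a manual sketch that should match the output above (NumPy's default `std` uses the same population formula as scikit-learn):

```py
# Manually standardize the feature; this matches scaler.fit_transform's output
(x - x.mean()) / x.std()
```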