

Machine Learning in Action: Ensemble Learning (23)


Ensemble Learning: Predicting Titanic Passenger Survival

import pandas

# Load the Titanic training data
titanic = pandas.read_csv("titanic_train.csv")
titanic

# Fill missing Age values with the median of the Age column
titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())
print(titanic.describe())


print(titanic["Sex"].unique())# 把male變成0,把female變成1 titanic.loc[titanic["Sex"] == "male", "Sex"] = 0 titanic.loc[titanic["Sex"] == "female", "Sex"] = 1


print(titanic["Embarked"].unique()) # 數據填充 titanic["Embarked"] = titanic["Embarked"].fillna('S') # 把類別變成數字 titanic.loc[titanic["Embarked"] == "S", "Embarked"] = 0 titanic.loc[titanic["Embarked"] == "C", "Embarked"] = 1 titanic.loc[titanic["Embarked"] == "Q", "Embarked"] = 2


from sklearn.preprocessing import StandardScaler

# Select the feature columns
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
x_data = titanic[predictors]
y_data = titanic["Survived"]

# Standardize the features to zero mean and unit variance
scaler = StandardScaler()
x_data = scaler.fit_transform(x_data)
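As a quick sanity check (a minimal sketch, not part of the original walkthrough), each standardized column should now have mean close to 0 and standard deviation close to 1:

import numpy as np

# fit_transform returns a NumPy array; check its column statistics
print(np.round(x_data.mean(axis=0), 6))
print(np.round(x_data.std(axis=0), 6))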

Logistic regression

from sklearn import model_selection
from sklearn.linear_model import LogisticRegression

# Logistic regression model
LR = LogisticRegression()

# Score the model with 3-fold cross-validation
scores = model_selection.cross_val_score(LR, x_data, y_data, cv=3)

# Average the fold scores
print(scores.mean())
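Because the features were standardized, the fitted coefficients are roughly comparable in magnitude. A minimal sketch (not in the original) pairing each predictor with its coefficient:

# Fit on the full data and inspect the learned coefficients
LR.fit(x_data, y_data)
for name, coef in zip(predictors, LR.coef_[0]):
    print(f"{name}: {coef:.3f}")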


Neural network

from sklearn.neural_network import MLPClassifier

# A small multi-layer perceptron with hidden layers of 20 and 10 units
mlp = MLPClassifier(hidden_layer_sizes=(20,10), max_iter=1000)

# Score with 3-fold cross-validation and average
scores = model_selection.cross_val_score(mlp, x_data, y_data, cv=3)
print(scores.mean())


KNN

from sklearn import neighbors

# K-nearest neighbors with k = 21
knn = neighbors.KNeighborsClassifier(21)

# Score with 3-fold cross-validation and average
scores = model_selection.cross_val_score(knn, x_data, y_data, cv=3)
print(scores.mean())
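The choice k = 21 is given without justification; a common way to pick it is to sweep candidate values and keep the best cross-validated score. A minimal sketch (an addition, not from the original):

# Sweep odd k values and report the best cross-validated accuracy
best_k, best_score = None, 0.0
for k in range(1, 40, 2):
    s = model_selection.cross_val_score(
        neighbors.KNeighborsClassifier(k), x_data, y_data, cv=3).mean()
    if s > best_score:
        best_k, best_score = k, s
print(best_k, best_score)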


Decision tree

from sklearn import tree

# Decision tree, depth-limited to reduce overfitting
dtree = tree.DecisionTreeClassifier(max_depth=5, min_samples_split=4)

# Score with 3-fold cross-validation and average
scores = model_selection.cross_val_score(dtree, x_data, y_data, cv=3)
print(scores.mean())
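The max_depth=5 and min_samples_split=4 settings are fixed by hand here; a minimal GridSearchCV sketch (an assumption, not part of the original) would tune them instead:

from sklearn.model_selection import GridSearchCV

# Search a small grid of tree hyperparameters with 3-fold cross-validation
param_grid = {"max_depth": [3, 5, 7, 9], "min_samples_split": [2, 4, 8]}
grid = GridSearchCV(tree.DecisionTreeClassifier(), param_grid, cv=3)
grid.fit(x_data, y_data)
print(grid.best_params_, grid.best_score_)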


Random forest

# Random forest with 10 trees
from sklearn.ensemble import RandomForestClassifier

RF1 = RandomForestClassifier(random_state=1, n_estimators=10, min_samples_split=2)

# Score with 3-fold cross-validation and average
scores = model_selection.cross_val_score(RF1, x_data, y_data, cv=3)
print(scores.mean())

# A larger forest with 100 trees
RF2 = RandomForestClassifier(n_estimators=100, min_samples_split=4)

# Score with 3-fold cross-validation and average
scores = model_selection.cross_val_score(RF2, x_data, y_data, cv=3)
print(scores.mean())
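A fitted random forest also exposes feature_importances_; a minimal sketch (not in the original) that fits RF2 on the full data to see which predictors carry the most signal:

# Rank the predictors by their importance in the forest
RF2.fit(x_data, y_data)
for name, imp in sorted(zip(predictors, RF2.feature_importances_),
                        key=lambda t: -t[1]):
    print(f"{name}: {imp:.3f}")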


Bagging

from sklearn.ensemble import BaggingClassifier

# Bag 20 copies of RF2, each trained on a bootstrap sample of the data
bagging_clf = BaggingClassifier(RF2, n_estimators=20)

# Score with 3-fold cross-validation and average
scores = model_selection.cross_val_score(bagging_clf, x_data, y_data, cv=3)
print(scores.mean())


AdaBoost

from sklearn.ensemble import AdaBoostClassifier

# AdaBoost with the bagging classifier above as its base estimator
adaboost = AdaBoostClassifier(bagging_clf, n_estimators=10)

# Score with 3-fold cross-validation and average
scores = model_selection.cross_val_score(adaboost, x_data, y_data, cv=3)
print(scores.mean())
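Boosting a heavyweight bagged forest is unusual and slow. AdaBoost is more often run over shallow trees; scikit-learn's default base estimator is a depth-1 decision stump. For comparison, a minimal sketch (an addition, not from the original):

# AdaBoost with its default base estimator (decision stumps)
adaboost_stumps = AdaBoostClassifier(n_estimators=100)
scores = model_selection.cross_val_score(adaboost_stumps, x_data, y_data, cv=3)
print(scores.mean())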


Stacking and voting

from sklearn.ensemble import VotingClassifier
from mlxtend.classifier import StackingClassifier

# Stacking: the base classifiers' predictions feed a logistic-regression meta-model
sclf = StackingClassifier(classifiers=[bagging_clf, mlp, LR],
                          meta_classifier=LogisticRegression())

# Score the stacking model with 3-fold cross-validation and average
scores = model_selection.cross_val_score(sclf, x_data, y_data, cv=3)
print(scores.mean())

# Voting: combine five classifiers by majority vote
sclf2 = VotingClassifier([('adaboost', adaboost), ('mlp', mlp), ('LR', LR),
                          ('knn', knn), ('dtree', dtree)])

# Score the voting model with 3-fold cross-validation and average
scores = model_selection.cross_val_score(sclf2, x_data, y_data, cv=3)
print(scores.mean())
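VotingClassifier defaults to hard (majority) voting; when every base model implements predict_proba, averaging the predicted probabilities can work better. A minimal soft-voting sketch (an assumption, not part of the original):

# Soft voting averages predicted class probabilities instead of counting votes
soft_vote = VotingClassifier(
    [('mlp', mlp), ('LR', LR), ('knn', knn), ('dtree', dtree)],
    voting='soft')
scores = model_selection.cross_val_score(soft_vote, x_data, y_data, cv=3)
print(scores.mean())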

Ensemble Learning: Breast Cancer Prediction

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")

# Load the breast cancer data
df = pd.read_csv("data.csv")
df.head()


# Drop the id column, then inspect the label values
df = df.drop('id', axis=1)
df.diagnosis.unique()

# Encode the diagnosis label: malignant (M) -> 1, benign (B) -> 0
df['diagnosis'] = df['diagnosis'].map({'M':1, 'B':0})
df.head()

df.describe()

# Heatmap of the pairwise correlation coefficients between the features
plt.figure(figsize=(20,20))
p = sns.heatmap(df.corr(), annot=True, square=True)
plt.show()
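The heatmap shows that many of these features are strongly correlated with one another. A minimal sketch (not in the original) that lists the most redundant pairs numerically:

# List feature pairs whose absolute correlation exceeds 0.95
corr = df.corr().abs()
for i, a in enumerate(corr.columns):
    for b in corr.columns[i+1:]:
        if corr.loc[a, b] > 0.95:
            print(f"{a} ~ {b}: {corr.loc[a, b]:.3f}")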

# Inspect the label distribution
print(df.diagnosis.value_counts())

# Plot the label counts as a bar chart
p = df.diagnosis.value_counts().plot(kind="bar")
plt.show()

# Split into features and labels
x_data = df.drop(['diagnosis'], axis=1)
y_data = df['diagnosis']

from sklearn.model_selection import train_test_split

# Split the data; stratify=y_data keeps the class ratio in the train and
# test sets the same as in y_data (e.g. if y_data has 0s and 1s in a 1:2
# ratio, y_train and y_test each keep that 1:2 ratio)
x_train, x_test, y_train, y_test = train_test_split(
    x_data, y_data, test_size=0.3, stratify=y_data)

from sklearn.metrics import accuracy_score
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier

classifiers = [
    KNeighborsClassifier(3),
    LogisticRegression(),
    MLPClassifier(hidden_layer_sizes=(20,50), max_iter=10000),
    DecisionTreeClassifier(),
    RandomForestClassifier(max_depth=9, min_samples_split=3),
    AdaBoostClassifier(),
    BaggingClassifier(),
]

# Train each classifier and record its test-set accuracy
log = []
for clf in classifiers:
    clf.fit(x_train, y_train)
    name = clf.__class__.__name__
    print("="*30)
    print(name)
    print('****Results****')
    test_predictions = clf.predict(x_test)
    acc = accuracy_score(y_test, test_predictions)
    print("Accuracy: {:.4%}".format(acc))
    log.append([name, acc*100])
print("="*30)
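Accuracy alone can hide class-specific errors on an imbalanced label like this one. A minimal sketch (not in the original) that also prints the confusion matrix and per-class precision and recall for the last model fitted in the loop above:

from sklearn.metrics import classification_report, confusion_matrix

# clf still references the last classifier fitted in the loop
print(confusion_matrix(y_test, clf.predict(x_test)))
print(classification_report(y_test, clf.predict(x_test)))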

log = pd.DataFrame(log)
log

log.rename(columns={0: 'Classifier', 1: 'Accuracy'}, inplace=True)

# Bar chart comparing the classifiers' test accuracies
sns.barplot(x='Accuracy', y='Classifier', data=log, color="b")
plt.xlabel('Accuracy %')
plt.title('Classifier Accuracy')
plt.show()

Summary

This post compared single models (logistic regression, a neural network, KNN, a decision tree) against ensemble methods (random forest, bagging, AdaBoost, voting, and stacking) on the Titanic survival data, then benchmarked a similar set of classifiers on the breast cancer dataset and plotted their test accuracies.