本文思路:
https://blog.csdn.net/u013788252/article/details/105528116
(运行环境: jupyter notebook python3)
完善后的完整代码:
下面展示完整代码。
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.linear_model import LogisticRegressionCV
from sklearn.ensemble import RandomForestClassifier
from sklearn import tree
from sklearn.model_selection import train_test_split
# Load the Titanic dataset and run exploratory visualisations.
path = r'C:\Users\Administrator\Desktop\titanic.csv'
data = pd.read_csv(path)

# Overall survival rate as a pie chart.
data['Survived'].value_counts().plot.pie(autopct='%0.2f%%')
# Mean survival rate by sex and by passenger class.
data[['Sex','Survived']].groupby(['Sex']).mean().plot.bar()
data[['Pclass','Survived']].groupby(['Pclass']).mean().plot.bar()

# Survival rate by age band: child (0-12], teen (12-18], adult (18-65], senior (65-100].
bins = [0, 12, 18, 65, 100]
data['Age_group'] = pd.cut(data['Age'], bins)
by_age = data.groupby('Age_group')['Survived'].mean()
by_age.plot.bar()

# Age distribution split by survival, per class and per sex.
# NOTE: passing x/y positionally to violinplot was removed in seaborn >= 0.13;
# keyword arguments work on every seaborn version.
fig, ax = plt.subplots(1, 2, figsize=(18, 8))
sns.violinplot(x="Pclass", y="Age", hue="Survived", data=data, split=True, ax=ax[0])
ax[0].set_title('Pclass and Age vs Survived')
sns.violinplot(x="Sex", y="Age", hue="Survived", data=data, split=True, ax=ax[1])
ax[1].set_title('Sex and Age vs Survived')
plt.show()
# --- Fill missing values -------------------------------------------------
# Embarked: fill the few missing ports with the most frequent one.
# Direct reassignment replaces the original chained assignment
# (data.Embarked[data.Embarked.isnull()] = ...), which triggers
# SettingWithCopyWarning and is a no-op under pandas copy-on-write.
data['Embarked'] = data['Embarked'].fillna(data['Embarked'].mode()[0])

from sklearn.ensemble import RandomForestRegressor

# Age: predict the missing ages from the other numeric features with a
# random forest trained on the rows where Age is known.
age_df = data[['Age','Survived','Fare', 'Parch', 'SibSp', 'Pclass']]
age_df_notnull = age_df.loc[data['Age'].notnull()]
age_df_isnull = age_df.loc[data['Age'].isnull()]
X = age_df_notnull.values[:, 1:]   # feature columns
Y = age_df_notnull.values[:, 0]    # target: Age
RFR = RandomForestRegressor(n_estimators=1000, n_jobs=-1)
RFR.fit(X, Y)
predictAges = RFR.predict(age_df_isnull.values[:, 1:])
# Write the predictions back into the rows whose Age was missing.
data.loc[data['Age'].isnull(), ['Age']] = predictAges
# One-hot encode every categorical column, then fit a cross-validated
# logistic regression and report its accuracy on a held-out split.
df = pd.get_dummies(data)
# pandas 2.0 removed the positional `axis` argument of drop();
# spell the keyword out explicitly.
x = df.drop('Survived', axis=1)
y = df.Survived
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
lr = LogisticRegressionCV(multi_class='ovr', fit_intercept=True,
                          Cs=np.logspace(-2, 2, 20), cv=2,
                          penalty='l2', solver='lbfgs', tol=0.01)
re = lr.fit(x_train, y_train)
print("准确率:", lr.score(x_test, y_test))
(截止上一步已完成了建模与准确率测算)
本部分结合参考:https://blog.csdn.net/weixin_43532000/article/details/108086363
分析中用到的相关库
# Libraries used throughout the analysis below.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
# Silence library deprecation chatter in the notebook output.
warnings.filterwarnings('ignore')
# Fixed random seed so the analysis is reproducible.
seed =2020
PassengerId => 乘客ID
Survived=>是否生存下来
Pclass => 乘客等级(1/2/3等舱位)
Name => 乘客姓名
Sex => 性别
Age => 年龄
SibSp => 船上兄弟姐妹/配偶个数
Parch => 父母与小孩个数
Ticket => 船票信息
Fare => 票价
Cabin => 客舱
Embarked => 登船港口
如下代码也可查看数据情况,代码片段如下:
def _data_info(data,categorical_features):
print('number of train examples = {}'.format(data.shape[0]))
print('number of train Shape = {}'.format(data.shape))
print('Features={}'.format(data.columns))
print('\n--------输出类别特征的种类--------')
for i in categorical_features:
if i in list(data.columns):
print("train:"+i+":",list(data[i].unique()))
print('\n--------缺失值--------')
missing = data.isnull().sum()
missing = missing[missing > 0]
print(missing)
missing.sort_values(inplace=True)
missing.plot.bar()
plt.show()
def data_info(data_train, data_test, categorical_features):
    """Profile the train and test sets side by side via ``_data_info``.

    Parameters
    ----------
    data_train, data_test : pd.DataFrame
        The two frames to summarise.
    categorical_features : iterable of str
        Column names whose unique values should be listed in each frame.
    """
    print('--------训练集基本概况--------')
    _data_info(data_train, categorical_features)
    print('\n\n--------测试集基本概况--------')
    _data_info(data_test, categorical_features)
调用函数:
# Profile both frames; the list names the categorical columns to enumerate.
data_info(train_df,test_df,['Survived','Pclass','Sex','Cabin','Embarked','SibSp','Parch'])
数据清洗及分析在jupyter notebook中进行,便于观察及分析。
代码如下:
# Concatenate train and test so encoders are fitted on all data; the
# `train` flag lets us split the two sets apart again afterwards.
train_df['train'] = 1
test_df['train'] = 0
data_df = pd.concat([train_df, test_df], sort=True).reset_index(drop=True)
data_df.drop('PassengerId', inplace=True, axis=1)

from sklearn import preprocessing

# Encode Sex as integer labels.
ler_sex = preprocessing.LabelEncoder()
ler_sex.fit(data_df['Sex'])
data_df['Sex'] = ler_sex.transform(data_df['Sex'])

# Fill missing Embarked with the most frequent port, then label-encode.
# Reassignment replaces the original
# data_df['Embarked'].fillna(..., inplace=True), which operates on an
# intermediate object and is silently ineffective under pandas copy-on-write.
data_df['Embarked'] = data_df['Embarked'].fillna(data_df['Embarked'].mode()[0])
ler_Embarked = preprocessing.LabelEncoder()
ler_Embarked.fit(data_df['Embarked'])
data_df['Embarked'] = ler_Embarked.transform(data_df['Embarked'])

# Cabin is mostly missing; drop it outright.
data_df.drop('Cabin', inplace=True, axis=1)
在该阶段可以自由分析,维度自行选择。最终得到清洗好的集合即可。
# Split the combined frame back into train / test. The .copy() calls fix
# the original code, which mutated views of data_df and therefore raised
# SettingWithCopyWarning (a silent no-op under pandas copy-on-write).
train_data = data_df[data_df.train == 1].copy()
train_data['Survived'] = train_df['Survived']
train_data.drop('train', axis=1, inplace=True)
test_data = data_df[data_df.train == 0].copy()
test_data.drop(['Survived', 'train'], axis=1, inplace=True)
import pandas as pd
from sklearn.model_selection import train_test_split
if __name__ == "__main__":
    # Split the raw Titanic CSV 70/30 into separate train / test files.
    # (The blog extraction had destroyed this block's indentation and
    # fused the last three statements onto a single line.)
    data = pd.read_csv(r'C:\Users\Administrator\Desktop\titanic.csv')
    x = data.iloc[:, :-1]   # features: every column except the last
    y = data.iloc[:, -1]    # target: the last column
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)
    print(len(x_train))
    print(len(x_test))
    # Re-attach the labels to their feature rows, then restore headers.
    res_train = pd.concat([x_train, y_train], axis=1, ignore_index=True)
    res_test = pd.concat([x_test, y_test], axis=1, ignore_index=True)
    print(len(res_train))
    print(len(res_test))
    res_train.columns = data.columns
    res_test.columns = data.columns
    res_train.to_csv(r'C:\Users\Administrator\Desktop\titanic_train.csv', index=False, header=True)
    res_test.to_csv(r'C:\Users\Administrator\Desktop\titanic_test.csv', index=False, header=True)
Original: https://blog.csdn.net/caoyatingde/article/details/122594773
Author: dai_ricky
Title: 泰坦尼克号数据分析 预测建模 准确率测算
原创文章受到原创版权保护。转载请注明出处:https://www.johngo689.com/601096/
转载文章受原作者版权保护。转载请注明原作者出处!