Big Data Analysis Engineer (빅분기) practical exam, Type 2 task template

# Load the data with read_csv()
import pandas as pd
import numpy as np
x_test = pd.read_csv("data/X_test.csv")
x_train = pd.read_csv("data/X_train.csv")
y_train = pd.read_csv("data/y_train.csv")

# ------- Inspect the data ---------

# Check all column values (transpose with .T)
print(x_train.head().T)

# Check rows/columns with shape (optional)
print(x_train.shape)

# Check summary information with info()
## Check dtypes, missing values, etc.; object dtype means categorical
### Categorical (object) columns will need encoding
print(x_train.info())

# Check basic statistics with describe()
## count, mean, std, min, 25%, 50%, 75%, max
## Check whether scaling is needed (matters for regression and non-tree models)
print(x_train.describe())
#print(x_train.describe().loc['min', :])

# Check the target values with unique()
## Also make sure y is 1-dimensional, shape (n,)
print(y_train.head().T)
y_train = y_train['gender']
print(y_train.unique())

# ------- Data preprocessing ---------

# Drop unnecessary columns with drop(columns=[])
## e.g. columns that only act as a primary key
cust_id = x_test['cust_id']   # keep the test IDs for the submission file
x_train = x_train.drop(columns = ['cust_id'])
x_test = x_test.drop(columns = ['cust_id'])
print(x_train.head().T)

# Handle missing values with fillna()
## Do not drop rows with missing values arbitrarily
## Fill with the mean, the median, or a value that fits the situation (see the sketch after this block)
print(x_train.isnull().sum())
x_train['환불금액'] = x_train['환불금액'].fillna(0)
x_test['환불금액'] = x_test['환불금액'].fillna(0)
print(x_train.isnull().sum())
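
# A hedged alternative to filling with 0: fill with a statistic computed from
# the training data only, then reuse that value for the test set (shown
# commented out; the 0-fill above is what the rest of this template uses).
#fill_value = x_train['환불금액'].median()
#x_train['환불금액'] = x_train['환불금액'].fillna(fill_value)
#x_test['환불금액'] = x_test['환불금액'].fillna(fill_value)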

# Encode categorical variables
## Label encoding: LabelEncoder; one-hot encoding: OneHotEncoder (a get_dummies sketch follows this block)
## Label encoding is fine for tree-based classifiers
from sklearn.preprocessing import LabelEncoder

print(x_train['주구매상품'].unique())
print(x_test['주구매상품'].unique())

# Fit one encoder per column on the combined train+test values so the integer
# codes stay consistent across both sets, then transform each set separately.
for col in ['주구매상품', '주구매지점']:
    encoder = LabelEncoder()
    encoder.fit(pd.concat([x_train[col], x_test[col]]))
    x_train[col] = encoder.transform(x_train[col])
    x_test[col] = encoder.transform(x_test[col])

print(x_train['주구매상품'].unique())
print(x_test['주구매상품'].unique())
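
# A hedged alternative to the label encoding above: one-hot encoding via
# pd.get_dummies, applied to the combined frame so train and test end up with
# identical dummy columns (shown commented out).
#combined = pd.concat([x_train, x_test])
#combined = pd.get_dummies(combined, columns=['주구매상품', '주구매지점'])
#x_train = combined.iloc[:len(x_train)]
#x_test = combined.iloc[len(x_train):]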

# Create derived variables
## Only if needed
condition = x_train['환불금액'] > 0
x_train.loc[condition, '환불금액_new'] = 1
x_train.loc[~condition, '환불금액_new'] = 0
print(x_train[['환불금액','환불금액_new']].head())

condition = x_test['환불금액'] > 0
x_test.loc[condition, '환불금액_new'] = 1
x_test.loc[~condition, '환불금액_new'] = 0
print(x_test[['환불금액','환불금액_new']].head())

x_train = x_train.drop(columns = ['환불금액'])
x_test = x_test.drop(columns = ['환불금액'])

# Scale the data
## Useful when continuous variables have very different min/max ranges
## MinMaxScaler, StandardScaler, RobustScaler (an alternative sketch follows this block)
## For the test set, only transform() with the scaler fitted on train
## Not strictly required for tree-based models

print(x_train.describe().T)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train = pd.DataFrame(scaler.fit_transform(x_train), columns=x_train.columns)
x_test = pd.DataFrame(scaler.transform(x_test), columns=x_test.columns)
print(x_train.describe().T)
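
# A hedged alternative scaler: MinMaxScaler rescales each column into [0, 1].
# Shown commented out; as above, fit on train only and only transform test.
#from sklearn.preprocessing import MinMaxScaler
#scaler = MinMaxScaler()
#x_train = pd.DataFrame(scaler.fit_transform(x_train), columns=x_train.columns)
#x_test = pd.DataFrame(scaler.transform(x_test), columns=x_test.columns)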

# Check correlations with corr()
## Only if needed
print(x_train[['총구매액','최대구매액','환불금액_new']].corr())
x_train = x_train.drop(columns = ['최대구매액'])
x_test = x_test.drop(columns = ['최대구매액'])

# Verify preprocessing with info()
print(x_train.info())
print(x_test.info())

# ------- Machine learning ---------

# Split off a validation set with model_selection
## Check stratify, test_size, and that y is 1-dimensional
print(y_train.value_counts()/len(y_train))
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(x_train, y_train, test_size=0.2, stratify=y_train, random_state=42)
print(Y_train.value_counts()/len(Y_train))

# Train the model
## fit, predict, predict_proba
## Classification: XXXClassifier, LogisticRegression
## Regression (prediction): XXXRegressor (XXXRegression)
## Both available under ensemble
## RandomForestXXX: n_estimators (default = 100), max_depth, criterion
## criterion: the function that measures split quality
### RandomForestClassifier: criterion {"gini", "entropy", "log_loss"}, default="gini"
### RandomForestRegressor: criterion {"squared_error", "absolute_error"}, default="squared_error" (older names "mse", "mae" were removed in v1.2)
## GradientBoostingXXX: n_estimators, learning_rate (default = 0.1)
## (a hyperparameter sketch follows the fit/predict code below)

from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier

from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor


#model = DecisionTreeClassifier(random_state=42) 
#model = LogisticRegression(random_state=42) 
model = RandomForestClassifier(random_state=42)  
#model = GradientBoostingClassifier() 

model.fit(X_train, Y_train)
pred = model.predict(X_test)
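
# A hedged sketch of the hyperparameters listed above (the values here are
# illustrative assumptions, not tuned results), shown commented out.
#model = RandomForestClassifier(n_estimators=200, max_depth=10, random_state=42)
#model = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1, random_state=42)
#model.fit(X_train, Y_train)
#pred = model.predict(X_test)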


# Evaluate the model with metrics
## Regression: MAE, MSE, RMSE, R^2; RMSE is np.sqrt(MSE) (see the sketch after the score below)
## Classification: ROC_AUC, Accuracy, Precision, Recall
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score

# ROC AUC should use the predicted probability of the positive class, not hard labels
pred_proba = model.predict_proba(X_test)[:, 1]
print(roc_auc_score(Y_test, pred_proba))
print(accuracy_score(Y_test, pred))
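
# A hedged sketch of the regression metrics listed above, assuming a regressor
# (e.g. RandomForestRegressor) had been trained instead; shown commented out.
#mae = mean_absolute_error(Y_test, pred)
#mse = mean_squared_error(Y_test, pred)
#rmse = np.sqrt(mse)
#r2 = r2_score(Y_test, pred)
#print(mae, mse, rmse, r2)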


# ------- Submit the answer ---------

# Retrain on the full training data
model.fit(x_train, y_train)

# Predictions for submission
pred = model.predict(x_test)


# Submission reference
## Adapt the prediction column and the exam number in the code below to your own case
#pd.DataFrame({'cust_id': cust_id, 'gender': pred}).to_csv('424242.csv', index=False)

# Final check of the submission file
#print(pd.read_csv("424242.csv"))