Dimensionality Reduction

Machine Learning

July 29, 2025

PCA

from sklearn.datasets import load_iris
import pandas as pd
import matplotlib.pyplot as plt

# Load iris into a DataFrame with readable column names
iris = load_iris()
columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
df = pd.DataFrame(iris.data, columns=columns)
df['target'] = iris.target
markers = ['^', 's', 'o']

# Scatter the two sepal features, one marker per class
for i, marker in enumerate(markers):
    x_axis_data = df[df['target'] == i]['sepal_length']
    y_axis_data = df[df['target'] == i]['sepal_width']
    plt.scatter(x_axis_data, y_axis_data, marker=marker, label=iris.target_names[i])
plt.legend()
plt.xlabel('sepal length')
plt.ylabel('sepal width')
plt.show()

  • PCA is sensitive to feature scaling, so standardize before fitting (see the quick check after the code below).
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

# Standardize first: PCA directions are driven by variance, so features
# with large raw ranges would otherwise dominate the components
scaled_df = StandardScaler().fit_transform(df.iloc[:, :-1])

pca = PCA(n_components=2)
iris_pca = pca.fit_transform(scaled_df)
pca_columns = ['pca_component_1', 'pca_component_2']
df = pd.DataFrame(iris_pca, columns=pca_columns)
df['target'] = iris.target
markers = ['^', 's', 'o']

for i, marker in enumerate(markers):
    x_axis_data = df[df['target'] == i]['pca_component_1']
    y_axis_data = df[df['target'] == i]['pca_component_2']
    plt.scatter(x_axis_data, y_axis_data, marker=marker, label=iris.target_names[i])
plt.legend()
plt.xlabel('pca_component_1')
plt.ylabel('pca_component_2')
plt.show()
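As a quick check of the scaling point, the sketch below prints the variance ratios for the fitted PCA and for a second PCA on the raw, unscaled data. The exact numbers quoted in the comments are approximate, from memory, not from the original notes.

# Variance retained by the two kept components (standardized data)
print(pca.explained_variance_ratio_)  # roughly [0.73, 0.23]

# The same fit without standardization: the large raw variance of the
# petal measurements dominates, and the split shifts to roughly [0.92, 0.05]
pca_raw = PCA(n_components=2).fit(iris.data)
print(pca_raw.explained_variance_ratio_)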

Credit Card Customer Data

# Default-of-credit-card-clients data; the first column is an ID, so drop it
df = pd.read_excel('_data/creadit_card.xls', header=1, sheet_name='Data').iloc[:, 1:]
df.rename(columns={'PAY_0': 'PAY_1', 'default payment next month': 'default'}, inplace=True)
target = df['default']
features = df.drop('default', axis=1)
import seaborn as sns

# Correlation heatmap across all features
corr = features.corr()
sns.heatmap(corr, annot=True, fmt='.1g')
plt.show()

  • The BILL_AMT1–BILL_AMT6 and PAY_1–PAY_6 groups are each highly correlated internally.
# PCA on just the six strongly correlated BILL_AMT columns
cols_bill = ['BILL_AMT' + str(i) for i in range(1, 7)]
scaler = StandardScaler()
df_cols_scaled = scaler.fit_transform(features[cols_bill])
pca = PCA(n_components=2)
pca.fit(df_cols_scaled)
pca.explained_variance_ratio_
array([0.90555253, 0.0509867 ])
  • So the first component alone explains about 91% of the variance in the six BILL_AMT columns.
  • Do all of the columns have to go into PCA, or is a subset like this enough?
  • It seems all of them should go in; the sketch below compares the two setups.
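A minimal sketch of that comparison, reusing features and target from above. The 3-fold CV, n_components=6, and RandomForestClassifier baseline are all arbitrary choices for illustration, not from the original notes.

from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline

# Baseline: every original feature, no compression
rf = RandomForestClassifier(n_estimators=100, random_state=0)
base_scores = cross_val_score(rf, features, target, cv=3, scoring='accuracy')

# PCA over all (scaled) columns, compressed to 6 components
pca_pipe = make_pipeline(
    StandardScaler(),
    PCA(n_components=6),
    RandomForestClassifier(n_estimators=100, random_state=0),
)
pca_scores = cross_val_score(pca_pipe, features, target, cv=3, scoring='accuracy')

print('all features    :', base_scores.mean())
print('6 PCA components:', pca_scores.mean())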

LDA

  • LDA finds the axes that maximize separation between classes.
  • Unlike PCA, it is supervised, so the fit step also takes the target labels.
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.preprocessing import StandardScaler

iris = load_iris()
iris_scaled = StandardScaler().fit_transform(iris.data)
lda = LinearDiscriminantAnalysis(n_components=2)
# Unlike PCA, fit_transform needs the labels: LDA is supervised
iris_lda = lda.fit_transform(iris_scaled, iris.target)
lda_columns = ['lda_component_1', 'lda_component_2']
df = pd.DataFrame(iris_lda, columns=lda_columns)
df['target'] = iris.target

markers = ['^', 's', 'o']

for i, marker in enumerate(markers):
    x_axis_data = df[df['target'] == i]['lda_component_1']
    y_axis_data = df[df['target'] == i]['lda_component_2']
    plt.scatter(x_axis_data, y_axis_data, marker=marker, label=iris.target_names[i])
plt.legend()
plt.xlabel('lda_component_1')
plt.ylabel('lda_component_2')
plt.show()
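One LDA-specific caveat: scikit-learn caps n_components at min(n_classes - 1, n_features), so 2 is already the maximum for the 3-class iris data. With the default svd solver the model also exposes how each discriminant axis splits the between-class variance; a small sketch reusing iris_scaled from above:

# n_components can be at most n_classes - 1 (= 2 for iris)
lda_full = LinearDiscriminantAnalysis()
lda_full.fit(iris_scaled, iris.target)
print(lda_full.explained_variance_ratio_)  # share of between-class variance per axis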
