Various-Resources-Titanic

Sun 29 June 2025
import pandas as pd
import numpy as np

from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
url = 'https://raw.githubusercontent.com/datasciencedojo/datasets/master/titanic.csv'
df = pd.read_csv(url)
df.head()
   PassengerId  Survived  Pclass                                                Name     Sex   Age  SibSp  Parch            Ticket     Fare Cabin Embarked
0            1         0       3                             Braund, Mr. Owen Harris    male  22.0      1      0         A/5 21171   7.2500   NaN        S
1            2         1       1  Cumings, Mrs. John Bradley (Florence Briggs Th...  female  38.0      1      0          PC 17599  71.2833   C85        C
2            3         1       3                              Heikkinen, Miss. Laina  female  26.0      0      0  STON/O2. 3101282   7.9250   NaN        S
3            4         1       1        Futrelle, Mrs. Jacques Heath (Lily May Peel)  female  35.0      1      0            113803  53.1000  C123        S
4            5         0       3                            Allen, Mr. William Henry    male  35.0      0      0            373450   8.0500   NaN        S
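Before cleaning, a quick structural pass confirms dtypes and non-null counts (a minimal sketch using standard pandas calls; output omitted):

# Inspect column dtypes, non-null counts, and summary statistics
df.info()
df.describe()
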
data = df.copy()

# Drop columns that won't help in prediction
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.head()
   Survived  Pclass     Sex   Age  SibSp  Parch     Fare Embarked
0         0       3    male  22.0      1      0   7.2500        S
1         1       1  female  38.0      1      0  71.2833        C
2         1       3  female  26.0      0      0   7.9250        S
3         1       1  female  35.0      1      0  53.1000        S
4         0       3    male  35.0      0      0   8.0500        S
# Check for missing values
data.isnull().sum()
Survived      0
Pclass        0
Sex           0
Age         177
SibSp         0
Parch         0
Fare          0
Embarked      2
dtype: int64
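Age is missing in 177 of 891 rows (roughly 20%), while Embarked is missing in only 2, which is why the next step imputes the former and drops the latter. Expressed as fractions (a small sketch):

# Share of missing values per column
data.isnull().mean().round(3)
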
# Fill missing 'Age' values with the column mean
data['Age'] = data['Age'].fillna(data['Age'].mean())

# Drop rows with missing 'Embarked'
data.dropna(subset=['Embarked'], inplace=True)

# Verify again
data.isnull().sum()
Survived    0
Pclass      0
Sex         0
Age         0
SibSp       0
Parch       0
Fare        0
Embarked    0
dtype: int64
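Mean imputation is what the run above used; the median is a common alternative since it is less sensitive to outliers in Age. A hedged sketch of that variant on a fresh copy:

# Alternative (not used above): the median is robust to skewed Age values
alt = df.copy()
alt['Age'] = alt['Age'].fillna(alt['Age'].median())

Strictly speaking, computing the fill value on the full dataset before the train/test split leaks a little test-set information; fitting the imputation on the training set alone avoids that.
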
# One-hot encode 'Sex' and 'Embarked'; drop_first avoids a redundant, perfectly collinear column
data = pd.get_dummies(data, columns=['Sex', 'Embarked'], drop_first=True)
data.head()
   Survived  Pclass   Age  SibSp  Parch     Fare  Sex_male  Embarked_Q  Embarked_S
0         0       3  22.0      1      0   7.2500      True       False        True
1         1       1  38.0      1      0  71.2833     False       False       False
2         1       3  26.0      0      0   7.9250     False       False        True
3         1       1  35.0      1      0  53.1000     False       False        True
4         0       3  35.0      0      0   8.0500      True       False        True
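Recent pandas versions return boolean dummy columns, hence the True/False values above. scikit-learn accepts booleans directly, but they can be cast to 0/1 integers if a fully numeric frame is preferred (optional sketch):

# Optional: cast boolean dummy columns to 0/1 integers
bool_cols = data.select_dtypes(bool).columns
data[bool_cols] = data[bool_cols].astype(int)
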
X = data.drop('Survived', axis=1)  # Features
y = data['Survived']              # Target
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)
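Since only about 38% of passengers survived, a stratified split would preserve that class balance in both halves; a sketch of the variant (the run above used a plain random split):

# Variant: stratify on the target to keep the survival rate equal in both sets
X_train_s, X_test_s, y_train_s, y_test_s = train_test_split(
    X, y, test_size=0.3, random_state=42, stratify=y)
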
# Baseline: a plain linear regression fit on the 0/1 target; .score() reports R², not accuracy
lr_model = LinearRegression().fit(X_train, y_train)
print("Linear Regression R² Score:", lr_model.score(X_test, y_test))
Linear Regression R² Score: 0.39185146303300156
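R² is a regression metric, so this score is not directly comparable to the classification accuracies below. To put the linear model on the same footing, its continuous predictions can be thresholded at 0.5 (a sketch, not part of the original run):

# Threshold the continuous predictions at 0.5 to get 0/1 class labels
lr_pred = (lr_model.predict(X_test) >= 0.5).astype(int)
print("Thresholded accuracy:", accuracy_score(y_test, lr_pred))
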
# 1. KNN
knn = KNeighborsClassifier().fit(X_train, y_train)

# 2. Logistic Regression
log_reg = LogisticRegression(solver='liblinear').fit(X_train, y_train)

# 3. Support Vector Machine
svm = SVC(kernel='linear').fit(X_train, y_train)

# 4. Naive Bayes
nb = GaussianNB().fit(X_train, y_train)

# 5. Neural Network (may emit a convergence warning on unscaled features)
mlp = MLPClassifier(max_iter=500).fit(X_train, y_train)

# 6. Decision Tree
tree = DecisionTreeClassifier().fit(X_train, y_train)

# 7. Random Forest
rf = RandomForestClassifier().fit(X_train, y_train)
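KNN, the linear-kernel SVM, and the MLP are all sensitive to feature scale, and Fare spans a much wider range than the dummy columns, so a scaling pipeline often lifts their scores; a sketch using scikit-learn's StandardScaler and Pipeline (not part of the run above):

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# Standardize features before the distance-based KNN; the same wrapper works for SVC and MLP
knn_scaled = Pipeline([('scale', StandardScaler()),
                       ('knn', KNeighborsClassifier())]).fit(X_train, y_train)
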
models = [knn, log_reg, svm, nb, mlp, tree, rf]

for model in models:
    name = model.__class__.__name__
    y_pred = model.predict(X_test)
    acc = accuracy_score(y_test, y_pred)
    print("-" * 28)
    print(f"{name}:")
    print("Accuracy: {:.2%}".format(acc))
----------------------------
KNeighborsClassifier:
Accuracy: 68.91%
----------------------------
LogisticRegression:
Accuracy: 79.03%
----------------------------
SVC:
Accuracy: 79.03%
----------------------------
GaussianNB:
Accuracy: 78.28%
----------------------------
MLPClassifier:
Accuracy: 77.15%
----------------------------
DecisionTreeClassifier:
Accuracy: 77.15%
----------------------------
RandomForestClassifier:
Accuracy: 77.53%
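
A single 70/30 split can swing a few points either way; k-fold cross-validation gives a steadier comparison. A sketch with 5-fold CV (assuming scikit-learn's cross_val_score; these numbers will differ from the table above):

from sklearn.model_selection import cross_val_score

# 5-fold cross-validated accuracy for each estimator
for model in models:
    scores = cross_val_score(model, X, y, cv=5, scoring='accuracy')
    print(f"{model.__class__.__name__}: {scores.mean():.2%} (+/- {scores.std():.2%})")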


Category: basics