Ts is tech

2026-01-10 22:18:24 -06:00
parent 9245f31fa1
commit 285ccb3153
2 changed files with 111 additions and 61 deletions

.gitignore (vendored)

@@ -1,2 +1,4 @@
asl_kaggle/
hand_landmarker.task
asl-dataset.zip
asl-signs.zip


@@ -14,6 +14,39 @@ import torch.nn.functional as F
import math
from pathlib import Path
# GPU Configuration
print("=" * 50)
print("GPU CONFIGURATION")
print("=" * 50)
# Check CUDA availability
if torch.cuda.is_available():
print(f"✓ CUDA is available!")
print(f"✓ GPU Device: {torch.cuda.get_device_name(0)}")
print(f"✓ CUDA Version: {torch.version.cuda}")
print(f"✓ Number of GPUs: {torch.cuda.device_count()}")
print(f"✓ Current GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1024 ** 3:.2f} GB")
# Set default GPU device
torch.cuda.set_device(0)
device = torch.device('cuda:0')
# Enable cuDNN benchmark for better performance
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
print(f"✓ cuDNN benchmark mode: enabled")
else:
print("✗ CUDA is NOT available. Using CPU.")
print(" Make sure you have:")
print(" 1. NVIDIA GPU")
print(" 2. CUDA toolkit installed")
print(" 3. PyTorch with CUDA support")
device = torch.device('cpu')
print("=" * 50)
print()
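# Design note (not part of this commit): cudnn.benchmark auto-tunes convolution kernels for
# fixed input shapes, trading exact run-to-run reproducibility for speed. A reproducible run
# would instead seed the RNGs and disable benchmarking, e.g.:
#   torch.manual_seed(0)
#   torch.backends.cudnn.benchmark = False
#   torch.backends.cudnn.deterministic = True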
# Load the dataset
def load_kaggle_asl_data(base_path='asl_kaggle'):
@@ -24,57 +57,40 @@ def load_kaggle_asl_data(base_path='asl_kaggle'):
- train_landmark_files/ directory
- sign_to_prediction_index_map.json
"""
# Load train.csv
train_df = pd.read_csv(os.path.join(base_path, 'train.csv'))
# Load sign mapping
with open(os.path.join(base_path, 'sign_to_prediction_index_map.json'), 'r') as f:
sign_to_idx = json.load(f)
print(f"Total sequences: {len(train_df)}")
print(f"Unique signs: {len(sign_to_idx)}")
print(f"Signs: {list(sign_to_idx.keys())[:10]}...")  # Show first 10
return train_df, sign_to_idx
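# Layout sketch (assumption, based on the docstring above and the Kaggle ASL Signs dataset;
# only train.csv and the JSON map are actually read here):
#   asl_kaggle/
#     train.csv                              # per-sequence metadata, incl. the 'path' used below
#     sign_to_prediction_index_map.json      # {"sign": class_index, ...}
#     train_landmark_files/<participant_id>/<sequence_id>.parquet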
def extract_hand_landmarks_from_parquet(parquet_path):
"""
Extract hand landmarks from a parquet file
The file contains landmarks for face, left_hand, pose, right_hand
We only care about hand landmarks
"""
df = pd.read_parquet(parquet_path)
# Filter for hand landmarks only (left_hand or right_hand)
# For ASL, we'll use whichever hand is dominant in the sequence
left_hand = df[df['type'] == 'left_hand']
right_hand = df[df['type'] == 'right_hand']
# Use the hand with more detected landmarks
if len(left_hand) > len(right_hand):
hand_df = left_hand
elif len(right_hand) > 0:
hand_df = right_hand
else:
return None  # No hand detected
# Get unique frames
frames = hand_df['frame'].unique()
# We'll use the middle frame (most stable) or average across frames
# For now, let's average the landmarks across all frames
landmarks_list = []
for landmark_idx in range(21):  # MediaPipe has 21 hand landmarks
landmark_data = hand_df[hand_df['landmark_index'] == landmark_idx]
if len(landmark_data) == 0:
# Missing landmark, use zeros
landmarks_list.append([0.0, 0.0, 0.0])
else:
# Average across frames
x = landmark_data['x'].mean()
y = landmark_data['y'].mean()
z = landmark_data['z'].mean()
@@ -84,34 +100,26 @@ def extract_hand_landmarks_from_parquet(parquet_path):
def get_optimized_features(landmarks_array):
"""
Extract optimally normalized relative coordinates from landmark array
landmarks_array: (21, 3) numpy array
Returns 77 features
"""
if landmarks_array is None:
return None
points = landmarks_array.copy()
# Translation invariance
wrist = points[0].copy()
points_centered = points - wrist
# Scale invariance
palm_size = np.linalg.norm(points[9] - points[0])
if palm_size < 1e-6:
palm_size = 1.0
points_normalized = points_centered / palm_size
# Standardization
mean = np.mean(points_normalized, axis=0)
std = np.std(points_normalized, axis=0) + 1e-8
points_standardized = (points_normalized - mean) / std
features = points_standardized.flatten()
# Derived features
finger_tips = [4, 8, 12, 16, 20]
tip_distances = []
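# Sanity-check sketch (not part of this commit): the steps above make the 63 flattened
# coordinates invariant to translating and uniformly rescaling the input, e.g.
#   pts = np.random.default_rng(0).random((21, 3))
#   f1 = get_optimized_features(pts)
#   f2 = get_optimized_features(pts * 2.5 + 0.3)   # same pose, shifted and rescaled
#   assert np.allclose(f1[:63], f2[:63])           # assumes the coordinates stay the first 63 features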
@@ -143,7 +151,7 @@ def get_optimized_features(landmarks_array):
# Load dataset
print("Loading Kaggle ASL dataset...")
base_path = 'asl_kaggle'  # Change this to your dataset path
train_df, sign_to_idx = load_kaggle_asl_data(base_path)
# Process landmarks
@@ -155,19 +163,16 @@ for idx, row in train_df.iterrows():
if idx % 1000 == 0:
print(f"Processed {idx}/{len(train_df)} sequences...")
# Construct full path
parquet_path = os.path.join(base_path, row['path'])
if not os.path.exists(parquet_path):
continue
# Extract landmarks
landmarks = extract_hand_landmarks_from_parquet(parquet_path)
if landmarks is None:
continue
# Get features
features = get_optimized_features(landmarks)
if features is None:
@@ -200,7 +205,7 @@ if np.isinf(X).any():
X = X[mask]
y = y[mask]
# Encode labels
label_encoder = LabelEncoder()
y_encoded = label_encoder.fit_transform(y)
num_classes = len(label_encoder.classes_)
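# Decoding sketch (not part of this commit): classes come from LabelEncoder here, not from
# sign_to_prediction_index_map.json, so predictions must be mapped back through the encoder.
example_idx = 0                                            # hypothetical predicted class index
example_sign = label_encoder.inverse_transform([example_idx])[0]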
@@ -230,8 +235,33 @@ class ASLDataset(Dataset):
train_dataset = ASLDataset(X_train, y_train)
test_dataset = ASLDataset(X_test, y_test)
# Optimized DataLoader settings for GPU
num_workers = 4 if device.type == 'cuda' else 0
pin_memory = True if device.type == 'cuda' else False
batch_size = 128 if device.type == 'cuda' else 64 # Larger batch size for GPU
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=pin_memory,
persistent_workers=True if num_workers > 0 else False
)
test_loader = DataLoader(
test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=pin_memory,
persistent_workers=True if num_workers > 0 else False
)
print(f"\nDataLoader Configuration:")
print(f" Batch size: {batch_size}")
print(f" Num workers: {num_workers}")
print(f" Pin memory: {pin_memory}")
# Positional Encoding for Transformer
@@ -261,14 +291,11 @@ class TransformerCNN_ASL(nn.Module):
self.input_dim = input_dim
self.d_model = d_model
# Input projection
self.input_projection = nn.Linear(input_dim, d_model)
self.input_norm = nn.LayerNorm(d_model)
# Positional encoding
self.pos_encoder = PositionalEncoding(d_model, max_len=100)
# Transformer Encoder with Self-Attention
encoder_layer = nn.TransformerEncoderLayer(
d_model=d_model,
nhead=nhead,
@@ -280,7 +307,6 @@ class TransformerCNN_ASL(nn.Module):
)
self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
# CNN Blocks for pattern detection
self.conv1 = nn.Conv1d(d_model, 1024, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm1d(1024)
self.pool1 = nn.MaxPool1d(2)
@@ -293,10 +319,9 @@ class TransformerCNN_ASL(nn.Module):
self.conv3 = nn.Conv1d(2048, 4096, kernel_size=3, padding=1)
self.bn3 = nn.BatchNorm1d(4096)
self.pool3 = nn.AdaptiveMaxPool1d(1)  # Global pooling
self.dropout3 = nn.Dropout(0.4)
# Fully connected layers
self.fc1 = nn.Linear(4096, 4096)
self.bn_fc1 = nn.BatchNorm1d(4096)
self.dropout_fc1 = nn.Dropout(0.5)
@@ -314,21 +339,15 @@ class TransformerCNN_ASL(nn.Module):
def forward(self, x):
batch_size = x.size(0)
# Project to d_model
x = self.input_projection(x)
x = self.input_norm(x)
x = x.unsqueeze(1)
# Add positional encoding
x = self.pos_encoder(x)
# Transformer encoder with self-attention
x = self.transformer_encoder(x)
# Reshape for CNN
x = x.permute(0, 2, 1)
# CNN pattern detection
x = F.gelu(self.bn1(self.conv1(x)))
x = self.pool1(x)
x = self.dropout1(x)
@@ -341,10 +360,8 @@ class TransformerCNN_ASL(nn.Module):
x = self.pool3(x)
x = self.dropout3(x)
# Flatten
x = x.view(batch_size, -1)
# Fully connected layers
x = F.gelu(self.bn_fc1(self.fc1(x)))
x = self.dropout_fc1(x)
@@ -360,8 +377,7 @@ class TransformerCNN_ASL(nn.Module):
# Initialize model
print(f"\nInitializing model on {device}...")
model = TransformerCNN_ASL(
input_dim=X.shape[1],
@@ -383,6 +399,12 @@ if total_params > 50_000_000:
else:
print(f"Model is within 50M parameter limit ✓")
# Display GPU memory usage
if device.type == 'cuda':
print(f"\nGPU Memory after model initialization:")
print(f" Allocated: {torch.cuda.memory_allocated(0) / 1024 ** 2:.2f} MB")
print(f" Cached: {torch.cuda.memory_reserved(0) / 1024 ** 2:.2f} MB")
# Loss and optimizer
criterion = nn.CrossEntropyLoss(label_smoothing=0.1)
optimizer = optim.AdamW(model.parameters(), lr=0.001, weight_decay=1e-4)
@@ -399,14 +421,13 @@ def train_epoch(model, loader, criterion, optimizer, device):
total = 0
for X_batch, y_batch in loader:
X_batch, y_batch = X_batch.to(device, non_blocking=True), y_batch.to(device, non_blocking=True)
optimizer.zero_grad(set_to_none=True)  # set_to_none=True is more efficient than zeroing gradients
outputs = model(X_batch)
loss = criterion(outputs, y_batch)
loss.backward()
# Gradient clipping
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
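# AMP sketch (not part of this commit): the same loop also runs under mixed precision; the
# scaler must be unscaled before clipping so max_norm applies to the true gradients, roughly:
#   scaler = torch.cuda.amp.GradScaler()
#   with torch.cuda.amp.autocast():
#       outputs = model(X_batch)
#       loss = criterion(outputs, y_batch)
#   scaler.scale(loss).backward()
#   scaler.unscale_(optimizer)
#   torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
#   scaler.step(optimizer)
#   scaler.update()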
@@ -427,7 +448,7 @@ def evaluate(model, loader, device):
with torch.no_grad():
for X_batch, y_batch in loader:
X_batch, y_batch = X_batch.to(device, non_blocking=True), y_batch.to(device, non_blocking=True)
outputs = model(X_batch)
_, predicted = outputs.max(1)
total += y_batch.size(0)
@@ -459,13 +480,23 @@ best_acc = 0
patience_counter = 0
print("\nStarting training with Transformer + CNN architecture...")
print("=" * 50)
# Track training time
import time
start_time = time.time()
for epoch in range(num_epochs):
epoch_start = time.time()
train_loss, train_acc = train_epoch(model, train_loader, criterion, optimizer, device)
test_acc = evaluate(model, test_loader, device)
scheduler.step()
epoch_time = time.time() - epoch_start
if test_acc > best_acc:
best_acc = test_acc
patience_counter = 0
@@ -488,13 +519,30 @@ for epoch in range(num_epochs):
if (epoch + 1) % 5 == 0:
current_lr = optimizer.param_groups[0]['lr']
print(f"Epoch {epoch + 1}/{num_epochs} | Loss: {train_loss:.4f} | "
f"Train: {train_acc:.2f}% | Test: {test_acc:.2f}% | "
f"Best: {best_acc:.2f}% | LR: {current_lr:.6f} | "
f"Time: {epoch_time:.2f}s")
if device.type == 'cuda':
print(f" GPU Memory: {torch.cuda.memory_allocated(0) / 1024 ** 2:.2f} MB")
# Early stopping
if patience_counter >= patience:
print(f"\nEarly stopping triggered at epoch {epoch + 1}")
break
total_time = time.time() - start_time
print("=" * 50)
print(f"\nTraining complete! Best test accuracy: {best_acc:.2f}%") print(f"\nTraining complete! Best test accuracy: {best_acc:.2f}%")
print("Model saved to asl_kaggle_transformer.pth") print(f"Total training time: {total_time / 60:.2f} minutes")
print(f"Average time per epoch: {total_time / (epoch + 1):.2f} seconds")
print("Model saved to asl_kaggle_transformer.pth")
# Final GPU memory stats
if device.type == 'cuda':
print(f"\nFinal GPU Memory Usage:")
print(f" Allocated: {torch.cuda.memory_allocated(0) / 1024 ** 2:.2f} MB")
print(f" Cached: {torch.cuda.memory_reserved(0) / 1024 ** 2:.2f} MB")
print(f" Max Allocated: {torch.cuda.max_memory_allocated(0) / 1024 ** 2:.2f} MB")