fix: normalize hyper params

2025-09-21 13:10:07 +02:00
parent 83d21c640b
commit 63e23d6600
8 changed files with 26 additions and 19 deletions
+1 -2
@@ -13,7 +13,7 @@ from research.neural_network_model import NeuralNetworkModel
 class BiGRUModel(NeuralNetworkModel):
     """Bidirectional GRU model for name classification"""
 
-    def build_model_with_vocab(self, vocab_size: int, max_len: int = 6, **kwargs) -> Any:
+    def build_model_with_vocab(self, vocab_size: int, **kwargs) -> Any:
         params = kwargs
         model = Sequential(
             [
@@ -22,7 +22,6 @@ class BiGRUModel(NeuralNetworkModel):
                 Embedding(
                     input_dim=vocab_size,
                     output_dim=params.get("embedding_dim", 64),
-                    input_length=max_len,
                     mask_zero=True,
                 ),
                 # First recurrent block returns full sequences to allow stacking.
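The same normalization repeats in the CNN and LSTM hunks below: max_len leaves the positional signature, and input_length leaves the Embedding call, since mask_zero=True already masks the padding and the recurrent stack accepts variable-length batches. A minimal usage sketch of the normalized call, assuming a Keras version without Embedding(input_length=...) and that build_model_with_vocab returns a ready Keras model; the module path, vocab size, and sequences are made up:

    from tensorflow.keras.utils import pad_sequences
    from research.bigru_model import BiGRUModel  # module path assumed

    # Hyperparameters now travel through **kwargs instead of a positional max_len.
    model = BiGRUModel().build_model_with_vocab(vocab_size=40, embedding_dim=64)

    # mask_zero=True masks the zero padding, so no fixed input_length is
    # needed at build time and any sequence length works at call time.
    batch = pad_sequences([[3, 7, 1], [5, 2, 9, 4]], padding="post")
    print(model(batch).shape)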
+1 -1
@@ -21,7 +21,7 @@ from research.neural_network_model import NeuralNetworkModel
 class CNNModel(NeuralNetworkModel):
     """1D Convolutional Neural Network for character patterns"""
 
-    def build_model_with_vocab(self, vocab_size: int, max_len: int = 20, **kwargs) -> Any:
+    def build_model_with_vocab(self, vocab_size: int, **kwargs) -> Any:
         """Build CNN model with known vocabulary size"""
         params = kwargs
+1 -2
@@ -13,7 +13,7 @@ from research.neural_network_model import NeuralNetworkModel
 class LSTMModel(NeuralNetworkModel):
     """LSTM model for sequence learning"""
 
-    def build_model_with_vocab(self, vocab_size: int, max_len: int = 6, **kwargs) -> Any:
+    def build_model_with_vocab(self, vocab_size: int, **kwargs) -> Any:
         params = kwargs
         model = Sequential(
             [
@@ -21,7 +21,6 @@ class LSTMModel(NeuralNetworkModel):
                 Embedding(
                     input_dim=vocab_size,
                     output_dim=params.get("embedding_dim", 64),
-                    input_length=max_len,
                     mask_zero=True,
                 ),
                 # Stacked bidirectional LSTMs: first returns sequences to feed the next.
+1 -1
@@ -17,7 +17,7 @@ class NaiveBayesModel(TraditionalModel):
         # includes unigrams for coverage and higher n for suffix/prefix cues.
         vectorizer = CountVectorizer(
             analyzer="char",
-            ngram_range=params.get("ngram_range", (1, 4)),
+            ngram_range=params.get("ngram_range", (2, 5)),
             max_features=params.get("max_features", 8000),
         )
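With the new default the character range starts at bigrams, so the unigram mention in the context comment above no longer applies; the (2, 5) window leans entirely on multi-character prefix and suffix cues. A quick sketch of what the vectorizer extracts under these defaults (plain scikit-learn; the toy names are made up):

    from sklearn.feature_extraction.text import CountVectorizer

    # Character n-grams of length 2..5, capped at 8000 features,
    # matching the defaults above.
    vec = CountVectorizer(analyzer="char", ngram_range=(2, 5), max_features=8000)
    X = vec.fit_transform(["anna", "oleg"])
    print(vec.get_feature_names_out()[:6])  # ['an' 'ann' 'anna' 'eg' 'le' 'leg']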
+5 -5
@@ -22,21 +22,21 @@ from research.neural_network_model import NeuralNetworkModel
 class TransformerModel(NeuralNetworkModel):
     """Transformer-based model"""
 
-    def build_model_with_vocab(self, vocab_size: int, max_len: int = 6, **kwargs) -> Any:
+    def build_model_with_vocab(self, vocab_size: int, **kwargs) -> Any:
         params = kwargs
 
         # Build Transformer model
-        inputs = Input(shape=(max_len,))
+        inputs = Input(shape=(params.get("max_len", 8),))
         x = Embedding(
             input_dim=vocab_size,
             output_dim=params.get("embedding_dim", 64),
-            input_length=max_len,
+            input_length=params.get("max_len", 8),
             mask_zero=True,
         )(inputs)
 
         # Add positional encoding
-        positions = tf.range(start=0, limit=max_len, delta=1)
-        pos_embedding = Embedding(input_dim=max_len, output_dim=params.get("embedding_dim", 64))(
+        positions = tf.range(start=0, limit=params.get("max_len", 8), delta=1)
+        pos_embedding = Embedding(input_dim=params.get("max_len", 8), output_dim=params.get("embedding_dim", 64))(
             positions
         )
         x = x + pos_embedding
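For reference, the learned positional-encoding pattern this hunk rewires: a token embedding plus a position embedding over indices 0..max_len-1, summed before the attention blocks. A standalone sketch with the new defaults (max_len=8, embedding_dim=64), built tf.keras-functional style as in the hunk above; vocab_size is made up:

    import tensorflow as tf
    from tensorflow.keras.layers import Embedding, Input

    max_len, embedding_dim, vocab_size = 8, 64, 40  # vocab_size is illustrative

    inputs = Input(shape=(max_len,))
    tok = Embedding(input_dim=vocab_size, output_dim=embedding_dim, mask_zero=True)(inputs)

    # One learned vector per position, broadcast across the batch dimension.
    positions = tf.range(start=0, limit=max_len, delta=1)
    pos = Embedding(input_dim=max_len, output_dim=embedding_dim)(positions)
    x = tok + pos  # shape (batch, max_len, embedding_dim)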