- Add end-to-end tests for complete workflow validation
- Include integration tests for API and database interactions
- Add unit tests for all major components and utilities
- Include performance tests for payload handling
- Add CLI API integration tests
- Include Podman container integration tests
- Add WebSocket and queue execution tests
- Include shell script tests for setup validation

Provides comprehensive test coverage, ensuring platform reliability and functionality across all components and interactions.
80 lines · 2.2 KiB · Python · Executable file
#!/usr/bin/env python3
|
|
import argparse
|
|
import json
|
|
import logging
|
|
from pathlib import Path
|
|
import time
|
|
|
|
import numpy as np
|
|
import tensorflow as tf
|
|
|
|
|
|
def main() -> None:
    """Train a small feed-forward TensorFlow classifier on synthetic data.

    Parses CLI hyperparameters (``--epochs``, ``--batch_size``,
    ``--learning_rate``, ``--output_dir``), trains a 2-class model on a
    fixed random dataset, then writes ``results.json`` and the saved
    model into ``--output_dir``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int, default=10)
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--learning_rate", type=float, default=0.001)
    parser.add_argument("--output_dir", type=str, required=True)
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    # Lazy %-style args defer string formatting until the record is emitted.
    logger.info("Training TensorFlow model for %d epochs...", args.epochs)

    # Generate synthetic data; fixed seeds keep runs reproducible.
    np.random.seed(42)
    tf.random.set_seed(42)
    X = np.random.randn(1000, 20)
    y = np.random.randint(0, 2, (1000,))

    # Create TensorFlow dataset: reshuffle the full set each epoch, then batch.
    dataset = tf.data.Dataset.from_tensor_slices((X, y))
    dataset = dataset.shuffle(buffer_size=1000).batch(args.batch_size)

    # Build model: two hidden ReLU layers, softmax over the 2 classes.
    # An explicit Input layer replaces the Dense(input_shape=...) kwarg,
    # which is deprecated under Keras 3 (TF >= 2.16); the model is identical.
    model = tf.keras.Sequential(
        [
            tf.keras.layers.Input(shape=(20,)),
            tf.keras.layers.Dense(64, activation="relu"),
            tf.keras.layers.Dense(32, activation="relu"),
            tf.keras.layers.Dense(2, activation="softmax"),
        ]
    )

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate),
        # Labels are integer class ids (not one-hot), hence the sparse loss.
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )

    # Training
    history = model.fit(dataset, epochs=args.epochs, verbose=1)

    final_accuracy = history.history["accuracy"][-1]
    logger.info("Training completed. Final accuracy: %.4f", final_accuracy)

    # Save results (cast to built-in float: numpy scalars are not JSON-serializable).
    results = {
        "model_type": "TensorFlow",
        "epochs": args.epochs,
        "batch_size": args.batch_size,
        "learning_rate": args.learning_rate,
        "final_accuracy": float(final_accuracy),
        "n_samples": len(X),
        "input_features": X.shape[1],
    }

    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    with open(output_dir / "results.json", "w") as f:
        json.dump(results, f, indent=2)

    # Save model. Keras 3 (TF >= 2.16) requires an explicit .keras/.h5
    # extension and raises on the bare path the original used; the .keras
    # format is also accepted by TF 2.12+.
    model.save(output_dir / "tensorflow_model.keras")

    logger.info("Results and model saved successfully!")


if __name__ == "__main__":
    main()