Linear Regression Plot
Neural Network Learning Dynamics
Deep learning convergence with polynomial fit and performance bands
Python
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(123)
# Synthetic training history: exponential decay toward a 0.15 floor, plus noise
epochs = np.arange(1, 101)
loss = 2.5 * np.exp(-0.05 * epochs) + 0.15 + np.random.normal(0, 0.08, 100)
# Polynomial fit
z = np.polyfit(epochs, loss, 4)
p = np.poly1d(z)
x_smooth = np.linspace(1, 100, 200)
fig, ax = plt.subplots(figsize=(10, 7), facecolor='#0a0a0f')
ax.set_facecolor('#0a0a0f')
# Confidence region
ax.fill_between(x_smooth, p(x_smooth) - 0.12, p(x_smooth) + 0.12,
                color='#6CF527', alpha=0.12, linewidth=0)
# Fit line
ax.plot(x_smooth, p(x_smooth), color='#6CF527', linewidth=2.5, zorder=3)
# Data points
ax.scatter(epochs, loss, c='#F5B027', s=45, alpha=0.7, edgecolors='none', zorder=4)
# Convergence annotation
ax.axhline(y=0.15, color='#F5276C', linestyle='--', linewidth=1.5, alpha=0.6)
ax.text(95, 0.18, 'convergence', fontsize=9, color='#F5276C', ha='right')
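# Styling: hide the top/right spines and dim the remaining ones for the dark theme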
for spine in ['top', 'right']:
    ax.spines[spine].set_visible(False)
for spine in ['bottom', 'left']:
    ax.spines[spine].set_color('#333333')
ax.set_xlabel('Training Epoch', fontsize=12, color='white', fontweight='500', labelpad=10)
ax.set_ylabel('Validation Loss', fontsize=12, color='white', fontweight='500', labelpad=10)
ax.set_title('Model Convergence Analysis', fontsize=15, color='white', fontweight='bold', pad=20, loc='left')
ax.tick_params(colors='#666666', labelsize=10, length=0)
plt.tight_layout()
plt.show()
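The shaded region above is a fixed ±0.12 offset around the polynomial fit, so it works as a visual band rather than a statistical interval. If you want the band to reflect the spread of the data, one option (a minimal sketch reusing loss, epochs, p, x_smooth and ax from the code above; the 95% level is an assumption, not part of the original example) is to size it from the residuals of the fit, replacing the fixed-offset fill_between call:
from scipy import stats
# Spread of the points around the polynomial fit
residuals = loss - p(epochs)
# Approximate 95% half-width, assuming roughly normal residuals
half_width = stats.norm.ppf(0.975) * residuals.std()
ax.fill_between(x_smooth, p(x_smooth) - half_width, p(x_smooth) + half_width,
                color='#6CF527', alpha=0.12, linewidth=0)
Either way, the low alpha keeps the band subtle against the dark background, so the fit line and the scatter points stay dominant.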
Library: Matplotlib
Category: Pairwise Data