diff --git a/dashboard/public/index.html b/dashboard/public/index.html
index 2fb66e7..0acc709 100644
--- a/dashboard/public/index.html
+++ b/dashboard/public/index.html
@@ -71,7 +71,7 @@
- Moving Average: Tradezahlen & Volumen je Exchange
+ Moving Average: Tradezahlen & Volumen (alle Exchanges)
@@ -251,61 +251,81 @@
if (charts.movingAverage) charts.movingAverage.destroy();
const dateIdx = columns.findIndex(c => c.name === 'date' || c.name === 'timestamp');
- const exchangeIdx = columns.findIndex(c => c.name === 'exchange');
const countIdx = columns.findIndex(c => c.name === 'trade_count');
const volumeIdx = columns.findIndex(c => c.name === 'volume');
- const maCountIdx = columns.findIndex(c => c.name === 'ma_count');
- const maVolumeIdx = columns.findIndex(c => c.name === 'ma_volume');
- const exchanges = [...new Set(data.map(r => r[exchangeIdx]))];
+ // Aggregate all data by date (sum across all exchanges)
const dates = [...new Set(data.map(r => r[dateIdx]))].sort();
- const datasets = [];
- // Extended colors for more exchanges (EIX, LS, XETRA, FRA, GETTEX, STU, QUOTRIX)
- const colors = ['#38bdf8', '#f43f5e', '#10b981', '#fbbf24', '#8b5cf6', '#f97316', '#ec4899', '#14b8a6', '#84cc16', '#a855f7'];
-
- exchanges.forEach((exchange, idx) => {
- datasets.push({
- label: `${exchange} - Trade Count`,
- data: dates.map(d => {
- const row = data.find(r => r[dateIdx] === d && r[exchangeIdx] === exchange);
- return row ? (row[countIdx] || 0) : 0;
- }),
- borderColor: colors[idx % colors.length],
- backgroundColor: colors[idx % colors.length] + '33',
- borderWidth: 2,
- yAxisID: 'y',
- tension: 0.3
- });
-
- datasets.push({
- label: `${exchange} - MA Count`,
- data: dates.map(d => {
- const row = data.find(r => r[dateIdx] === d && r[exchangeIdx] === exchange);
- return row ? (row[maCountIdx] || 0) : 0;
- }),
- borderColor: colors[idx % colors.length],
- backgroundColor: 'transparent',
- borderWidth: 2,
- borderDash: [5, 5],
- yAxisID: 'y',
- tension: 0.3
- });
-
- datasets.push({
- label: `${exchange} - Volume`,
- data: dates.map(d => {
- const row = data.find(r => r[dateIdx] === d && r[exchangeIdx] === exchange);
- return row ? (row[volumeIdx] || 0) : 0;
- }),
- borderColor: colors[(idx + 2) % colors.length],
- backgroundColor: colors[(idx + 2) % colors.length] + '33',
- borderWidth: 2,
- yAxisID: 'y1',
- tension: 0.3
- });
+ // Sum trade count and volume per day (all exchanges combined)
+ const dailyTotals = {};
+ dates.forEach(date => {
+ const dayRows = data.filter(r => r[dateIdx] === date);
+ dailyTotals[date] = {
+ tradeCount: dayRows.reduce((sum, r) => sum + (r[countIdx] || 0), 0),
+ volume: dayRows.reduce((sum, r) => sum + (r[volumeIdx] || 0), 0)
+ };
});
+ // Compute the moving average (7-day trailing window)
+ const maWindow = 7;
+ const tradeCountData = dates.map(d => dailyTotals[d].tradeCount);
+ const volumeData = dates.map(d => dailyTotals[d].volume);
+
+ const calculateMA = (series, window) => {
+ return series.map((val, idx) => {
+ if (idx < window - 1) return null;
+ const slice = series.slice(idx - window + 1, idx + 1);
+ return slice.reduce((a, b) => a + b, 0) / window;
+ });
+ };
+
+ const maTradeCount = calculateMA(tradeCountData, maWindow);
+ const maVolume = calculateMA(volumeData, maWindow);
+
+ const datasets = [
+ {
+ label: 'Trades (täglich)',
+ data: tradeCountData,
+ borderColor: '#38bdf8',
+ backgroundColor: '#38bdf833',
+ borderWidth: 1,
+ yAxisID: 'y',
+ tension: 0.3,
+ pointRadius: 2
+ },
+ {
+ label: `Trades (${maWindow}-Tage MA)`,
+ data: maTradeCount,
+ borderColor: '#38bdf8',
+ backgroundColor: 'transparent',
+ borderWidth: 3,
+ yAxisID: 'y',
+ tension: 0.4,
+ pointRadius: 0
+ },
+ {
+ label: 'Volumen (täglich)',
+ data: volumeData,
+ borderColor: '#10b981',
+ backgroundColor: '#10b98133',
+ borderWidth: 1,
+ yAxisID: 'y1',
+ tension: 0.3,
+ pointRadius: 2
+ },
+ {
+ label: `Volumen (${maWindow}-Tage MA)`,
+ data: maVolume,
+ borderColor: '#10b981',
+ backgroundColor: 'transparent',
+ borderWidth: 3,
+ yAxisID: 'y1',
+ tension: 0.4,
+ pointRadius: 0
+ }
+ ];
+
charts.movingAverage = new Chart(ctx, {
type: 'line',
data: {
@@ -321,7 +341,7 @@
type: 'linear',
display: true,
position: 'left',
- title: { display: true, text: 'Trade Count', color: '#94a3b8' },
+ title: { display: true, text: 'Anzahl Trades', color: '#94a3b8' },
grid: { color: 'rgba(255,255,255,0.05)' },
ticks: { color: '#64748b' }
},
@@ -329,13 +349,20 @@
type: 'linear',
display: true,
position: 'right',
- title: { display: true, text: 'Volume (€)', color: '#94a3b8' },
+ title: { display: true, text: 'Volumen (€)', color: '#94a3b8' },
grid: { drawOnChartArea: false },
- ticks: { color: '#64748b' }
+ ticks: {
+ color: '#64748b',
+ callback: function(value) {
+ if (value >= 1e6) return (value / 1e6).toFixed(1) + 'M';
+ if (value >= 1e3) return (value / 1e3).toFixed(0) + 'k';
+ return value;
+ }
+ }
},
x: {
grid: { display: false },
- ticks: { color: '#64748b' }
+ ticks: { color: '#64748b', maxRotation: 45 }
}
},
plugins: {
@@ -343,6 +370,22 @@
display: true,
position: 'bottom',
labels: { color: '#94a3b8', boxWidth: 12, usePointStyle: true, padding: 15 }
+ },
+ tooltip: {
+ callbacks: {
+ label: function(context) {
+ let label = context.dataset.label || '';
+ if (label) label += ': ';
+ if (context.parsed.y !== null) {
+ if (context.dataset.yAxisID === 'y1') {
+ label += '€' + context.parsed.y.toLocaleString();
+ } else {
+ label += context.parsed.y.toLocaleString();
+ }
+ }
+ return label;
+ }
+ }
}
}
}
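For reviewers who want to sanity-check the chart's smoothing offline, here is a minimal Python sketch of the same trailing-window average the new JavaScript computes; the function name and test values are illustrative, not part of the diff:

```python
from typing import List, Optional

def trailing_ma(series: List[float], window: int = 7) -> List[Optional[float]]:
    """Trailing moving average, None-padded for the first window-1 slots
    so the result stays aligned with the date axis; Chart.js simply skips
    null points, which is why the JS version returns null there."""
    out: List[Optional[float]] = []
    for i in range(len(series)):
        if i < window - 1:
            out.append(None)
        else:
            out.append(sum(series[i - window + 1:i + 1]) / window)
    return out

# Quick check: a constant series averages to itself once the window fills.
assert trailing_ma([5.0] * 10)[-1] == 5.0
assert trailing_ma([5.0] * 10)[:6] == [None] * 6
```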
diff --git a/src/exchanges/deutsche_boerse.py b/src/exchanges/deutsche_boerse.py
index 51e1983..189a621 100644
--- a/src/exchanges/deutsche_boerse.py
+++ b/src/exchanges/deutsche_boerse.py
@@ -28,20 +28,44 @@ class DeutscheBoerseBase(BaseExchange):
def _get_file_list(self) -> List[str]:
"""Parst die Verzeichnisseite und extrahiert alle Dateinamen"""
+ import re
try:
response = requests.get(self.base_url, headers=HEADERS, timeout=30)
response.raise_for_status()
- soup = BeautifulSoup(response.text, 'html.parser')
files = []
- # Deutsche Börse lists files as links
- for link in soup.find_all('a'):
- href = link.get('href', '')
- # Only posttrade JSON.gz files
- if 'posttrade' in href and href.endswith('.json.gz'):
- files.append(href)
+ # Primary: regex-based extraction (more reliable)
+ # Pattern: PREFIX-posttrade-YYYY-MM-DDTHH_MM.json.gz
+ # The prefix is taken from the base_url (e.g. DETR, DFRA, DGAT)
+ prefix_match = re.search(r'/([A-Z]{4})-posttrade', self.base_url)
+ if prefix_match:
+ prefix = prefix_match.group(1)
+ # Look for file names carrying this prefix
+ pattern = f'{prefix}-posttrade-\\d{{4}}-\\d{{2}}-\\d{{2}}T\\d{{2}}_\\d{{2}}\\.json\\.gz'
+ else:
+ # Generic fallback pattern
+ pattern = r'[A-Z]{4}-posttrade-\d{4}-\d{2}-\d{2}T\d{2}_\d{2}\.json\.gz'
+ matches = re.findall(pattern, response.text)
+ files = list(set(matches))
+
+ # Secondary: BeautifulSoup link scan (if the regex finds nothing)
+ if not files:
+ soup = BeautifulSoup(response.text, 'html.parser')
+ for link in soup.find_all('a'):
+ href = link.get('href', '')
+ text = link.get_text(strip=True)
+
+ # Check both href and link text for posttrade files
+ if href and 'posttrade' in href.lower() and '.json.gz' in href.lower():
+ # Keep only the bare file name (split is a no-op when there is no slash)
+ filename = href.split('/')[-1]
+ files.append(filename)
+ elif text and 'posttrade' in text.lower() and '.json.gz' in text.lower():
+ files.append(text)
+
+ print(f"[{self.name}] Found {len(files)} files via regex/soup")
return files
except Exception as e:
print(f"Error fetching file list from {self.base_url}: {e}")
@@ -50,11 +74,12 @@ class DeutscheBoerseBase(BaseExchange):
def _filter_files_for_date(self, files: List[str], target_date: datetime.date) -> List[str]:
"""
Filters files for a given date.
- File format: *posttrade-YYYY-MM-DDTHH:MM:SS*.json.gz
+ File format: DETR-posttrade-YYYY-MM-DDTHH_MM.json.gz (note the underscore!)
Since trading runs until 22:00 CET (21:00/20:00 UTC), we also have to
consider files from after midnight UTC.
"""
+ import re
filtered = []
# Für den Vortag: Dateien vom target_date UND vom Folgetag (bis ~02:00 UTC)
@@ -64,18 +89,17 @@ class DeutscheBoerseBase(BaseExchange):
for file in files:
# Extract the date from the file name
- # Format: posttrade-2026-01-26T21:30:00.json.gz
+ # Format: DETR-posttrade-2026-01-26T21_30.json.gz
if target_str in file:
filtered.append(file)
elif next_day_str in file:
# Prüfe ob es eine frühe Datei vom nächsten Tag ist (< 03:00 UTC)
try:
- # Find the timestamp in the file name
- parts = file.split('posttrade-')
- if len(parts) > 1:
- ts_part = parts[1].split('.json.gz')[0]
- file_dt = datetime.fromisoformat(ts_part)
- if file_dt.hour < 3:  # early-morning hours still belong to the previous day
+ # Extract the timestamp from the file name (minutes use an underscore)
+ match = re.search(r'posttrade-(\d{4}-\d{2}-\d{2})T(\d{2})_(\d{2})', file)
+ if match:
+ hour = int(match.group(2))
+ if hour < 3:  # early-morning hours still belong to the previous day
filtered.append(file)
except Exception:
pass
@@ -88,13 +112,22 @@ class DeutscheBoerseBase(BaseExchange):
try:
# Build the full URL
+ # Format: https://mfs.deutsche-boerse.com/DETR-posttrade/DETR-posttrade-2026-01-27T08_53.json.gz
if not file_url.startswith('http'):
- full_url = f"{self.base_url.rstrip('/')}/{file_url.lstrip('/')}"
+ # Strip a leading slash from the file name and a trailing one from the base URL
+ filename = file_url.lstrip('/')
+ full_url = f"{self.base_url.rstrip('/')}/{filename}"
else:
full_url = file_url
response = requests.get(full_url, headers=HEADERS, timeout=60)
+
+ if response.status_code == 404:
+ print(f"[{self.name}] File not found: {full_url}")
+ return []
+
response.raise_for_status()
+ print(f"[{self.name}] Downloaded: {full_url} ({len(response.content)} bytes)")
# Decompress the gzip payload
with gzip.GzipFile(fileobj=io.BytesIO(response.content)) as f:
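The regex-first extraction and the previous-day attribution rule are easy to verify in isolation. A small sketch, assuming the file-name scheme documented in the comments above (the sample listing is made up):

```python
import re
from datetime import date

# Hypothetical directory listing; names follow the
# PREFIX-posttrade-YYYY-MM-DDTHH_MM.json.gz scheme from the diff.
listing = """
DETR-posttrade-2026-01-26T21_30.json.gz
DETR-posttrade-2026-01-27T01_15.json.gz
DETR-posttrade-2026-01-27T08_53.json.gz
"""

pattern = r"[A-Z]{4}-posttrade-(\d{4}-\d{2}-\d{2})T(\d{2})_\d{2}\.json\.gz"
target = date(2026, 1, 26)
next_day = date(2026, 1, 27)

kept = []
for m in re.finditer(pattern, listing):
    day, hour = m.group(1), int(m.group(2))
    # Same rule as _filter_files_for_date: files before 03:00 UTC
    # still count toward the previous trading day.
    if day == target.isoformat() or (day == next_day.isoformat() and hour < 3):
        kept.append(m.group(0))

print(kept)  # the 21_30 and 01_15 files survive, the 08_53 file does not
```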
diff --git a/src/exchanges/gettex.py b/src/exchanges/gettex.py
index afc5cf3..84ac844 100644
--- a/src/exchanges/gettex.py
+++ b/src/exchanges/gettex.py
@@ -17,7 +17,9 @@ HEADERS = {
# gettex download base URLs
GETTEX_PAGE_URL = "https://www.gettex.de/handel/delayed-data/posttrade-data/"
-GETTEX_DOWNLOAD_BASE = "https://erdk.bayerische-boerse.de:8000/delayed-data/MUNC-MUND/posttrade/"
+# The download URL is exposed as a direct link on the gettex website
+# Base URL for fileadmin downloads (found by inspecting the page)
+GETTEX_DOWNLOAD_BASE = "https://www.gettex.de/fileadmin/posttrade-data/"
class GettexExchange(BaseExchange):
@@ -32,9 +34,10 @@ class GettexExchange(BaseExchange):
def name(self) -> str:
return "GETTEX"
- def _get_file_list_from_page(self) -> List[str]:
+ def _get_file_list_from_page(self) -> List[dict]:
"""
Parses the gettex page and extracts download links.
+ Returns a list of dicts with 'filename' and 'url' keys.
"""
files = []
@@ -47,16 +50,32 @@ class GettexExchange(BaseExchange):
# Look for links to CSV.gz files
for link in soup.find_all('a'):
href = link.get('href', '')
- if href and 'posttrade' in href.lower() and href.endswith('.csv.gz'):
- files.append(href)
+ text = link.get_text(strip=True)
+
+ # Check link text or href for posttrade CSV.gz files
+ if href and 'posttrade' in href.lower() and '.csv.gz' in href.lower():
+ # Build an absolute URL
+ if not href.startswith('http'):
+ url = f"https://www.gettex.de{href}" if href.startswith('/') else f"https://www.gettex.de/{href}"
+ else:
+ url = href
+ filename = href.split('/')[-1]
+ files.append({'filename': filename, 'url': url})
+
+ elif text and 'posttrade' in text.lower() and '.csv.gz' in text.lower():
+ # The link text is the file name; href may hold the URL
+ filename = text
+ if href:
+ if not href.startswith('http'):
+ url = f"https://www.gettex.de{href}" if href.startswith('/') else f"https://www.gettex.de/{href}"
+ else:
+ url = href
+ else:
+ # Fallback: assume the standard fileadmin URL pattern
+ url = f"{GETTEX_DOWNLOAD_BASE}{filename}"
+ files.append({'filename': filename, 'url': url})
- # If no links are found, try an alternative structure
- if not files:
- # Sometimes links are hidden in data attributes
- for elem in soup.find_all(attrs={'data-href': True}):
- href = elem.get('data-href', '')
- if 'posttrade' in href.lower() and href.endswith('.csv.gz'):
- files.append(href)
+ print(f"[GETTEX] Found {len(files)} files on page")
except Exception as e:
print(f"[GETTEX] Error fetching page: {e}")
@@ -211,19 +230,95 @@ class GettexExchange(BaseExchange):
print(f"[{self.name}] Fetching trades for date: {target_date}")
- # Generate expected file names
- expected_files = self._generate_expected_files(target_date)
- print(f"[{self.name}] Trying {len(expected_files)} potential files")
+ # First, try to discover files directly on the web page
+ page_files = self._get_file_list_from_page()
- # Try downloading the files
- successful_files = 0
- for filename in expected_files:
- trades = self._download_and_parse_file(filename)
- if trades:
- all_trades.extend(trades)
- successful_files += 1
+ if page_files:
+ # Filter the files down to the target date
+ target_str = target_date.strftime('%Y%m%d')
+ next_day = target_date + timedelta(days=1)
+ next_day_str = next_day.strftime('%Y%m%d')
+
+ target_files = []
+ for f in page_files:
+ filename = f['filename']
+ # Files from the target date, or from the early hours of the next day
+ if target_str in filename:
+ target_files.append(f)
+ elif next_day_str in filename:
+ # Early-morning files (00:00 - 02:45) belong to the previous day
+ try:
+ # Format: posttrade.YYYYMMDD.HH.MM.{munc|mund}.csv.gz
+ parts = filename.split('.')
+ if len(parts) >= 4:
+ hour = int(parts[2])
+ if hour < 3:
+ target_files.append(f)
+ except (ValueError, IndexError):
+ pass
+
+ print(f"[{self.name}] Found {len(target_files)} files for target date from page")
+
+ # Download the files found on the page
+ for f in target_files:
+ trades = self._download_file_by_url(f['url'], f['filename'])
+ if trades:
+ all_trades.extend(trades)
+
+ # Fallback: probe the generated file names
+ if not all_trades:
+ print(f"[{self.name}] No files from page, trying generated filenames...")
+ expected_files = self._generate_expected_files(target_date)
+ print(f"[{self.name}] Trying {len(expected_files)} potential files")
+
+ successful_files = 0
+ for filename in expected_files:
+ trades = self._download_and_parse_file(filename)
+ if trades:
+ all_trades.extend(trades)
+ successful_files += 1
+
+ print(f"[{self.name}] Successfully downloaded {successful_files} files")
- print(f"[{self.name}] Successfully downloaded {successful_files} files")
print(f"[{self.name}] Total trades fetched: {len(all_trades)}")
return all_trades
+
+ def _download_file_by_url(self, url: str, filename: str) -> List[Trade]:
+ """Lädt eine Datei direkt von einer URL"""
+ trades = []
+
+ try:
+ print(f"[{self.name}] Downloading: {url}")
+ response = requests.get(url, headers=HEADERS, timeout=60)
+
+ if response.status_code == 404:
+ return []
+
+ response.raise_for_status()
+
+ # Decompress the gzip payload
+ with gzip.GzipFile(fileobj=io.BytesIO(response.content)) as f:
+ csv_text = f.read().decode('utf-8')
+
+ # Parse the semicolon-delimited CSV
+ reader = csv.DictReader(io.StringIO(csv_text), delimiter=';')
+
+ for row in reader:
+ try:
+ trade = self._parse_csv_row(row)
+ if trade:
+ trades.append(trade)
+ except Exception as e:
+ print(f"[{self.name}] Error parsing row: {e}")
+ continue
+
+ print(f"[{self.name}] Parsed {len(trades)} trades from {filename}")
+
+ except requests.exceptions.HTTPError as e:
+ if e.response is not None and e.response.status_code != 404:
+ print(f"[{self.name}] HTTP error downloading {url}: {e}")
+ except Exception as e:
+ print(f"[{self.name}] Error downloading {url}: {e}")
+
+ return trades
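For completeness, the happy path of the new `_download_file_by_url` boils down to download, gunzip, and a semicolon-delimited `csv.DictReader` pass. A standalone sketch under those assumptions; the example URL is hypothetical and depends on gettex's publishing schedule:

```python
import csv
import gzip
import io

import requests

def fetch_posttrade_rows(url: str) -> list:
    """Download a gzipped, semicolon-delimited posttrade CSV and return
    its rows as dicts; error handling is trimmed for brevity."""
    resp = requests.get(url, timeout=60)
    if resp.status_code == 404:
        return []  # missing files are routine on this endpoint, not an error
    resp.raise_for_status()
    with gzip.GzipFile(fileobj=io.BytesIO(resp.content)) as fh:
        text = fh.read().decode("utf-8")
    return list(csv.DictReader(io.StringIO(text), delimiter=";"))

# Hypothetical call:
# rows = fetch_posttrade_rows(
#     "https://www.gettex.de/fileadmin/posttrade-data/posttrade.20260126.12.00.munc.csv.gz")
```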
diff --git a/src/exchanges/stuttgart.py b/src/exchanges/stuttgart.py
index d10fc8b..7f11dd0 100644
--- a/src/exchanges/stuttgart.py
+++ b/src/exchanges/stuttgart.py
@@ -8,12 +8,22 @@ from typing import List, Optional
from .base import BaseExchange, Trade
from bs4 import BeautifulSoup
-# Browser User-Agent
+# Browser User-Agent (full browser fingerprint for Stuttgart)
HEADERS = {
- 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
- 'Accept-Language': 'de-DE,de;q=0.9,en;q=0.8',
- 'Referer': 'https://www.boerse-stuttgart.de/'
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
+ 'Accept-Language': 'de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7',
+ 'Accept-Encoding': 'gzip, deflate, br',
+ 'Referer': 'https://www.boerse-stuttgart.de/de-de/fuer-geschaeftspartner/reports/mifir-ii-delayed-data/',
+ 'Sec-Ch-Ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
+ 'Sec-Ch-Ua-Mobile': '?0',
+ 'Sec-Ch-Ua-Platform': '"Windows"',
+ 'Sec-Fetch-Dest': 'document',
+ 'Sec-Fetch-Mode': 'navigate',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'Sec-Fetch-User': '?1',
+ 'Upgrade-Insecure-Requests': '1',
+ 'Cache-Control': 'max-age=0'
}
# Börse Stuttgart URLs
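One caveat on the expanded header set: requests only decodes `br` (Brotli) responses transparently when the `brotli` or `brotlicffi` package is installed, so advertising it without that dependency can leave response bodies undecodable. A minimal usage sketch, assuming the `HEADERS` dict defined above:

```python
import requests

session = requests.Session()
session.headers.update(HEADERS)  # the browser-fingerprint headers above

# Drop 'br' when Brotli support is missing, so bodies stay decodable.
try:
    import brotli  # noqa: F401
except ImportError:
    session.headers['Accept-Encoding'] = 'gzip, deflate'

resp = session.get('https://www.boerse-stuttgart.de/', timeout=30)
resp.raise_for_status()
```

A shared `Session` also persists any cookies the site sets, which tends to matter for bot-detection setups like this one.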