fixed deutsche boerse
All checks were successful
Deployment / deploy-docker (push) Successful in 17s
BIN
src/exchanges/__pycache__/deutsche_boerse.cpython-313.pyc
Normal file
Binary file not shown.
BIN
src/exchanges/__pycache__/gettex.cpython-313.pyc
Normal file
Binary file not shown.
BIN
src/exchanges/__pycache__/stuttgart.cpython-313.pyc
Normal file
Binary file not shown.
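src/exchanges/deutsche_boerse.py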
@@ -28,20 +28,44 @@ class DeutscheBoerseBase(BaseExchange):
     def _get_file_list(self) -> List[str]:
         """Parses the directory page and extracts all file names"""
+        import re
         try:
             response = requests.get(self.base_url, headers=HEADERS, timeout=30)
             response.raise_for_status()
 
-            soup = BeautifulSoup(response.text, 'html.parser')
-            files = []
-
-            # Deutsche Börse lists the files as links
-            for link in soup.find_all('a'):
-                href = link.get('href', '')
-                # Only posttrade JSON.gz files
-                if 'posttrade' in href and href.endswith('.json.gz'):
-                    files.append(href)
+            # Primary: regex-based extraction (more reliable)
+            # Pattern: PREFIX-posttrade-YYYY-MM-DDTHH_MM.json.gz
+            # The prefix is extracted from the base_url (e.g. DETR, DFRA, DGAT)
+            prefix_match = re.search(r'/([A-Z]{4})-posttrade', self.base_url)
+            if prefix_match:
+                prefix = prefix_match.group(1)
+                # Look for file names with this prefix
+                pattern = f'{prefix}-posttrade-\\d{{4}}-\\d{{2}}-\\d{{2}}T\\d{{2}}_\\d{{2}}\\.json\\.gz'
+            else:
+                # Generic pattern
+                pattern = r'[A-Z]{4}-posttrade-\d{4}-\d{2}-\d{2}T\d{2}_\d{2}\.json\.gz'
+
+            matches = re.findall(pattern, response.text)
+            files = list(set(matches))
+
+            # Secondary: BeautifulSoup link scan (if the regex finds nothing)
+            if not files:
+                soup = BeautifulSoup(response.text, 'html.parser')
+                for link in soup.find_all('a'):
+                    href = link.get('href', '')
+                    text = link.get_text(strip=True)
+
+                    # Check href and link text for posttrade files
+                    if href and 'posttrade' in href.lower() and '.json.gz' in href.lower():
+                        # Extract just the file name
+                        filename = href.split('/')[-1] if '/' in href else href
+                        files.append(filename)
+                    elif text and 'posttrade' in text.lower() and '.json.gz' in text.lower():
+                        files.append(text)
+
+            print(f"[{self.name}] Found {len(files)} files via regex/soup")
             return files
         except Exception as e:
             print(f"Error fetching file list from {self.base_url}: {e}")
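The regex path above can be sanity-checked in isolation. A minimal sketch, run against a hypothetical listing excerpt (the real page markup may differ):

    import re

    base_url = "https://mfs.deutsche-boerse.com/DETR-posttrade"  # example host from the comments above
    html = '<a href="DETR-posttrade-2026-01-27T08_53.json.gz">link</a>'  # hypothetical excerpt

    prefix_match = re.search(r'/([A-Z]{4})-posttrade', base_url)
    prefix = prefix_match.group(1) if prefix_match else ''  # -> "DETR"
    pattern = f'{prefix}-posttrade-\\d{{4}}-\\d{{2}}-\\d{{2}}T\\d{{2}}_\\d{{2}}\\.json\\.gz'
    print(set(re.findall(pattern, html)))
    # {'DETR-posttrade-2026-01-27T08_53.json.gz'}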
@@ -50,11 +74,12 @@ class DeutscheBoerseBase(BaseExchange):
     def _filter_files_for_date(self, files: List[str], target_date: datetime.date) -> List[str]:
         """
         Filters files for a specific date.
-        File format: *posttrade-YYYY-MM-DDTHH:MM:SS*.json.gz
+        File format: DETR-posttrade-YYYY-MM-DDTHH_MM.json.gz (with an underscore!)
 
         Since trading runs until 22:00 CET (21:00/20:00 UTC), we also have to
         take files after midnight UTC into account.
         """
+        import re
         filtered = []
 
         # For the previous day: files from target_date AND from the following day (up to ~02:00 UTC)
@@ -64,18 +89,17 @@ class DeutscheBoerseBase(BaseExchange):
         for file in files:
             # Extract the date from the file name
-            # Format: posttrade-2026-01-26T21:30:00.json.gz
+            # Format: DETR-posttrade-2026-01-26T21_30.json.gz
             if target_str in file:
                 filtered.append(file)
             elif next_day_str in file:
                 # Check whether it is an early file from the next day (< 03:00 UTC)
                 try:
-                    # Find the timestamp in the file name
-                    parts = file.split('posttrade-')
-                    if len(parts) > 1:
-                        ts_part = parts[1].split('.json.gz')[0]
-                        file_dt = datetime.fromisoformat(ts_part)
-                        if file_dt.hour < 3:  # early morning hours still belong to the previous day
+                    # Find the timestamp in the file name (underscore between hour and minute)
+                    match = re.search(r'posttrade-(\d{4}-\d{2}-\d{2})T(\d{2})_(\d{2})', file)
+                    if match:
+                        hour = int(match.group(2))
+                        if hour < 3:  # early morning hours still belong to the previous day
                             filtered.append(file)
                 except Exception:
                     pass
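A quick check of the midnight carry-over rule, using hypothetical file names (and assuming target_str/next_day_str are the '%Y-%m-%d' strings the comments above imply):

    import re

    files = [
        'DETR-posttrade-2026-01-26T21_30.json.gz',  # target day -> kept
        'DETR-posttrade-2026-01-27T01_15.json.gz',  # early next day -> kept (hour < 3)
        'DETR-posttrade-2026-01-27T08_53.json.gz',  # next trading day -> dropped
    ]
    target_str, next_day_str = '2026-01-26', '2026-01-27'

    filtered = []
    for file in files:
        if target_str in file:
            filtered.append(file)
        elif next_day_str in file:
            match = re.search(r'posttrade-(\d{4}-\d{2}-\d{2})T(\d{2})_(\d{2})', file)
            if match and int(match.group(2)) < 3:
                filtered.append(file)
    print(filtered)  # first two entries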
@@ -88,13 +112,22 @@ class DeutscheBoerseBase(BaseExchange):
         try:
             # Build the full URL
+            # Format: https://mfs.deutsche-boerse.com/DETR-posttrade/DETR-posttrade-2026-01-27T08_53.json.gz
             if not file_url.startswith('http'):
-                full_url = f"{self.base_url.rstrip('/')}/{file_url.lstrip('/')}"
+                # Strip a leading slash if present
+                filename = file_url.lstrip('/')
+                full_url = f"{self.base_url}/{filename}"
             else:
                 full_url = file_url
 
             response = requests.get(full_url, headers=HEADERS, timeout=60)
 
+            if response.status_code == 404:
+                print(f"[{self.name}] File not found: {full_url}")
+                return []
+
             response.raise_for_status()
+            print(f"[{self.name}] Downloaded: {full_url} ({len(response.content)} bytes)")
 
             # Decompress the gzip payload
             with gzip.GzipFile(fileobj=io.BytesIO(response.content)) as f:
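The download-and-decompress step can be reproduced standalone; a minimal sketch (the URL is the example from the comment above, so availability is not guaranteed):

    import gzip
    import io

    import requests

    url = 'https://mfs.deutsche-boerse.com/DETR-posttrade/DETR-posttrade-2026-01-27T08_53.json.gz'
    response = requests.get(url, timeout=60)
    if response.ok:
        with gzip.GzipFile(fileobj=io.BytesIO(response.content)) as f:
            payload = f.read().decode('utf-8')
        print(payload[:200])  # JSON trade records; the exact schema depends on the feed

src/exchanges/gettex.py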
@@ -17,7 +17,9 @@ HEADERS = {
 # gettex download base URLs
 GETTEX_PAGE_URL = "https://www.gettex.de/handel/delayed-data/posttrade-data/"
-GETTEX_DOWNLOAD_BASE = "https://erdk.bayerische-boerse.de:8000/delayed-data/MUNC-MUND/posttrade/"
+# The download URL is available on the gettex website as a direct link
+# Base URL for fileadmin downloads (found through page analysis)
+GETTEX_DOWNLOAD_BASE = "https://www.gettex.de/fileadmin/posttrade-data/"
 
 
 class GettexExchange(BaseExchange):
@@ -32,9 +34,10 @@ class GettexExchange(BaseExchange):
     def name(self) -> str:
         return "GETTEX"
 
-    def _get_file_list_from_page(self) -> List[str]:
+    def _get_file_list_from_page(self) -> List[dict]:
         """
         Parses the gettex page and extracts download links.
+        Returns a list of dicts with 'filename' and 'url'.
         """
         files = []
@@ -47,16 +50,32 @@ class GettexExchange(BaseExchange):
             # Look for links to CSV.gz files
             for link in soup.find_all('a'):
                 href = link.get('href', '')
-                if href and 'posttrade' in href.lower() and href.endswith('.csv.gz'):
-                    files.append(href)
+                text = link.get_text(strip=True)
+
+                # Check the link text or href for posttrade CSV.gz files
+                if href and 'posttrade' in href.lower() and '.csv.gz' in href.lower():
+                    # Build the full URL
+                    if not href.startswith('http'):
+                        url = f"https://www.gettex.de{href}" if href.startswith('/') else f"https://www.gettex.de/{href}"
+                    else:
+                        url = href
+                    filename = href.split('/')[-1]
+                    files.append({'filename': filename, 'url': url})
+
+                elif text and 'posttrade' in text.lower() and '.csv.gz' in text.lower():
+                    # The link text is the file name; the href may be the URL
+                    filename = text
+                    if href:
+                        if not href.startswith('http'):
+                            url = f"https://www.gettex.de{href}" if href.startswith('/') else f"https://www.gettex.de/{href}"
+                        else:
+                            url = href
+                    else:
+                        # Fallback: try various URL patterns
+                        url = f"https://www.gettex.de/fileadmin/posttrade-data/{filename}"
+                    files.append({'filename': filename, 'url': url})
 
             # If no links were found, try an alternative structure
             if not files:
                 # Sometimes links are hidden in data attributes
                 for elem in soup.find_all(attrs={'data-href': True}):
                     href = elem.get('data-href', '')
                     if 'posttrade' in href.lower() and href.endswith('.csv.gz'):
                         files.append(href)
             print(f"[GETTEX] Found {len(files)} files on page")
 
         except Exception as e:
             print(f"[GETTEX] Error fetching page: {e}")
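A condensed sketch of the link-parsing idea above, run against a hypothetical page snippet (the real gettex markup may differ):

    from bs4 import BeautifulSoup

    html = '<a href="/fileadmin/posttrade-data/posttrade.20260126.21.30.munc.csv.gz">posttrade.20260126.21.30.munc.csv.gz</a>'
    soup = BeautifulSoup(html, 'html.parser')

    files = []
    for link in soup.find_all('a'):
        href = link.get('href', '')
        if href and 'posttrade' in href.lower() and '.csv.gz' in href.lower():
            url = f"https://www.gettex.de{href}" if href.startswith('/') else href
            files.append({'filename': href.split('/')[-1], 'url': url})
    print(files)  # one dict with 'filename' and 'url'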
@@ -211,19 +230,95 @@ class GettexExchange(BaseExchange):
         print(f"[{self.name}] Fetching trades for date: {target_date}")
 
-        # Generate the expected file names
-        expected_files = self._generate_expected_files(target_date)
-        print(f"[{self.name}] Trying {len(expected_files)} potential files")
+        # First, try to load the files from the web page
+        page_files = self._get_file_list_from_page()
 
-        # Try to download the files
-        successful_files = 0
-        for filename in expected_files:
-            trades = self._download_and_parse_file(filename)
-            if trades:
-                all_trades.extend(trades)
-                successful_files += 1
+        if page_files:
+            # Filter files for the target date
+            target_str = target_date.strftime('%Y%m%d')
+            next_day = target_date + timedelta(days=1)
+            next_day_str = next_day.strftime('%Y%m%d')
+
+            target_files = []
+            for f in page_files:
+                filename = f['filename']
+                # Files from the target date, or early morning hours of the next day
+                if target_str in filename:
+                    target_files.append(f)
+                elif next_day_str in filename:
+                    # Early morning hours (00:00 - 02:45) belong to the previous day
+                    try:
+                        # Format: posttrade.YYYYMMDD.HH.MM.{munc|mund}.csv.gz
+                        parts = filename.split('.')
+                        if len(parts) >= 4:
+                            hour = int(parts[2])
+                            if hour < 3:
+                                target_files.append(f)
+                    except:
+                        pass
+
+            print(f"[{self.name}] Found {len(target_files)} files for target date from page")
+
+            # Download the files from the web page
+            for f in target_files:
+                trades = self._download_file_by_url(f['url'], f['filename'])
+                if trades:
+                    all_trades.extend(trades)
+
+        # Fallback: try the expected file names
+        if not all_trades:
+            print(f"[{self.name}] No files from page, trying generated filenames...")
+            expected_files = self._generate_expected_files(target_date)
+            print(f"[{self.name}] Trying {len(expected_files)} potential files")
+
+            successful_files = 0
+            for filename in expected_files:
+                trades = self._download_and_parse_file(filename)
+                if trades:
+                    all_trades.extend(trades)
+                    successful_files += 1
 
-        print(f"[{self.name}] Successfully downloaded {successful_files} files")
+            print(f"[{self.name}] Successfully downloaded {successful_files} files")
 
         print(f"[{self.name}] Total trades fetched: {len(all_trades)}")
 
         return all_trades
 
+    def _download_file_by_url(self, url: str, filename: str) -> List[Trade]:
+        """Downloads a file directly from a URL"""
+        trades = []
+
+        try:
+            print(f"[{self.name}] Downloading: {url}")
+            response = requests.get(url, headers=HEADERS, timeout=60)
+
+            if response.status_code == 404:
+                return []
+
+            response.raise_for_status()
+
+            # Decompress the gzip payload
+            with gzip.GzipFile(fileobj=io.BytesIO(response.content)) as f:
+                csv_text = f.read().decode('utf-8')
+
+            # Parse the CSV
+            reader = csv.DictReader(io.StringIO(csv_text), delimiter=';')
+
+            for row in reader:
+                try:
+                    trade = self._parse_csv_row(row)
+                    if trade:
+                        trades.append(trade)
+                except Exception as e:
+                    print(f"[{self.name}] Error parsing row: {e}")
+                    continue
+
+            print(f"[{self.name}] Parsed {len(trades)} trades from {filename}")
+
+        except requests.exceptions.HTTPError as e:
+            if e.response.status_code != 404:
+                print(f"[{self.name}] HTTP error downloading {url}: {e}")
+        except Exception as e:
+            print(f"[{self.name}] Error downloading {url}: {e}")
+
+        return trades
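The gettex timestamp sits in dot-separated fields, so the early-morning check is a simple index lookup; a tiny illustration with a hypothetical name:

    filename = 'posttrade.20260127.01.15.munc.csv.gz'
    parts = filename.split('.')  # ['posttrade', '20260127', '01', '15', 'munc', 'csv', 'gz']
    date_str, hour = parts[1], int(parts[2])
    print(date_str, hour, hour < 3)  # 20260127 1 True -> counted toward the previous day

src/exchanges/stuttgart.py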
@@ -8,12 +8,22 @@ from typing import List, Optional
 from .base import BaseExchange, Trade
 from bs4 import BeautifulSoup
 
-# Browser User-Agent
+# Browser User-Agent (full browser fingerprint for Stuttgart)
 HEADERS = {
-    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
-    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
-    'Accept-Language': 'de-DE,de;q=0.9,en;q=0.8',
-    'Referer': 'https://www.boerse-stuttgart.de/'
+    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
+    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
+    'Accept-Language': 'de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7',
+    'Accept-Encoding': 'gzip, deflate, br',
+    'Referer': 'https://www.boerse-stuttgart.de/de-de/fuer-geschaeftspartner/reports/mifir-ii-delayed-data/',
+    'Sec-Ch-Ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
+    'Sec-Ch-Ua-Mobile': '?0',
+    'Sec-Ch-Ua-Platform': '"Windows"',
+    'Sec-Fetch-Dest': 'document',
+    'Sec-Fetch-Mode': 'navigate',
+    'Sec-Fetch-Site': 'same-origin',
+    'Sec-Fetch-User': '?1',
+    'Upgrade-Insecure-Requests': '1',
+    'Cache-Control': 'max-age=0'
 }
 
 # Börse Stuttgart URLs
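These headers mimic a regular Chrome page navigation. A minimal sketch of how they would be used, assuming the Referer page is also the report page being fetched (not confirmed by this diff):

    import requests

    session = requests.Session()
    session.headers.update(HEADERS)
    resp = session.get('https://www.boerse-stuttgart.de/de-de/fuer-geschaeftspartner/reports/mifir-ii-delayed-data/', timeout=30)
    print(resp.status_code)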