#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Query Medevio for the clinic agenda from today through the next month,
report the API response status, and export the results to a styled Excel
workbook (one "Agenda" sheet plus per-vaccine filtered sheets).
"""
import re
import json
from pathlib import Path
from datetime import datetime, timedelta

import requests
import pandas as pd
from dateutil import parser, tz
from dateutil.relativedelta import relativedelta
from openpyxl import load_workbook
from openpyxl.styles import Font, Alignment, PatternFill, Border, Side
from openpyxl.utils import get_column_letter
from openpyxl.utils.dataframe import dataframe_to_rows


GRAPHQL_URL = "https://api.medevio.cz/graphql"

CALENDAR_ID = "144c4e12-347c-49ca-9ec0-8ca965a4470d"
CLINIC_SLUG = "mudr-buzalkova"


# ==================== Load Token ====================
def load_gateway_token(storage_path="medevio_storage.json"):
    """Return Medevio gateway-access-token from saved Playwright storage."""
    path = Path(storage_path)
    if not path.exists():
        raise SystemExit(f"❌ Storage file not found: {path}")

    with path.open("r", encoding="utf-8") as f:
        state = json.load(f)

    token = next(
        (c["value"] for c in state["cookies"]
         if c["name"] == "gateway-access-token"), None
    )

    if not token:
        raise SystemExit("❌ gateway-access-token not found in storage file.")

    return token


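# load_gateway_token() expects Playwright's storage_state() JSON, roughly:
#   {"cookies": [{"name": "gateway-access-token", "value": "<token>", ...}], "origins": [...]}
# (shape assumed from the cookie lookup above).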
gateway_token = load_gateway_token()

# Browser-like headers (origin/referer of the Medevio web app) plus the bearer
# token recovered from the saved session.
headers = {
    "content-type": "application/json",
    "origin": "https://my.medevio.cz",
    "referer": "https://my.medevio.cz/",
    "authorization": f"Bearer {gateway_token}",
}

# === Dynamic date range: today (UTC midnight) through one month ahead ===
dnes = datetime.utcnow().date()
since = datetime.combine(dnes, datetime.min.time())
until = since + relativedelta(months=1) - timedelta(milliseconds=1)

since_iso = since.isoformat() + "Z"
until_iso = until.isoformat() + "Z"

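# since/until are naive UTC datetimes serialized with a manual "Z" suffix,
# e.g. "2025-10-17T00:00:00Z" (illustrative value), matching the DateTime
# arguments of the GraphQL query below.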
payload = {
    "operationName": "ClinicAgenda_ListClinicReservations",
    "variables": {
        "calendarIds": [CALENDAR_ID],
        "clinicSlug": CLINIC_SLUG,
        "since": since_iso,
        "until": until_iso,
        "locale": "cs",
        "emptyCalendarIds": False,
    },
    "query": """query ClinicAgenda_ListClinicReservations(
        $calendarIds: [UUID!],
        $clinicSlug: String!,
        $locale: Locale!,
        $since: DateTime!,
        $until: DateTime!,
        $emptyCalendarIds: Boolean!
    ) {
        reservations: listClinicReservations(
            clinicSlug: $clinicSlug,
            calendarIds: $calendarIds,
            since: $since,
            until: $until
        ) @skip(if: $emptyCalendarIds) {
            id
            start
            end
            note
            done
            color
            request {
                id
                displayTitle(locale: $locale)
                extendedPatient {
                    name
                    surname
                    dob
                    insuranceCompanyObject { shortName }
                }
            }
        }
    }""",
}

print("since:", since_iso)
|
||
print("until:", until_iso)
|
||
|
||
# ==================== Query API ====================
|
||
print("📡 Querying Medevio API for agenda...")
|
||
r = requests.post(GRAPHQL_URL, headers=headers, data=json.dumps(payload))
|
||
print("Status:", r.status_code)
|
||
|
||
try:
    data = resp.json()
except Exception as e:
    print("❌ Could not parse JSON:", e)
    print(resp.text)
    raise SystemExit(1)

if not data.get("data") or "reservations" not in data["data"]:
    raise SystemExit("⚠️ No 'reservations' data found in response.")

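# Expected response shape (abridged, inferred from the query above):
#   {"data": {"reservations": [{"id", "start", "end", "note", "done", "color",
#       "request": {"id", "displayTitle", "extendedPatient": {"name", "surname",
#       "dob", "insuranceCompanyObject": {"shortName"}}}}, ...]}}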
reservations = data["data"]["reservations"] or []

# ===== Process reservations into table =====
rows = []
for r in reservations:
    req = r.get("request") or {}
    patient = req.get("extendedPatient") or {}
    insurance = patient.get("insuranceCompanyObject") or {}

    try:
        start_dt = parser.isoparse(r.get("start")).astimezone(tz.gettz("Europe/Prague"))
        end_dt = parser.isoparse(r.get("end")).astimezone(tz.gettz("Europe/Prague"))
    except Exception:
        start_dt = end_dt = None

    date_str = start_dt.strftime("%Y-%m-%d") if start_dt else ""
    time_interval = f"{start_dt.strftime('%H:%M')}-{end_dt.strftime('%H:%M')}" if start_dt and end_dt else ""

    rows.append({
        "Date": date_str,
        "Time": time_interval,
        "Title": req.get("displayTitle") or "",
        "Patient": f"{patient.get('surname', '')} {patient.get('name', '')}".strip(),
        "DOB": patient.get("dob") or "",
        "Insurance": insurance.get("shortName") or "",
        "Note": r.get("note") or "",
        "Color": r.get("color") or "",
        "Request_ID": req.get("id") or "",
        "Reservation_ID": r.get("id"),
    })

df = pd.DataFrame(rows).sort_values(["Date", "Time"])


def kw_pattern(kw: str) -> str:
    """
    Match the exact phrase kw (case-insensitive),
    not as part of a '+something' continuation.

    Examples:
      'žloutenka a'   ✅ matches '… žloutenka a …'
                      ❌ NOT '… žloutenka a+b …'
      'žloutenka a+b' ✅ matches exactly that phrase
    """
    # start boundary: not preceded by a word char
    # end guard: not followed by optional spaces + '+' + word
    return rf"(?<!\w){re.escape(kw)}(?!\s*\+\s*\w)"

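# Illustrative behaviour of the pattern (example inputs):
#   re.search(kw_pattern("žloutenka a"), "Očkování žloutenka A", re.IGNORECASE)    -> match
#   re.search(kw_pattern("žloutenka a"), "Očkování žloutenka A+B", re.IGNORECASE)  -> None
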
# ===== Excel export =====
EXPORT_DIR = Path(r"z:\Dropbox\Ordinace\Reporty")
EXPORT_DIR.mkdir(exist_ok=True, parents=True)

timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
xlsx_path = EXPORT_DIR / f"{timestamp} Agenda (30 dní dopředu).xlsx"

# Safely delete older Agenda reports
for old in EXPORT_DIR.glob("*Agenda (30 dní dopředu).xlsx"):
    try:
        old.unlink()
    except PermissionError:
        print(f"⚠️ File is open, skipping delete: {old}")
    except Exception as e:
        print(f"⚠️ Could not delete {old}: {e}")

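# The agenda is written with pandas first, then the file is reopened with
# openpyxl so headers, column widths, fills, and borders can be styled in place.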
# Export DataFrame
df.to_excel(xlsx_path, index=False)
wb = load_workbook(xlsx_path)
ws = wb.active
ws.title = "Agenda"  # ✅ rename sheet


# === Apply styling and custom column widths ===
widths = {
    1: 11,   # Date
    2: 13,   # Time
    3: 45,   # Title
    4: 30,   # Patient
    5: 15,   # DOB
    6: 15,   # Insurance
    7: 30,   # Note
    8: 15,   # Color
    9: 37,   # Request_ID
    10: 37,  # Reservation_ID
}

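# Keys are 1-based column positions matching df's column order; any column not
# listed falls back to a width of 20 via widths.get(col_idx, 20).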
# Define styles
header_fill = PatternFill("solid", fgColor="FFFF00")  # real yellow
alt_fill = PatternFill("solid", fgColor="F2F2F2")     # light grey alternate rows
thin_border = Border(
    left=Side(style="thin", color="000000"),
    right=Side(style="thin", color="000000"),
    top=Side(style="thin", color="000000"),
    bottom=Side(style="thin", color="000000"),
)

# === Format header ===
for col_idx in range(1, len(df.columns) + 1):
    col_letter = get_column_letter(col_idx)
    cell = ws.cell(row=1, column=col_idx)
    cell.font = Font(bold=True)
    cell.alignment = Alignment(horizontal="center", vertical="center")
    cell.fill = header_fill
    cell.value = str(cell.value).upper()
    cell.border = thin_border
    ws.column_dimensions[col_letter].width = widths.get(col_idx, 20)

# === Format data rows ===
for r_idx, row in enumerate(ws.iter_rows(min_row=2, max_row=ws.max_row, max_col=ws.max_column), start=2):
    for cell in row:
        cell.border = thin_border
        if r_idx % 2 == 0:  # alternate row background
            cell.fill = alt_fill

ws.freeze_panes = "A2"

# === Vaccine sheet configuration ===
VACCINE_SHEETS = {
    "Chřipka": ["očkování", "chřipka"],
    "COVID": ["očkování", "covid"],
    "Pneumokok": ["očkování", "pneumo"],
    "Hep A": ["očkování", "žloutenka a"],
    "Hep B": ["očkování", "žloutenka b"],
    "Hep A+B": ["očkování", "žloutenka a+b"],
    "Klíšťovka": ["očkování", "klíšť"],
}

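# Each sheet keeps only rows whose Title contains ALL of its keywords
# (case-insensitive whole-phrase matches via kw_pattern, so "žloutenka a"
# does not also pick up "žloutenka a+b").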
# === Generate sheets based on keyword combinations ===
for sheet_name, keywords in VACCINE_SHEETS.items():
    mask = pd.Series(True, index=df.index)
    title_series = df["Title"].fillna("")

    for kw in keywords:
        pattern = kw_pattern(kw)
        mask &= title_series.str.contains(pattern, flags=re.IGNORECASE, regex=True)
    filtered_df = df[mask].copy()

    if filtered_df.empty:
        print(f"ℹ️ No matches for sheet '{sheet_name}' ({' AND '.join(keywords)})")
        continue

    ws_new = wb.create_sheet(title=sheet_name)
    for r in dataframe_to_rows(filtered_df, index=False, header=True):
        ws_new.append(r)

    # === Apply formatting ===
    for col_idx in range(1, len(filtered_df.columns) + 1):
        col_letter = get_column_letter(col_idx)
        c = ws_new.cell(row=1, column=col_idx)
        c.font = Font(bold=True)
        c.alignment = Alignment(horizontal="center", vertical="center")
        c.fill = header_fill  # bright yellow header
        c.value = str(c.value).upper()
        c.border = thin_border
        ws_new.column_dimensions[col_letter].width = widths.get(col_idx, 20)

    # Borders + alternating rows
    for r_idx, row in enumerate(ws_new.iter_rows(min_row=2, max_row=ws_new.max_row, max_col=ws_new.max_column), start=2):
        for cell in row:
            cell.border = thin_border
            if r_idx % 2 == 0:
                cell.fill = alt_fill

    ws_new.freeze_panes = "A2"
    print(f"🟡 Created sheet '{sheet_name}' with {len(filtered_df)} rows ({' AND '.join(keywords)})")


wb.save(xlsx_path)
print(f"📘 Exported clean agenda view to:\n{xlsx_path}")