Compare commits

...

10 Commits

Author SHA1 Message Date
ec06de625d Merge remote-tracking branch 'origin/main' 2025-12-12 14:29:50 +01:00
ba4ff2e74b z230 2025-12-12 14:29:24 +01:00
fa2c68c01b Merge remote-tracking branch 'origin/main' 2025-12-04 06:18:38 +01:00
d5635532a7 notebook 2025-12-04 06:18:23 +01:00
michaela.buzalkova
25e033f0b4 lenovo 2025-12-03 22:45:00 +01:00
718bb5131c z230 2025-12-01 18:15:50 +01:00
b59c8e15e1 z230 2025-11-20 12:04:47 +01:00
6f87ef14f0 z230 2025-11-19 14:02:01 +01:00
7c5e0c8975 notebook 2025-11-19 06:40:22 +01:00
63c6905746 notebook 2025-11-05 06:36:01 +01:00
18 changed files with 1138 additions and 370 deletions

2
.env Normal file
View File

@@ -0,0 +1,2 @@
TELEGRAM_TOKEN=8493490456:AAETJRKAuiggQit405_L0UDgq2w2lk6_sTk
TELEGRAM_CHAT_ID=6639316354

View File

@@ -1,24 +0,0 @@
import pymysql
from pymysql.cursors import DictCursor

# NOTE(review): hardcoded credentials — move to environment variables / secrets store.
conn = pymysql.connect(
    host="192.168.1.76",
    port=3307,
    user="root",
    password="Vlado9674+",
    database="fio",
    charset="utf8mb4",
    cursorclass=DictCursor,
)
try:
    with conn.cursor() as cur:
        # List all tables in the `fio` schema.
        cur.execute("SHOW TABLES;")
        # fix: f-string had no placeholder — plain string literal suffices.
        print("📋 Tables:", [r["Tables_in_fio"] for r in cur.fetchall()])
        # Row-count sanity check on the import target table.
        cur.execute("SELECT COUNT(*) AS cnt FROM transactions;")
        print("🧾 Rows in `transactions`:", cur.fetchone()["cnt"])
        # Column listing for schema verification.
        cur.execute("SHOW COLUMNS FROM transactions;")
        print("\n📊 Columns:")
        for r in cur.fetchall():
            print(" -", r["Field"])
finally:
    conn.close()  # fix: connection was previously never closed

View File

@@ -1,117 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Export Fio transactions (from MySQL → Excel)
--------------------------------------------
- Reads only cislo_uctu = '2800046620'
- For OZP (protiucet=2070101041) includes only positive objem
- Each sheet = insurance company (filtered by protiucet)
- First sheet = summary with total amounts and transaction counts
"""
import pandas as pd
import pymysql
from pathlib import Path
from datetime import datetime
# ======== CONFIG ========
# Connection settings for the Fio mirror database.
# NOTE(review): hardcoded credentials — move to env vars / a secrets store.
MYSQL_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "fio",
"charset": "utf8mb4",
}
# Insurer short name → counterparty account number (protiucet) to report on.
REPORTOVAT = {
"VZP": "1114007221",
"VOZP": "2010009091",
"ČPZP": "2054108761",
"OZP": "2070101041",
"ZPŠ": "2090309181",
"ZPMV": "2112108031",
}
# Timestamped output workbook path (one file per run).
EXPORT_PATH = Path(r"u:\Dropbox\!!!Days\Downloads Z230") / f"Fio_report_{datetime.now():%Y-%m-%d_%H-%M-%S}.xlsx"
# ======== LOAD DATA ========
def load_data():
    """Load filtered transactions from MySQL into a DataFrame.

    Returns rows for account 2800046620; for the OZP counterparty
    (2070101041) only positive `objem` values are included.
    """
    print("🔄 Načítám data z MySQL (účet 2800046620, pro OZP jen kladné objemy)...")
    conn = pymysql.connect(**MYSQL_CONFIG)
    try:
        sql = """
        SELECT *
        FROM transactions
        WHERE cislo_uctu = '2800046620'
          AND (
                protiucet <> '2070101041'
                OR (protiucet = '2070101041' AND objem > 0)
              );
        """
        df = pd.read_sql(sql, conn)
    finally:
        conn.close()  # fix: close the connection even when the query raises
    # Defensive: strip stray whitespace from column names.
    df.columns = df.columns.str.strip()
    print(f"✅ Načteno {len(df)} řádků, {len(df.columns)} sloupců.")
    return df
# ======== EXPORT TO EXCEL ========
def export_to_excel(df):
    """Write one sheet per insurer plus a summary sheet to EXPORT_PATH.

    df: transactions DataFrame with at least `protiucet` and `objem` columns.
    """
    summary_rows = []  # one summary dict per insurer
    with pd.ExcelWriter(EXPORT_PATH, engine="openpyxl") as writer:
        # --- INDIVIDUAL SHEETS ---
        for name, acc in REPORTOVAT.items():
            filtered = df[df["protiucet"].astype(str) == acc]
            if filtered.empty:
                print(f"⚠️ {name}: žádné transakce (účet {acc})")
                summary_rows.append({
                    "Pojišťovna": name,
                    "Číslo účtu": acc,
                    "Počet transakcí": 0,
                    "Součet objemu": 0.0
                })
                continue
            # Safe numeric conversion: drop NBSP thousands separators and
            # turn the decimal comma into a dot before casting to float.
            filtered = filtered.copy()
            filtered["objem_num"] = (
                filtered["objem"]
                .astype(str)
                .str.replace("\u00A0", "", regex=False)
                .str.replace(",", ".", regex=False)
                .astype(float)
            )
            # --- summary data ---
            total_sum = filtered["objem_num"].sum()
            total_count = len(filtered)
            summary_rows.append({
                "Pojišťovna": name,
                "Číslo účtu": acc,
                "Počet transakcí": total_count,
                "Součet objemu": round(total_sum, 2)
            })
            # --- write detailed sheet ---
            filtered.to_excel(writer, index=False, sheet_name=name)
            print(f"{name}: {len(filtered)} řádků exportováno, součet {total_sum:,.2f}")
        # --- SUMMARY SHEET (first logical overview tab) ---
        summary_df = pd.DataFrame(summary_rows)
        summary_df["Součet objemu"] = summary_df["Součet objemu"].map("{:,.2f}".format)
        summary_df.to_excel(writer, index=False, sheet_name="Přehled")
        print("🧾 Přidán přehledový list s celkovými součty.")
    print(f"\n📊 Hotovo! Soubor uložen jako:\n{EXPORT_PATH}")


# ======== MAIN ========
if __name__ == "__main__":
    df = load_data()
    export_to_excel(df)

View File

@@ -1,168 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Fio CSV import → MySQL (dev version)
------------------------------------
- Always drops & recreates `transactions` table
- Uses real CSV headers as seen in "Vyhledane pohyby (3).csv"
- Unique key = (Číslo účtu, ID operace, ID pokynu)
"""
import csv
from pathlib import Path
from datetime import datetime
import pymysql
from pymysql.cursors import DictCursor
import re
# ======== CONFIG ========
# Source CSV exported from Fio internet banking.
CSV_PATH = Path(r"u:\Dropbox\!!!Days\Downloads Z230\Vyhledane pohyby (3).csv")
TABLE_NAME = "transactions"
# NOTE(review): hardcoded credentials — move to env vars / a secrets store.
# autocommit=True means each INSERT is committed immediately.
MYSQL_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "fio",
"charset": "utf8mb4",
"cursorclass": DictCursor,
"autocommit": True,
}
# ======== HELPERS ========
def clean(s: str):
    """Return *s* stripped of surrounding whitespace; None for empty/blank/None."""
    if not s:
        return None
    stripped = s.strip()
    return stripped if stripped else None
def parse_date(raw: str):
    """Parse a Czech 'DD.MM.YYYY' string into a date; None on blank or bad input."""
    text = (raw or "").strip()
    if not text:
        return None
    try:
        return datetime.strptime(text, "%d.%m.%Y").date()
    except ValueError:
        return None
def parse_float(raw: str):
    """Parse a Czech-formatted number ('1 234,50', '-12,5 Kč'); None when unparseable."""
    if raw is None:
        return None
    text = str(raw).strip()
    # Drop ordinary and unicode spaces used as thousands separators.
    text = text.translate({ord(ch): None for ch in " \u00A0\u202F\u2007"})
    # Decimal comma → dot, then keep only characters a float can contain.
    text = text.replace(",", ".")
    text = re.sub(r"[^0-9.+-]", "", text)
    try:
        return float(text)
    except ValueError:
        return None
# ======== DB ========
def get_mysql_connection():
    """Open a fresh MySQL connection from the module-level config."""
    return pymysql.connect(**MYSQL_CONFIG)


def recreate_table(conn):
    """Drop and recreate table with schema matching CSV structure."""
    ddl = f"""
    DROP TABLE IF EXISTS `{TABLE_NAME}`;
    CREATE TABLE `{TABLE_NAME}` (
        id INT AUTO_INCREMENT PRIMARY KEY,
        datum DATE,
        objem DECIMAL(14,2),
        mena CHAR(3),
        cislo_uctu VARCHAR(40),
        protiucet VARCHAR(40),
        kod_banky VARCHAR(20),
        ks VARCHAR(20),
        vs VARCHAR(20),
        ss VARCHAR(20),
        zprava_pro_prijemce VARCHAR(500),
        poznamka VARCHAR(500),
        id_operace VARCHAR(50),
        id_pokynu VARCHAR(50),
        ks_1 VARCHAR(20),
        nazev_banky VARCHAR(100),
        nazev_protiuctu VARCHAR(200),
        ss_1 VARCHAR(20),
        typ VARCHAR(100),
        upresneni_objem VARCHAR(100),
        upresneni_mena VARCHAR(20),
        vs_1 VARCHAR(20),
        zadal VARCHAR(200),
        imported_at DATETIME DEFAULT CURRENT_TIMESTAMP,
        UNIQUE KEY uniq_tx (cislo_uctu, id_operace, id_pokynu)
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
    """
    # execute() runs one statement at a time, so split the script on ';'.
    statements = [part.strip() for part in ddl.strip().split(";") if part.strip()]
    with conn.cursor() as cur:
        for statement in statements:
            cur.execute(statement)
    print(f"✅ Tabulka `{TABLE_NAME}` znovu vytvořena podle CSV struktury.")
# ======== IMPORT ========
# DB columns in insert order; each CSV value is parsed per-type below.
INSERT_COLUMNS = (
    "datum", "objem", "mena", "cislo_uctu", "protiucet", "kod_banky",
    "ks", "vs", "ss", "zprava_pro_prijemce", "poznamka", "id_operace",
    "id_pokynu", "ks_1", "nazev_banky", "nazev_protiuctu", "ss_1",
    "typ", "upresneni_objem", "upresneni_mena", "vs_1", "zadal",
)


def _row_to_record(row):
    """Map one CSV row (Czech headers) to a dict of DB column values."""
    return {
        "datum": parse_date(row.get("Datum")),
        "objem": parse_float(row.get("Objem")),
        "mena": clean(row.get("Měna")),
        "cislo_uctu": clean(row.get("Číslo účtu")),
        "protiucet": clean(row.get("Protiúčet")),
        "kod_banky": clean(row.get("Kód banky")),
        "ks": clean(row.get("KS")),
        "vs": clean(row.get("VS")),
        "ss": clean(row.get("SS")),
        "zprava_pro_prijemce": clean(row.get("Zpráva pro příjemce")),
        "poznamka": clean(row.get("Poznámka")),
        "id_operace": clean(row.get("ID operace")),
        "id_pokynu": clean(row.get("ID pokynu")),
        "ks_1": clean(row.get("KS.1")),
        "nazev_banky": clean(row.get("Název banky")),
        "nazev_protiuctu": clean(row.get("Název protiúčtu")),
        "ss_1": clean(row.get("SS.1")),
        "typ": clean(row.get("Typ")),
        "upresneni_objem": clean(row.get("Upřesnění - objem")),
        "upresneni_mena": clean(row.get("Upřesnění - měna")),
        "vs_1": clean(row.get("VS.1")),
        "zadal": clean(row.get("Zadal")),
    }


def import_fio_csv():
    """Load the Fio CSV and insert rows, skipping duplicates via INSERT IGNORE."""
    with open(CSV_PATH, "r", encoding="utf-8-sig", newline="") as f:
        reader = csv.DictReader(f, delimiter=";", quotechar='"')
        rows = list(reader)
    total = len(rows)
    print(f"📄 Načteno {total} řádků ze souboru {CSV_PATH.name}")
    # fix: the INSERT statement text never changes, so build it once
    # instead of re-joining columns/placeholders on every row.
    cols = ", ".join(INSERT_COLUMNS)
    placeholders = ", ".join(["%s"] * len(INSERT_COLUMNS))
    sql = f"INSERT IGNORE INTO `{TABLE_NAME}` ({cols}) VALUES ({placeholders})"
    with get_mysql_connection() as conn:
        recreate_table(conn)
        inserted, skipped = 0, 0
        for i, row in enumerate(rows, start=1):
            data = _row_to_record(row)
            with conn.cursor() as cur:
                # execute() returns affected row count: 0 means duplicate key.
                affected = cur.execute(sql, [data[c] for c in INSERT_COLUMNS])
            if affected:
                inserted += 1
            else:
                skipped += 1
            if i % 500 == 0 or i == total:
                print(f"   {i}/{total} zpracováno... ({inserted} vloženo, {skipped} duplicit)")
    print(f"\n✅ Import dokončen: {inserted} nových, {skipped} duplicit přeskočeno.")


# ======== MAIN ========
if __name__ == "__main__":
    import_fio_csv()

View File

@@ -1,47 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Quick, verified dump of all Fio transactions from MySQL → Excel.
Column names are exactly as in DB.
"""
import pandas as pd
import pymysql
from pymysql.cursors import DictCursor
from pathlib import Path
from datetime import datetime
# ======== CONFIG ========
# NOTE(review): hardcoded credentials — move to env vars / a secrets store.
MYSQL_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "fio",
"charset": "utf8mb4",
}
# Timestamped output workbook path (one file per run).
EXPORT_PATH = Path(r"u:\Dropbox\!!!Days\Downloads Z230") / f"Fio_ALL_{datetime.now():%Y-%m-%d_%H-%M-%S}.xlsx"
# ======== MAIN ========
def dump_all_transactions():
    """Dump every `transactions` row (newest first) to a timestamped XLSX."""
    # NOTE(review): on older pymysql versions `with connection` yields a
    # cursor rather than the connection — confirm the installed version.
    with pymysql.connect(**MYSQL_CONFIG) as conn:
        sql = """
        SELECT
            *
        FROM transactions
        ORDER BY datum DESC;
        """
        df = pd.read_sql(sql, conn)
    print(f"✅ Načteno {len(df)} transakcí z MySQL.")
    # Save to Excel
    df.to_excel(EXPORT_PATH, index=False)
    print(f"📊 Excel export hotov:\n{EXPORT_PATH}")


if __name__ == "__main__":
    dump_all_transactions()

View File

@@ -0,0 +1,299 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Query the Medevio API for the clinic agenda (a dynamic one-month window
starting today), print progress, and export the reservations to Excel.
"""
import re
import json
import time
from pathlib import Path
import requests
import pandas as pd
from openpyxl import load_workbook
from openpyxl.styles import Font, Alignment, PatternFill, Border, Side
from openpyxl.utils import get_column_letter
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from Functions import get_reports_folder
from openpyxl.utils.dataframe import dataframe_to_rows
# Medevio GraphQL endpoint and clinic identifiers (calendar UUID + slug).
GRAPHQL_URL = "https://api.medevio.cz/graphql"
CALENDAR_ID = "144c4e12-347c-49ca-9ec0-8ca965a4470d"
CLINIC_SLUG = "mudr-buzalkova"
# ==================== Load Token ====================
def load_gateway_token(storage_path="medevio_storage.json"):
    """Return Medevio gateway-access-token from saved Playwright storage."""
    storage_file = Path(storage_path)
    if not storage_file.exists():
        raise SystemExit(f"❌ Storage file not found: {storage_file}")
    with storage_file.open("r", encoding="utf-8") as f:
        state = json.load(f)
    token = None
    for cookie in state["cookies"]:
        if cookie["name"] == "gateway-access-token":
            token = cookie["value"]
            break
    if not token:
        raise SystemExit("❌ gateway-access-token not found in storage file.")
    return token
gateway_token = load_gateway_token()

# Headers mimic the browser client; auth uses the Playwright-captured token.
headers = {
    "content-type": "application/json",
    "origin": "https://my.medevio.cz",
    "referer": "https://my.medevio.cz/",
    "authorization": f"Bearer {gateway_token}",
}

# === Dynamic date range: today .. +1 month (end inclusive) ===
dnes = datetime.utcnow().date()
# NOTE(review): microsecond=1 looks odd — presumably start-of-day (0) was
# intended; confirm before changing, the API may key on the exact string.
since = datetime.combine(dnes, datetime.min.time()).replace(microsecond=1)
until = since + relativedelta(months=1) - timedelta(milliseconds=1)
since_iso = since.isoformat() + "Z"
until_iso = until.isoformat() + "Z"

payload = {
    "operationName": "ClinicAgenda_ListClinicReservations",
    "variables": {
        "calendarIds": [CALENDAR_ID],
        "clinicSlug": CLINIC_SLUG,
        "since": since_iso,
        # fix: was hardcoded to "2025-11-30T21:59:59.999Z", silently ignoring
        # the dynamic range computed above (until_iso was never used).
        "until": until_iso,
        "locale": "cs",
        "emptyCalendarIds": False,
    },
    "query": """query ClinicAgenda_ListClinicReservations(
    $calendarIds: [UUID!],
    $clinicSlug: String!,
    $locale: Locale!,
    $since: DateTime!,
    $until: DateTime!,
    $emptyCalendarIds: Boolean!
) {
    reservations: listClinicReservations(
        clinicSlug: $clinicSlug,
        calendarIds: $calendarIds,
        since: $since,
        until: $until
    ) @skip(if: $emptyCalendarIds) {
        id
        start
        end
        note
        done
        color
        request {
            id
            displayTitle(locale: $locale)
            extendedPatient {
                name
                surname
                dob
                insuranceCompanyObject { shortName }
            }
        }
    }
}""",
}
print("since:", since_iso)
print("until:", until_iso)

# ==================== Query API ====================
print("📡 Querying Medevio API for agenda...")
r = requests.post(GRAPHQL_URL, headers=headers, data=json.dumps(payload))
print("Status:", r.status_code)

# Bail out loudly when the body is not JSON (e.g. WAF/HTML error page).
try:
    data = r.json()
except Exception as e:
    print("❌ Could not parse JSON:", e)
    print(r.text)
    raise SystemExit()

# Guard against GraphQL errors / unexpected response shapes.
if "data" not in data or "reservations" not in data["data"]:
    raise SystemExit("⚠️ No 'reservations' data found in response.")
reservations = data["data"]["reservations"]
from dateutil import parser, tz

# ===== Process reservations into table =====
rows = []
# fix: loop variable renamed from `r`, which shadowed the HTTP response object.
for res in reservations:
    req = res.get("request") or {}
    patient = req.get("extendedPatient") or {}
    insurance = patient.get("insuranceCompanyObject") or {}
    # API timestamps are ISO-8601; convert to local clinic time.
    try:
        start_dt = parser.isoparse(res.get("start")).astimezone(tz.gettz("Europe/Prague"))
        end_dt = parser.isoparse(res.get("end")).astimezone(tz.gettz("Europe/Prague"))
    except Exception:
        start_dt = end_dt = None
    date_str = start_dt.strftime("%Y-%m-%d") if start_dt else ""
    time_interval = f"{start_dt.strftime('%H:%M')}-{end_dt.strftime('%H:%M')}" if start_dt and end_dt else ""
    rows.append({
        "Date": date_str,
        "Time": time_interval,
        "Title": req.get("displayTitle") or "",
        "Patient": f"{patient.get('surname','')} {patient.get('name','')}".strip(),
        "DOB": patient.get("dob") or "",
        "Insurance": insurance.get("shortName") or "",
        "Note": res.get("note") or "",
        "Color": res.get("color") or "",
        "Request_ID": req.get("id") or "",
        "Reservation_ID": res.get("id"),
    })
df = pd.DataFrame(rows).sort_values(["Date", "Time"])
def kw_pattern(kw: str) -> str:
    """
    Match the exact phrase kw (case-insensitive),
    not as part of a '+something' continuation.
    Examples:
        'žloutenka a'   ✅ matches '… žloutenka a …'
                        ❌ NOT '… žloutenka a+b …'
        'žloutenka a+b' ✅ matches exactly that phrase
    """
    escaped = re.escape(kw)
    # (?<!\w)        — start boundary: not preceded by a word character
    # (?!\s*\+\s*\w) — end guard: not followed by optional spaces + '+' + word
    return rf"(?<!\w){escaped}(?!\s*\+\s*\w)"
# ===== Excel export =====
EXPORT_DIR = Path(get_reports_folder())
EXPORT_DIR.mkdir(exist_ok=True, parents=True)
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
xlsx_path = EXPORT_DIR / f"{timestamp} Agenda (30 dní dopředu).xlsx"

# Safely delete older Agenda reports (keep only the newest run).
for old in EXPORT_DIR.glob("*Agenda (30 dní dopředu).xlsx"):
    try:
        old.unlink()
    except PermissionError:
        print(f"⚠️ File is open, skipping delete: {old}")
    except Exception as e:
        print(f"⚠️ Could not delete {old}: {e}")

# Export DataFrame, then reopen for styling.
df.to_excel(xlsx_path, index=False)
wb = load_workbook(xlsx_path)
ws = wb.active
ws.title = "Agenda"  # rename the default sheet

# === Custom column widths (1-based column index → width) ===
widths = {
    1: 11,   # Date
    2: 13,   # Time
    3: 45,   # Title
    4: 30,   # Patient
    5: 15,   # DOB
    6: 15,   # Insurance
    7: 30,   # Note
    8: 15,   # Color
    9: 37,   # Request_ID
    10: 37,  # Reservation_ID
}

# Shared styles (also reused by the per-vaccine sheets below).
header_fill = PatternFill("solid", fgColor="FFFF00")  # real yellow
alt_fill = PatternFill("solid", fgColor="F2F2F2")     # light grey alternate rows
thin_border = Border(
    left=Side(style="thin", color="000000"),
    right=Side(style="thin", color="000000"),
    top=Side(style="thin", color="000000"),
    bottom=Side(style="thin", color="000000"),
)

# === Format header: bold, centered, yellow, upper-cased labels ===
for col_idx in range(1, len(df.columns) + 1):
    col_letter = get_column_letter(col_idx)
    cell = ws.cell(row=1, column=col_idx)
    cell.font = Font(bold=True)
    cell.alignment = Alignment(horizontal="center", vertical="center")
    cell.fill = header_fill
    cell.value = str(cell.value).upper()
    cell.border = thin_border
    ws.column_dimensions[col_letter].width = widths.get(col_idx, 20)

# === Format data rows: borders everywhere, banded background ===
for r_idx, row in enumerate(ws.iter_rows(min_row=2, max_row=ws.max_row, max_col=ws.max_column), start=2):
    for cell in row:
        cell.border = thin_border
        if r_idx % 2 == 0:  # alternate row background
            cell.fill = alt_fill
ws.freeze_panes = "A2"
# === Vaccine sheet configuration ===
# fix: removed the duplicate `from openpyxl.utils.dataframe import
# dataframe_to_rows` — it is already imported at the top of the file.
# Each sheet requires ALL of its keywords to match the reservation Title.
VACCINE_SHEETS = {
    "Chřipka": ["očkování", "chřipka"],
    "COVID": ["očkování", "covid"],
    "Pneumokok": ["očkování", "pneumo"],
    "Hep A": ["očkování", "žloutenka a"],
    "Hep B": ["očkování", "žloutenka b"],
    "Hep A+B": ["očkování", "žloutenka a+b"],
    "Klíšťovka": ["očkování", "klíšť"],
}

# === Generate sheets based on keyword combinations ===
for sheet_name, keywords in VACCINE_SHEETS.items():
    mask = pd.Series(True, index=df.index)
    title_series = df["Title"].fillna("")
    for kw in keywords:
        pattern = kw_pattern(kw)
        mask &= title_series.str.contains(pattern, flags=re.IGNORECASE, regex=True)
    filtered_df = df[mask].copy()
    if filtered_df.empty:
        print(f" No matches for sheet '{sheet_name}' ({' AND '.join(keywords)})")
        continue
    ws_new = wb.create_sheet(title=sheet_name)
    for r in dataframe_to_rows(filtered_df, index=False, header=True):
        ws_new.append(r)
    # === Apply formatting (same look as the main Agenda sheet) ===
    for col_idx in range(1, len(filtered_df.columns) + 1):
        col_letter = get_column_letter(col_idx)
        c = ws_new.cell(row=1, column=col_idx)
        c.font = Font(bold=True)
        c.alignment = Alignment(horizontal="center", vertical="center")
        c.fill = PatternFill("solid", fgColor="FFFF00")  # bright yellow header
        c.value = str(c.value).upper()
        c.border = thin_border
        ws_new.column_dimensions[col_letter].width = widths.get(col_idx, 20)
    # Borders + alternating rows
    for r_idx, row in enumerate(ws_new.iter_rows(min_row=2, max_row=ws_new.max_row, max_col=ws_new.max_column), start=2):
        for cell in row:
            cell.border = thin_border
            if r_idx % 2 == 0:
                cell.fill = PatternFill("solid", fgColor="F2F2F2")
    ws_new.freeze_panes = "A2"
    print(f"🟡 Created sheet '{sheet_name}' with {len(filtered_df)} rows ({' AND '.join(keywords)})")

wb.save(xlsx_path)
print(f"📘 Exported clean agenda view to:\n{xlsx_path}")

View File

@@ -0,0 +1 @@
{"cookies": [{"name": "gateway-access-token", "value": "YwBgkf8McREDKs7vCZj0EZD2fJsuV8RyDPtYx7WiDoz0nFJ9kxId8kcNEPBLFSwM+Tiz80+SOdFwo+oj", "domain": "my.medevio.cz", "path": "/", "expires": 1763372319, "httpOnly": false, "secure": false, "sameSite": "Lax"}, {"name": "aws-waf-token", "value": "b6a1d4eb-4350-40e5-8e52-1f5f9600fbb8:CgoAr9pC8c6zAAAA:OYwXLY5OyitSQPl5v2oIlS+hIxsrb5LxV4VjCyE2gJCFFE5PQu+0Zbxse2ZIofrNv5QKs0TYUDTmxPhZyTr9Qtjnq2gsVQxWHXzrbebv3Z7RbzB63u6Ymn3Fo8IbDev3CfCNcNuxCKltFEXLqSCjI2vqNY+7HZkgQBIqy2wMgzli3aSLq0w8lWYtZzyyot7q8RPXWMGTfaBUo2reY0SOSffm9rAivE9PszNfPid71CvNrGAAoxRbwb25eVujlyIcDVWe5vZ9Iw==", "domain": ".my.medevio.cz", "path": "/", "expires": 1761125920, "httpOnly": false, "secure": true, "sameSite": "Lax"}], "origins": [{"origin": "https://my.medevio.cz", "localStorage": [{"name": "awswaf_token_refresh_timestamp", "value": "1760780309860"}, {"name": "awswaf_session_storage", "value": "b6a1d4eb-4350-40e5-8e52-1f5f9600fbb8:CgoAr9pC8c+zAAAA:+vw//1NzmePjPpbGCJzUB+orCRivtJd098DbDX4AnABiGRw/+ql6ShqvFY4YdCY7w2tegb5mEPBdAmc4sNi22kNR9BuEoAgCUiMhkU1AZWfzM51zPfTh7SveCrREZ7xdvxcqKPMmfVLRYX5E4+UWh22z/LKQ7+d9VERp3J+wWCUW3dFFirkezy3N7b2FVjTlY/RxsZwhejQziTG/L3CkIFFP3mOReNgBvDpj7aKoM1knY4IL4TZ8E7zNv3nTsvzACLYvnUutVOUcofN1TfOzwZshSKsEXsMzrQn8PzLccX1jM5VSzce7gfEzl0zSPsT8NB3Sna+rhMIttDNYgvbW1HsfG2LIeKMR27Zf8hkslDRVVkcU/Kp2jLOEdhhrBKGjKY2o9/uX3NExdzh5MEKQSSRtmue01BpWYILPH23rMsz4YSmF+Ough5OeQoC95rkcYwVXMhwvUN9Zfp9UZ4xCNfFUex5dOrg9aJntYRnaceeocGUttNI5AdT0i3+osV6XHXzKxeqO8zLCS9BIsCzxaHfdqqem5DorMceuGKz+QqksatIQAA=="}, {"name": "Application.Intl.locale", "value": "cs"}, {"name": "Password.prefill", "value": "{\"username\":\"vladimir.buzalka@buzalka.cz\",\"type\":\"email\"}"}]}]}

View File

@@ -0,0 +1,109 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pymysql
import pandas as pd
from datetime import datetime, timedelta
from openpyxl import load_workbook
from openpyxl.styles import Font, PatternFill, Alignment
from pathlib import Path
import os
# ==============================
# ⚙️ CONFIG
# ==============================
# NOTE(review): hardcoded credentials — move to env vars / a secrets store.
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
}
# Output location — timestamped file, older runs are deleted below.
timestamp = datetime.now().strftime("%Y-%m-%d %H-%M-%S")
OUTPUT_DIR = Path(r"U:\Dropbox\!!!Days\Downloads Z230")
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
OUTPUT_FILE = OUTPUT_DIR / f"{timestamp} Medevio report.xlsx"
# ==============================
# 🧹 >>> NEW: Remove old reports
# ==============================
# Only the freshest report should remain in the folder.
for old_report in OUTPUT_DIR.glob("* Medevio report.xlsx"):
    try:
        old_report.unlink()
        print("Removed old report:", old_report)
    except Exception as err:
        print("Could not remove:", old_report, "Error:", err)
# ==============================
# 📥 FETCH DATA
# ==============================
conn = pymysql.connect(**DB_CONFIG)

# fix: previously named `two_months_ago`, which was misleading — the
# window is actually 1000 days. TODO confirm the intended range.
cutoff_ts = (datetime.now() - timedelta(days=1000)).strftime("%Y-%m-%d %H:%M:%S")

sql = """
SELECT
    id,
    pacient_prijmeni,
    pacient_jmeno,
    pacient_rodnecislo,
    displayTitle,
    createdAt,
    updatedAt,
    doneAt,
    removedAt,
    attachmentsProcessed,
    messagesProcessed,
    communicationprocessed,
    questionnaireprocessed,
    lastSync
FROM pozadavky
WHERE createdAt >= %s
ORDER BY updatedAt DESC
"""
try:
    df = pd.read_sql(sql, conn, params=(cutoff_ts,))
finally:
    conn.close()  # fix: close the connection even if the query raises
# ==============================
# 💾 SAVE TO EXCEL
# ==============================
df.to_excel(OUTPUT_FILE, index=False)

# ==============================
# 🎨 FORMAT EXCEL
# ==============================
wb = load_workbook(OUTPUT_FILE)
ws = wb.active

# Yellow bold centered header row
header_fill = PatternFill(start_color="FFFF00", end_color="FFFF00", fill_type="solid")
header_font = Font(bold=True)
for cell in ws[1]:
    cell.fill = header_fill
    cell.font = header_font
    cell.alignment = Alignment(horizontal="center")

# >>> NEW: AutoFilter over the whole used range
ws.auto_filter.ref = ws.dimensions

# Auto column width based on the longest rendered value per column.
for column in ws.columns:
    max_length = 0
    column_letter = column[0].column_letter
    for cell in column:
        # fix: replaced bare `except: pass` with a targeted None guard —
        # len(str(...)) cannot realistically raise here.
        if cell.value is not None:
            max_length = max(max_length, len(str(cell.value)))
    ws.column_dimensions[column_letter].width = max_length + 2

wb.save(OUTPUT_FILE)
print("Report saved:", OUTPUT_FILE)

View File

@@ -0,0 +1,30 @@
import pymysql
import pandas as pd
from datetime import datetime, timedelta

# Connection settings for the local Medevio mirror database.
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3307,
    "user": "root",
    "password": "Vlado9674+",
    "database": "medevio",
    "charset": "utf8mb4",
    "cursorclass": pymysql.cursors.DictCursor,
}

conn = pymysql.connect(**DB_CONFIG)

# Requests created in the last 60 days, newest first.
cutoff = (datetime.now() - timedelta(days=60)).strftime("%Y-%m-%d %H:%M:%S")
sql = """
SELECT *
FROM pozadavky
WHERE createdAt >= %s
ORDER BY createdAt DESC
"""
df = pd.read_sql(sql, conn, params=(cutoff,))
conn.close()

print("Rows returned:", len(df))
print(df.head(10))

View File

@@ -0,0 +1,144 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pymysql
import pandas as pd
from datetime import datetime, timedelta
from openpyxl import load_workbook
from openpyxl.styles import Font, PatternFill, Alignment
from pathlib import Path
import os
# ==============================
# ⚙️ CONFIG
# ==============================
# NOTE(review): hardcoded credentials — move to env vars / a secrets store.
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
}
# Output location — timestamped file, older runs are deleted below.
timestamp = datetime.now().strftime("%Y-%m-%d %H-%M-%S")
OUTPUT_DIR = Path(r"U:\Dropbox\!!!Days\Downloads Z230")
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
OUTPUT_FILE = OUTPUT_DIR / f"{timestamp} Medevio report.xlsx"
# ==============================
# 🧹 Remove old reports
# ==============================
# Only the freshest report should remain in the folder.
for old_report in OUTPUT_DIR.glob("* Medevio report.xlsx"):
    try:
        old_report.unlink()
        print("Removed old report:", old_report)
    except Exception as err:
        print("Could not remove:", old_report, "Error:", err)
# ==============================
# 📥 FETCH DATA — POZADAVKY
# ==============================
# Connection stays open: the messages query below reuses it.
conn = pymysql.connect(**DB_CONFIG)

# fix: previously named `two_months_ago`, which was misleading — the
# window is actually 1000 days. TODO confirm the intended range.
cutoff_ts = (datetime.now() - timedelta(days=1000)).strftime("%Y-%m-%d %H:%M:%S")

sql_pozadavky = """
SELECT
    id,
    pacient_prijmeni,
    pacient_jmeno,
    pacient_rodnecislo,
    displayTitle,
    createdAt,
    updatedAt,
    doneAt,
    removedAt,
    attachmentsProcessed,
    messagesProcessed,
    communicationprocessed,
    questionnaireprocessed,
    lastSync
FROM pozadavky
WHERE createdAt >= %s
ORDER BY updatedAt DESC
"""
df_poz = pd.read_sql(sql_pozadavky, conn, params=(cutoff_ts,))
# ==============================
# 📥 FETCH DATA — MESSAGES (WITH JOIN)
# ==============================
# fix: the original interpolated a Python tuple repr directly into the SQL —
# broken for an empty id list (`IN ()`), fragile for one element, and
# injection-prone. Build %s placeholders and pass the ids as parameters.
request_ids = df_poz["id"].tolist()
if request_ids:
    placeholders = ", ".join(["%s"] * len(request_ids))
    sql_messages = f"""
SELECT
    p.pacient_jmeno,
    p.pacient_prijmeni,
    p.pacient_rodnecislo,
    p.displayTitle AS pozadavek_title,
    m.id,
    m.text,
    m.sender_name,
    m.created_at,
    m.read_at,
    m.updated_at,
    m.attachment_url,
    m.attachment_description,
    m.attachment_content_type,
    m.inserted_at
FROM medevio_conversation m
LEFT JOIN pozadavky p
    ON m.request_id COLLATE utf8mb4_unicode_ci
        = p.id COLLATE utf8mb4_unicode_ci
WHERE m.request_id IN ({placeholders})
ORDER BY m.created_at DESC
"""
    df_msg = pd.read_sql(sql_messages, conn, params=request_ids)
else:
    # No matching requests → empty messages frame (previously crashed on `IN ()`).
    df_msg = pd.DataFrame()
conn.close()
# ==============================
# 💾 SAVE BOTH SHEETS
# ==============================
with pd.ExcelWriter(OUTPUT_FILE, engine="openpyxl") as writer:
    df_poz.to_excel(writer, sheet_name="pozadavky", index=False)
    df_msg.to_excel(writer, sheet_name="messages", index=False)

# ==============================
# 🎨 FORMAT EXCEL
# ==============================
wb = load_workbook(OUTPUT_FILE)
yellow = PatternFill(start_color="FFFF00", end_color="FFFF00", fill_type="solid")
header_font = Font(bold=True)


def format_sheet(ws):
    """Style one worksheet: yellow bold header, autofilter, auto column widths."""
    for cell in ws[1]:
        cell.fill = yellow
        cell.font = header_font
        cell.alignment = Alignment(horizontal="center")
    ws.auto_filter.ref = ws.dimensions
    for column in ws.columns:
        max_length = 0
        col_letter = column[0].column_letter
        for cell in column:
            if cell.value:
                max_length = max(max_length, len(str(cell.value)))
        ws.column_dimensions[col_letter].width = max_length + 2


format_sheet(wb["pozadavky"])
format_sheet(wb["messages"])
wb.save(OUTPUT_FILE)
print("Report saved:", OUTPUT_FILE)

212
PSA/01 PSA.py Normal file
View File

@@ -0,0 +1,212 @@
import firebirdsql as fb,os
import pandas as pd
# TCP to the Firebird 2.5 server. Use the DB path as seen by the *server* (Windows path).
# NOTE(review): hardcoded SYSDBA/masterkey default credentials — restrict or externalize.
conn = fb.connect(
host="192.168.1.10",
port=3050,
database=r"m:\Medicus\data\MEDICUS.FDB", # raw string for backslashes
user="SYSDBA",
password="masterkey",
charset="WIN1250", # adjust if needed
)
# Tiny helper to fetch directly into DataFrame (avoids the pandas/SQLAlchemy warning)
def query_df(sql, params=None):
    """Run *sql* on the module-level Firebird connection; return a DataFrame."""
    cur = conn.cursor()
    try:
        cur.execute(sql, params or ())
        rows = cur.fetchall()
        cols = [d[0].strip() for d in cur.description]  # Firebird pads column names
    finally:
        cur.close()  # fix: the cursor was previously never closed
    return pd.DataFrame(rows, columns=cols)
# Smoke test — verifies connectivity before running the real query.
print(query_df("SELECT 1 AS ONE FROM RDB$DATABASE"))
# Your table — sample of the patient registry (kar).
df = query_df("SELECT FIRST 100 * FROM kar")
print(df)
from datetime import datetime
# Reporting window: calendar year 2025 (end exclusive).
start = datetime(2025, 1, 1)
end = datetime(2026, 1, 1)
# PSA lab results joined with patient data; the correlated subquery lists
# matching billing codes (dokladd) for the same patient into one cell.
# NOTE(review): the in-string comment says "±7 days" but the BETWEEN clause
# actually uses ±365 days — confirm which window is intended.
sql = """
SELECT
/*vh.idvh,*/
vh.idpacient,
kar.prijmeni,
kar.jmeno,
kar.rodcis,
vh.datum,
/*vh.idhodn,*/
/*vd.poradi,*/
/*vd.idmetod,*/
/* NEW: list of matching dokladd entries within ±7 days, one cell */
(
SELECT LIST(CAST(dd.datose AS VARCHAR(10)) || ' ' || dd.kod, ', ')
FROM dokladd dd
WHERE dd.rodcis = kar.rodcis
AND (dd.kod = '01130' or dd.kod = '01131' OR dd.kod = '01132' OR dd.kod = '01133' OR dd.kod = '01134')
AND dd.datose BETWEEN vh.datum - 365 AND vh.datum + 365
) AS vykodovano,
lm.kodtext,
lm.nazev,
vd.vysl,
lj.jedn,
ls.normdol,
ls.normhor
FROM labvh vh
JOIN labvd vd ON vd.idvh = vh.idvh
JOIN kar ON kar.idpac = vh.idpacient
JOIN labmetod lm ON lm.idmetod = vd.idmetod
JOIN labjedn lj ON lj.idjedn = vd.idjedn
JOIN labskaly ls ON ls.idskaly = vd.idskaly
WHERE vh.datum >= ?
AND vh.datum < ?
AND lm.nazev CONTAINING 'PSA'
/*ORDER BY kar.idpac, vh.datum, vd.poradi;*/
ORDER BY vh.datum desc;
"""
df_direct = query_df(sql, (start, end))
import re
import numpy as np

# --- 0) Helper: parse numeric value from string like "5,6", "<0.1", "3.2 mmol/L" ---
num_re = re.compile(r'[-+]?\d+(?:[.,]\d+)?(?:[eE][-+]?\d+)?')

def to_num(x):
    """Extract a float from a lab-result string; NaN when nothing numeric."""
    if x is None:
        return np.nan
    text = str(x).strip()
    if not text:
        return np.nan
    match = num_re.search(text.replace('\u00A0', ' '))  # NBSP → plain space first
    if match is None:
        return np.nan
    try:
        value = float(match.group(0).replace(',', '.'))
    except ValueError:
        return np.nan
    # Qualifier heuristics:
    # "<x" → half of x (below detection limit), ">x" → x (at least that much).
    if text.startswith('<'):
        return value * 0.5
    if text.startswith('>'):
        return value
    return value
# --- 1) Prepare numeric columns + ratio in pandas before export ---
# Assumes df_direct exists with columns 'VYSL' and 'NORMHOR' (case per your SELECT)
df_direct["VYSL_NUM"] = df_direct["VYSL"].apply(to_num)
df_direct["NORMHOR_NUM"] = df_direct["NORMHOR"].apply(to_num)
# Avoid division by zero/NaN: a zero upper limit becomes NaN so the ratio is NaN.
den = df_direct["NORMHOR_NUM"].replace(0, np.nan)
# RATIO = result / upper limit of normal; clipped at 0, may exceed 1 when above ULN.
df_direct["RATIO"] = (df_direct["VYSL_NUM"] / den).clip(lower=0) # can exceed 1 if over ULN
from datetime import datetime
from pathlib import Path
from openpyxl import load_workbook
from openpyxl.utils import get_column_letter
from openpyxl.styles import Alignment, Border, Side
from openpyxl.formatting.rule import ColorScaleRule
from openpyxl.styles import PatternFill
from openpyxl.formatting.rule import FormulaRule

base_path = Path(r"z:\Dropbox\Ordinace\Reporty")
base_path.mkdir(parents=True, exist_ok=True)

# ================= DELETE OLD PSA REPORTS ==================
# Use pathlib.glob instead of os.listdir + endswith — same matches, clearer.
for old_report in base_path.glob("*PSA report.xlsx"):
    try:
        old_report.unlink()
        print(f"🗑️ Deleted old PSA report: {old_report.name}")
    except Exception as e:
        print(f"⚠️ Could not delete {old_report.name}: {e}")

# ================= CREATE NEW FILENAME ==================
timestamp = datetime.now().strftime("%Y-%m-%d %H-%M-%S")
output_file = base_path / f"{timestamp} PSA report.xlsx"
print(f"📄 New PSA report will be saved as: {output_file}")

# ---- 2) Export DataFrame to Excel ----
# Assumes df_direct already exists (your joined query result)
df_direct.to_excel(output_file, index=False, sheet_name="PSA")
# ---- 3) Open with openpyxl for formatting ----
wb = load_workbook(output_file)
ws = wb["PSA"]

# Auto-size columns (capped at 50 characters)
for column_cells in ws.columns:
    letter = get_column_letter(column_cells[0].column)
    widest = 0
    for cell in column_cells:
        try:
            if cell.value is not None:
                widest = max(widest, len(str(cell.value)))
        except Exception:
            pass
    ws.column_dimensions[letter].width = min(widest + 2, 50)  # cap width

# Thin border style
thin_border = Border(
    left=Side(style="thin"),
    right=Side(style="thin"),
    top=Side(style="thin"),
    bottom=Side(style="thin"),
)

# Borders everywhere; center columns A, B and E
centered = Alignment(horizontal="center")
for row in ws.iter_rows(min_row=1, max_row=ws.max_row, min_col=1, max_col=ws.max_column):
    for cell in row:
        cell.border = thin_border
        if cell.column_letter in ("A", "B", "E"):
            cell.alignment = centered

# Filter on the header row and keep it visible while scrolling
ws.auto_filter.ref = ws.dimensions
ws.freeze_panes = "A2"
# Map header labels to their 1-based column indexes.
header_index = {cell.value: idx + 1 for idx, cell in enumerate(ws[1])}
vysl_idx = header_index.get("VYSL")
ratio_idx = header_index.get("RATIO")
if not (vysl_idx and ratio_idx):
    raise RuntimeError("Missing required columns: VYSL and/or RATIO")

vysl_col = get_column_letter(vysl_idx)
ratio_col = get_column_letter(ratio_idx)
last_row = ws.max_row
rng_vysl = f"{vysl_col}2:{vysl_col}{last_row}"

# Traffic-light fills keyed on RATIO = VYSL / upper limit of normal.
green = PatternFill(start_color="63BE7B", end_color="63BE7B", fill_type="solid")
yellow = PatternFill(start_color="FFEB84", end_color="FFEB84", fill_type="solid")
red = PatternFill(start_color="F8696B", end_color="F8696B", fill_type="solid")

# Non-overlapping rules; stop when one matches.
rules = (
    (f"${ratio_col}2<=0.80", green),
    (f"AND(${ratio_col}2>0.80, ${ratio_col}2<1)", yellow),
    (f"${ratio_col}2>=1", red),
)
for formula, fill in rules:
    ws.conditional_formatting.add(
        rng_vysl,
        FormulaRule(formula=[formula], fill=fill, stopIfTrue=True),
    )

wb.save(output_file)
print(f"Saved: {output_file}")

View File

@@ -1,11 +1,11 @@
import firebirdsql as fb
import firebirdsql as fb,os
import pandas as pd
# TCP to the Firebird 2.5 server. Use the DB path as seen by the *server* (Windows path).
conn = fb.connect(
host="192.168.1.4",
host="192.168.1.10",
port=3050,
database=r"z:\Medicus 3\data\MEDICUS.FDB", # raw string for backslashes
database=r"m:\Medicus\data\MEDICUS.FDB", # raw string for backslashes
user="SYSDBA",
password="masterkey",
charset="WIN1250", # adjust if needed
@@ -66,7 +66,8 @@ JOIN labskaly ls ON ls.idskaly = vd.idskaly
WHERE vh.datum >= ?
AND vh.datum < ?
AND lm.nazev CONTAINING 'PSA'
ORDER BY kar.idpac, vh.datum, vd.poradi;
/*ORDER BY kar.idpac, vh.datum, vd.poradi;*/
ORDER BY vh.datum desc;
"""
df_direct = query_df(sql, (start, end))
@@ -118,11 +119,23 @@ from openpyxl.formatting.rule import ColorScaleRule
from openpyxl.styles import PatternFill
from openpyxl.formatting.rule import FormulaRule
# ---- 1) Build timestamped output path ----
base_path = Path("u:\Dropbox\!!!Days\Downloads Z230")
base_path.mkdir(parents=True, exist_ok=True) # ensure folder exists
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
output_file = base_path / f"lab_results_2025_{timestamp}.xlsx"
base_path = Path(r"z:\Dropbox\Ordinace\Reporty")
base_path.mkdir(parents=True, exist_ok=True)
# ================= DELETE OLD PSA REPORTS ==================
for fname in os.listdir(base_path):
if fname.endswith("PSA report.xlsx"):
try:
os.remove(base_path / fname)
print(f"🗑️ Deleted old PSA report: {fname}")
except Exception as e:
print(f"⚠️ Could not delete {fname}: {e}")
# ================= CREATE NEW FILENAME ==================
timestamp = datetime.now().strftime("%Y-%m-%d %H-%M-%S")
output_file = base_path / f"{timestamp} PSA report.xlsx"
print(f"📄 New PSA report will be saved as: {output_file}")
# ---- 2) Export DataFrame to Excel ----
# Assumes df_direct already exists (your joined query result)

View File

@@ -0,0 +1,7 @@
from EmailMessagingGraph import send_mail

# One-shot notification: tell the owner the report run finished OK.
send_mail(to="vladimir.buzalka@buzalka.cz", subject="Hotovo", body="Report doběhl OK.")

View File

@@ -0,0 +1,91 @@
"""
EmailMessagingGraph.py
----------------------
Private Microsoft Graph mail sender
Application permissions, shared mailbox
"""
import msal
import requests
from functools import lru_cache
from typing import Union, List
# =========================
# PRIVATE CONFIG (ONLY YOU)
# =========================
TENANT_ID = "7d269944-37a4-43a1-8140-c7517dc426e9"
CLIENT_ID = "4b222bfd-78c9-4239-a53f-43006b3ed07f"
CLIENT_SECRET = "Txg8Q~MjhocuopxsJyJBhPmDfMxZ2r5WpTFj1dfk"
SENDER = "reports@buzalka.cz"
AUTHORITY = f"https://login.microsoftonline.com/{TENANT_ID}"
SCOPE = ["https://graph.microsoft.com/.default"]
@lru_cache(maxsize=1)
def _build_app() -> "msal.ConfidentialClientApplication":
    """Create the MSAL confidential-client application exactly once."""
    return msal.ConfidentialClientApplication(
        CLIENT_ID,
        authority=AUTHORITY,
        client_credential=CLIENT_SECRET,
    )


def _get_token() -> str:
    """Return a currently-valid Microsoft Graph access token.

    BUG FIX: the original decorated this function with ``@lru_cache``, which
    froze the *token string* for the lifetime of the process; after the
    token's ~1 h expiry every subsequent sendMail would fail with 401.
    Only the MSAL app object is cached now -- ``acquire_token_for_client``
    consults MSAL's internal token cache and transparently refreshes the
    token when it is about to expire.

    :raises RuntimeError: if authentication against Entra ID fails.
    """
    token = _build_app().acquire_token_for_client(scopes=SCOPE)
    if "access_token" not in token:
        raise RuntimeError(f"Graph auth failed: {token}")
    return token["access_token"]
def send_mail(
    to: Union[str, List[str]],
    subject: str,
    body: str,
    *,
    html: bool = False,
):
    """
    Send email via Microsoft Graph.

    :param to: email or list of emails
    :param subject: subject
    :param body: email body
    :param html: True = HTML, False = plain text
    :raises RuntimeError: if Graph does not accept the message (non-202 reply)
    """
    recipients = [to] if isinstance(to, str) else to
    payload = {
        "message": {
            "subject": subject,
            "body": {
                "contentType": "HTML" if html else "Text",
                "content": body,
            },
            "toRecipients": [
                {"emailAddress": {"address": addr}} for addr in recipients
            ],
        },
        "saveToSentItems": "true",
    }
    response = requests.post(
        f"https://graph.microsoft.com/v1.0/users/{SENDER}/sendMail",
        headers={
            "Authorization": f"Bearer {_get_token()}",
            "Content-Type": "application/json",
        },
        json=payload,
        timeout=30,
    )
    # Graph answers 202 Accepted on success; anything else is an error.
    if response.status_code != 202:
        raise RuntimeError(
            f"sendMail failed [{response.status_code}]: {response.text}"
        )

35
TelegramMessaging.py Normal file
View File

@@ -0,0 +1,35 @@
# TelegramMessaging.py
# Thin helper around the Telegram Bot API "sendMessage" endpoint.
import os
import requests
from dotenv import load_dotenv

# Pull TELEGRAM_* settings from a local .env file into the process environment.
load_dotenv()

# Bot credentials; both must be present for send_message() to do anything.
TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN")
TELEGRAM_CHAT_ID = os.getenv("TELEGRAM_CHAT_ID")
def send_message(text: str) -> bool:
    """
    Send a plain text message to a Telegram chat using a bot.
    Returns True on success, False otherwise.
    """
    # Best-effort helper: never raises, only reports failures to stdout.
    if not TELEGRAM_TOKEN or not TELEGRAM_CHAT_ID:
        print("TelegramMessaging: Missing TELEGRAM_TOKEN or TELEGRAM_CHAT_ID in environment.")
        return False
    api_url = f"https://api.telegram.org/bot{TELEGRAM_TOKEN}/sendMessage"
    try:
        resp = requests.post(
            api_url,
            json={"chat_id": TELEGRAM_CHAT_ID, "text": text},
            timeout=10,
        )
        if resp.status_code == 200:
            return True
        print(f"TelegramMessaging: Telegram API returned {resp.status_code}: {resp.text}")
    except Exception as e:
        print(f"TelegramMessaging: Error sending message: {e}")
    return False

View File

@@ -3,23 +3,23 @@
from pathlib import Path
import time
import fdb
import firebirdsql as fb
import pandas as pd
import re
from openpyxl import load_workbook
from openpyxl.worksheet.table import Table, TableStyleInfo
from openpyxl.styles import Font, PatternFill, Alignment
from openpyxl.utils import get_column_letter
from Functions import get_medicus_connection
# from Functions import get_medicus_connection
# ================== Výstupní cesta ==================
BASE_DIR = Path(r"z:\Dropbox\Ordinace\Reporty") # uprav dle potřeby
timestamp = time.strftime("%Y-%m-%d %H-%M-%S")
xlsx_name = f"Pacienti očkování {timestamp}.xlsx"
xlsx_name = f"{timestamp} Očkování report.xlsx"
xlsx_path = BASE_DIR / xlsx_name
# ================== Smazání starých souborů ==================
for old_file in BASE_DIR.glob("Pacienti očkování *.xlsx"):
for old_file in BASE_DIR.glob("*očkování report.xlsx"):
try:
if old_file != xlsx_path: # skip the file we're about to create
old_file.unlink()
@@ -39,7 +39,15 @@ SHEETS = {
}
# ================== Připojení k DB ==================
con = get_medicus_connection()
# con = get_medicus_connection()
con = fb.connect(
host="192.168.1.10",
port=3050,
database=r"m:\Medicus\data\MEDICUS.FDB", # raw string for backslashes
user="SYSDBA",
password="masterkey",
charset="WIN1250", # adjust if needed
)
# ================== SQL dotaz ==================
sql = """
SELECT

View File

@@ -0,0 +1,168 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pathlib import Path
import time
import firebirdsql as fb
import pandas as pd
import re
from openpyxl import load_workbook
from openpyxl.worksheet.table import Table, TableStyleInfo
from openpyxl.styles import Font, PatternFill, Alignment
from openpyxl.utils import get_column_letter
# from Functions import get_medicus_connection
# ================== Output path ==================
BASE_DIR = Path(r"z:\Dropbox\Ordinace\Reporty")  # adjust as needed
timestamp = time.strftime("%Y-%m-%d %H-%M-%S")
xlsx_name = f"{timestamp} Očkování report.xlsx"
xlsx_path = BASE_DIR / xlsx_name
# ================== Delete old report files ==================
# NOTE(review): the glob pattern starts with lowercase "očkování" while the
# created file uses capital "Očkování"; Path.glob matches case-insensitively
# on Windows so this works there, but on a case-sensitive filesystem old
# reports would silently be kept -- confirm the deployment target.
for old_file in BASE_DIR.glob("*očkování report.xlsx"):
    try:
        if old_file != xlsx_path:  # skip the file we're about to create
            old_file.unlink()
            print(f"Smazán starý soubor: {old_file.name}")
    except Exception as e:
        print(f"⚠️ Nelze smazat {old_file.name}: {e}")
# ================== Vaccine group definitions ==================
# Sheet name -> case-insensitive substrings matched against the "Látka"
# (substance) column; each group with at least one match becomes a worksheet.
# FIX: added the correct spelling "comirnaty" (one "m") -- the original only
# listed the misspelled "commirnaty", which never matches Pfizer's Comirnaty
# records. The misspelling is kept in case historical rows contain it.
SHEETS = {
    "COVID-19": ["comirnaty", "commirnaty", "spikevax", "nuvaxovid"],
    "Chřipka": ["vaxigrip", "influvac", "fluarix", "afluria"],
    "Klíšťová encefalitida": ["fsme", "encepur"],
    "Tetanus": ["tetavax", "boostrix", "adacel"],
    "HepA": ["avaxim", "havrix", "vaqta"],
    "HepB": ["engerix"],
    "HepA+B": ["twinrix"],
}
# ================== DB connection ==================
# con = get_medicus_connection()
# SECURITY NOTE(review): SYSDBA/masterkey are the Firebird default credentials
# and are hardcoded here -- move them to environment variables / config.
con = fb.connect(
    host="192.168.1.10",
    port=3050,
    database=r"m:\Medicus\data\MEDICUS.FDB",  # raw string for backslashes; path as seen by the *server*
    user="SYSDBA",
    password="masterkey",
    charset="WIN1250",  # adjust if needed
)
# ================== SQL dotaz ==================
sql = """
SELECT
kar.rodcis AS "Rodné číslo",
kar.prijmeni AS "Příjmení",
kar.jmeno AS "Jméno",
ockzaz.datum AS "Datum očkování",
ockzaz.kodmz AS "Kód MZ",
ockzaz.poznamka AS "Šarže",
ockzaz.latka AS "Látka",
ockzaz.nazev AS "Název",
ockzaz.expire AS "Expirace",
(
SELECT LIST(l.kod, ', ')
FROM lecd l
WHERE l.rodcis = kar.rodcis
AND l.datose = CAST(ockzaz.datum AS DATE)
) AS "LECD kódy (ten den)",
(
SELECT LIST(d.kod, ', ')
FROM dokladd d
WHERE d.rodcis = kar.rodcis
AND d.datose = CAST(ockzaz.datum AS DATE)
) AS "Výkony (ten den)"
FROM registr
JOIN kar ON registr.idpac = kar.idpac
JOIN ockzaz ON registr.idpac = ockzaz.idpac
WHERE
registr.datum_zruseni IS NULL
AND kar.vyrazen <> 'A'
AND kar.rodcis IS NOT NULL
AND idicp <> 0
AND EXTRACT(YEAR FROM ockzaz.datum) = 2025
ORDER BY ockzaz.datum DESC
"""
# ================== Načtení do DataFrame ==================
df = pd.read_sql(sql, con)
con.close()
# ================== Data types ==================
# Force the text columns to pandas' "string" dtype.
# FIX: the original listed "LECD kódy" and "Výkony", but the SQL aliases are
# "LECD kódy (ten den)" and "Výkony (ten den)", so those two columns were
# silently skipped by the `in df.columns` guard and stayed object-typed.
for col in [
    "Kód MZ", "Šarže", "Rodné číslo", "Látka", "Název", "Příjmení", "Jméno",
    "LECD kódy (ten den)", "Výkony (ten den)",
]:
    if col in df.columns:
        df[col] = df[col].astype("string")
# Parse the two date columns; unparseable values become NaT instead of raising.
for dcol in ["Datum očkování", "Expirace"]:
    if dcol in df.columns:
        df[dcol] = pd.to_datetime(df[dcol], errors="coerce")
# ================== Save to Excel, one sheet per vaccine group ==================
with pd.ExcelWriter(xlsx_path, engine="openpyxl") as writer:
    for sheet_name, vakciny in SHEETS.items():
        # Case-insensitive "any substring" match against the substance name.
        pattern = "|".join(re.escape(v) for v in vakciny if v)
        mask = df["Látka"].astype(str).str.contains(pattern, case=False, na=False)
        df_filtered = df[mask]
        if not df_filtered.empty:
            df_filtered.to_excel(writer, index=False, sheet_name=sheet_name)
    # plus an overall sheet with every vaccination record
    df.to_excel(writer, index=False, sheet_name="Vše")
# ================== Formatting ==================
# Re-open the just-written workbook to apply styling with openpyxl.
wb = load_workbook(xlsx_path)
def autosize_columns(ws):
    """Widen every column of *ws* to fit its longest value (12..60 chars)."""
    for idx in range(1, ws.max_column + 1):
        letter = get_column_letter(idx)
        longest = max(
            (len(str(cell.value)) for cell in ws[letter] if cell.value is not None),
            default=0,
        )
        ws.column_dimensions[letter].width = min(max(12, longest + 2), 60)
def safe_table_name(sheet_name):
    """Sanitize *sheet_name* into an Excel-legal table name.

    Every character outside [0-9A-Za-z_] becomes an underscore; the result is
    truncated to 25 characters and prefixed with ``tbl_``. Note the truncation
    means two long sheet names sharing a 25-char prefix would collide.
    """
    sanitized = "".join(
        ch if (ch == "_" or (ch.isascii() and ch.isalnum())) else "_"
        for ch in sheet_name
    )
    return "tbl_" + sanitized[:25]
def style_table(ws):
    """Bold/fill the header row, freeze it, and register the data range of
    *ws* as an Excel table (only when there is at least one data row)."""
    n_rows, n_cols = ws.max_row, ws.max_column
    if n_cols == 0:
        # Completely empty sheet -- nothing to style.
        return
    header_fill = PatternFill("solid", fgColor="D9E1F2")
    bold_font = Font(bold=True)
    vcenter = Alignment(vertical="center")
    for header_cell in ws[1]:
        header_cell.font = bold_font
        header_cell.fill = header_fill
        header_cell.alignment = vcenter
    ws.freeze_panes = "A2"
    if n_rows >= 2:
        # Excel tables need a header plus at least one data row.
        data_ref = f"A1:{get_column_letter(n_cols)}{n_rows}"
        table = Table(displayName=safe_table_name(ws.title), ref=data_ref)
        table.tableStyleInfo = TableStyleInfo(
            name="TableStyleMedium9", showRowStripes=True, showColumnStripes=False
        )
        ws.add_table(table)
    autosize_columns(ws)
def format_dates(ws, columns_names):
    """Apply the DD.MM.YYYY number format to the data cells of each named column."""
    header_values = [cell.value for cell in ws[1]]
    for name in columns_names:
        if name not in header_values:
            continue
        col_idx = header_values.index(name) + 1  # 1-based column index
        for cells in ws.iter_rows(min_row=2, min_col=col_idx, max_col=col_idx, max_row=ws.max_row):
            cells[0].number_format = "DD.MM.YYYY"
# Style every sheet (including "Vše") and format the two date columns.
for ws in wb.worksheets:
    style_table(ws)
    format_dates(ws, ["Datum očkování", "Expirace"])
wb.save(xlsx_path)
print(f"✅ Hotovo. Uloženo do: {xlsx_path.resolve()}")

5
test_telegram.py Normal file
View File

@@ -0,0 +1,5 @@
# Manual smoke test: sends one fixed message through the configured bot.
from TelegramMessaging import send_message
send_message("Test: Telegram integrace funguje!")