diff --git a/40 Fio 02 diagnostika 2.py b/40 Fio 02 diagnostika 2.py deleted file mode 100644 index c5eb899..0000000 --- a/40 Fio 02 diagnostika 2.py +++ /dev/null @@ -1,24 +0,0 @@ -import pymysql -from pymysql.cursors import DictCursor - -conn = pymysql.connect( - host="192.168.1.76", - port=3307, - user="root", - password="Vlado9674+", - database="fio", - charset="utf8mb4", - cursorclass=DictCursor -) - -with conn.cursor() as cur: - cur.execute("SHOW TABLES;") - print("📋 Tables:", [r[f"Tables_in_fio"] for r in cur.fetchall()]) - - cur.execute("SELECT COUNT(*) AS cnt FROM transactions;") - print("🧾 Rows in `transactions`:", cur.fetchone()["cnt"]) - - cur.execute("SHOW COLUMNS FROM transactions;") - print("\n📊 Columns:") - for r in cur.fetchall(): - print(" -", r["Field"]) diff --git a/40 Fio 03 excel.py b/40 Fio 03 excel.py deleted file mode 100644 index 4b9652e..0000000 --- a/40 Fio 03 excel.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -""" -Export Fio transactions (from MySQL → Excel) --------------------------------------------- -- Reads only cislo_uctu = '2800046620' -- For OZP (protiucet=2070101041) includes only positive objem -- Each sheet = insurance company (filtered by protiucet) -- First sheet = summary with total amounts and transaction counts -""" - -import pandas as pd -import pymysql -from pathlib import Path -from datetime import datetime - -# ======== CONFIG ======== -MYSQL_CONFIG = { - "host": "192.168.1.76", - "port": 3307, - "user": "root", - "password": "Vlado9674+", - "database": "fio", - "charset": "utf8mb4", -} - -REPORTOVAT = { - "VZP": "1114007221", - "VOZP": "2010009091", - "ČPZP": "2054108761", - "OZP": "2070101041", - "ZPŠ": "2090309181", - "ZPMV": "2112108031", -} - -EXPORT_PATH = Path(r"u:\Dropbox\!!!Days\Downloads Z230") / f"Fio_report_{datetime.now():%Y-%m-%d_%H-%M-%S}.xlsx" - - -# ======== LOAD DATA ======== -def load_data(): - print("🔄 Načítám data z MySQL (účet 2800046620, pro OZP jen kladné objemy)...") - conn = pymysql.connect(**MYSQL_CONFIG) - - sql = """ - SELECT * - FROM transactions - WHERE cislo_uctu = '2800046620' - AND ( - protiucet <> '2070101041' - OR (protiucet = '2070101041' AND objem > 0) - ); - """ - df = pd.read_sql(sql, conn) - conn.close() - - df.columns = df.columns.str.strip() - print(f"✅ Načteno {len(df)} řádků, {len(df.columns)} sloupců.") - return df - - -# ======== EXPORT TO EXCEL ======== -def export_to_excel(df): - summary_rows = [] # to collect summary per insurer - - with pd.ExcelWriter(EXPORT_PATH, engine="openpyxl") as writer: - # --- INDIVIDUAL SHEETS --- - for name, acc in REPORTOVAT.items(): - filtered = df[df["protiucet"].astype(str) == acc] - if filtered.empty: - print(f"⚠️ {name}: žádné transakce (účet {acc})") - summary_rows.append({ - "Pojišťovna": name, - "Číslo účtu": acc, - "Počet transakcí": 0, - "Součet objemu": 0.0 - }) - continue - - # safe numeric conversion - filtered = filtered.copy() - filtered["objem_num"] = ( - filtered["objem"] - .astype(str) - .str.replace("\u00A0", "", regex=False) - .str.replace(",", ".", regex=False) - .astype(float) - ) - - # --- summary data --- - total_sum = filtered["objem_num"].sum() - total_count = len(filtered) - - summary_rows.append({ - "Pojišťovna": name, - "Číslo účtu": acc, - "Počet transakcí": total_count, - "Součet objemu": round(total_sum, 2) - }) - - # --- write detailed sheet --- - filtered.to_excel(writer, index=False, sheet_name=name) - print(f"✅ {name}: {len(filtered)} řádků exportováno, součet {total_sum:,.2f} Kč") - - # 
--- SUMMARY SHEET --- - summary_df = pd.DataFrame(summary_rows) - summary_df["Součet objemu"] = summary_df["Součet objemu"].map("{:,.2f} Kč".format) - summary_df.to_excel(writer, index=False, sheet_name="Přehled") - print("🧾 Přidán přehledový list s celkovými součty.") - - print(f"\n📊 Hotovo! Soubor uložen jako:\n{EXPORT_PATH}") - - -# ======== MAIN ======== -if __name__ == "__main__": - df = load_data() - export_to_excel(df) diff --git a/40 fio 01.py b/40 fio 01.py deleted file mode 100644 index 81c60e8..0000000 --- a/40 fio 01.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -""" -Fio CSV import → MySQL (dev version) ------------------------------------- -- Always drops & recreates `transactions` table -- Uses real CSV headers as seen in "Vyhledane pohyby (3).csv" -- Unique key = (Číslo účtu, ID operace, ID pokynu) -""" - -import csv -from pathlib import Path -from datetime import datetime -import pymysql -from pymysql.cursors import DictCursor -import re - -# ======== CONFIG ======== -CSV_PATH = Path(r"u:\Dropbox\!!!Days\Downloads Z230\Vyhledane pohyby (3).csv") -TABLE_NAME = "transactions" - -MYSQL_CONFIG = { - "host": "192.168.1.76", - "port": 3307, - "user": "root", - "password": "Vlado9674+", - "database": "fio", - "charset": "utf8mb4", - "cursorclass": DictCursor, - "autocommit": True, -} - - -# ======== HELPERS ======== -def clean(s: str): - if not s: - return None - return s.strip() or None - - -def parse_date(raw: str): - raw = (raw or "").strip() - if not raw: - return None - try: - return datetime.strptime(raw, "%d.%m.%Y").date() - except ValueError: - return None - - -def parse_float(raw: str): - if raw is None: - return None - s = str(raw).strip() - for ch in (" ", "\u00A0", "\u202F", "\u2007"): - s = s.replace(ch, "") - s = s.replace(",", ".") - s = re.sub(r"[^0-9.+-]", "", s) - try: - return float(s) - except ValueError: - return None - - -# ======== DB ======== -def get_mysql_connection(): - return pymysql.connect(**MYSQL_CONFIG) - - -def recreate_table(conn): - """Drop and recreate table with schema matching CSV structure.""" - sql = f""" - DROP TABLE IF EXISTS `{TABLE_NAME}`; - CREATE TABLE `{TABLE_NAME}` ( - id INT AUTO_INCREMENT PRIMARY KEY, - datum DATE, - objem DECIMAL(14,2), - mena CHAR(3), - cislo_uctu VARCHAR(40), - protiucet VARCHAR(40), - kod_banky VARCHAR(20), - ks VARCHAR(20), - vs VARCHAR(20), - ss VARCHAR(20), - zprava_pro_prijemce VARCHAR(500), - poznamka VARCHAR(500), - id_operace VARCHAR(50), - id_pokynu VARCHAR(50), - ks_1 VARCHAR(20), - nazev_banky VARCHAR(100), - nazev_protiuctu VARCHAR(200), - ss_1 VARCHAR(20), - typ VARCHAR(100), - upresneni_objem VARCHAR(100), - upresneni_mena VARCHAR(20), - vs_1 VARCHAR(20), - zadal VARCHAR(200), - imported_at DATETIME DEFAULT CURRENT_TIMESTAMP, - UNIQUE KEY uniq_tx (cislo_uctu, id_operace, id_pokynu) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; - """ - with conn.cursor() as cur: - for stmt in sql.strip().split(";"): - if stmt.strip(): - cur.execute(stmt) - print(f"✅ Tabulka `{TABLE_NAME}` znovu vytvořena podle CSV struktury.") - - -# ======== IMPORT ======== -def import_fio_csv(): - with open(CSV_PATH, "r", encoding="utf-8-sig", newline="") as f: - reader = csv.DictReader(f, delimiter=";", quotechar='"') - rows = list(reader) - - total = len(rows) - print(f"📄 Načteno {total} řádků ze souboru {CSV_PATH.name}") - - with get_mysql_connection() as conn: - recreate_table(conn) - inserted, skipped = 0, 0 - - for i, row in enumerate(rows, start=1): - data = { - "datum": 
parse_date(row.get("Datum")), - "objem": parse_float(row.get("Objem")), - "mena": clean(row.get("Měna")), - "cislo_uctu": clean(row.get("Číslo účtu")), - "protiucet": clean(row.get("Protiúčet")), - "kod_banky": clean(row.get("Kód banky")), - "ks": clean(row.get("KS")), - "vs": clean(row.get("VS")), - "ss": clean(row.get("SS")), - "zprava_pro_prijemce": clean(row.get("Zpráva pro příjemce")), - "poznamka": clean(row.get("Poznámka")), - "id_operace": clean(row.get("ID operace")), - "id_pokynu": clean(row.get("ID pokynu")), - "ks_1": clean(row.get("KS.1")), - "nazev_banky": clean(row.get("Název banky")), - "nazev_protiuctu": clean(row.get("Název protiúčtu")), - "ss_1": clean(row.get("SS.1")), - "typ": clean(row.get("Typ")), - "upresneni_objem": clean(row.get("Upřesnění - objem")), - "upresneni_mena": clean(row.get("Upřesnění - měna")), - "vs_1": clean(row.get("VS.1")), - "zadal": clean(row.get("Zadal")), - } - - cols = ", ".join(data.keys()) - placeholders = ", ".join(["%s"] * len(data)) - sql = f"INSERT IGNORE INTO `{TABLE_NAME}` ({cols}) VALUES ({placeholders})" - - with conn.cursor() as cur: - affected = cur.execute(sql, list(data.values())) - if affected: - inserted += 1 - else: - skipped += 1 - - if i % 500 == 0 or i == total: - print(f" {i}/{total} zpracováno... ({inserted} vloženo, {skipped} duplicit)") - - print(f"\n✅ Import dokončen: {inserted} nových, {skipped} duplicit přeskočeno.") - - -# ======== MAIN ======== -if __name__ == "__main__": - import_fio_csv() diff --git a/40 fio 02 diagnostika.py b/40 fio 02 diagnostika.py deleted file mode 100644 index 2fe5f64..0000000 --- a/40 fio 02 diagnostika.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -""" -Quick, verified dump of all Fio transactions from MySQL → Excel. -Column names are exactly as in DB. -""" - -import pandas as pd -import pymysql -from pymysql.cursors import DictCursor -from pathlib import Path -from datetime import datetime - -# ======== CONFIG ======== -MYSQL_CONFIG = { - "host": "192.168.1.76", - "port": 3307, - "user": "root", - "password": "Vlado9674+", - "database": "fio", - "charset": "utf8mb4", - -} - -EXPORT_PATH = Path(r"u:\Dropbox\!!!Days\Downloads Z230") / f"Fio_ALL_{datetime.now():%Y-%m-%d_%H-%M-%S}.xlsx" - -# ======== MAIN ======== -def dump_all_transactions(): - with pymysql.connect(**MYSQL_CONFIG) as conn: - sql = """ - SELECT - * - FROM transactions - ORDER BY datum DESC; - """ - df = pd.read_sql(sql, conn) - - print(f"✅ Načteno {len(df)} transakcí z MySQL.") - - # Save to Excel - df.to_excel(EXPORT_PATH, index=False) - print(f"📊 Excel export hotov:\n{EXPORT_PATH}") - - -if __name__ == "__main__": - dump_all_transactions() diff --git a/PSA/01 PSA.py b/PSA/01 PSA.py index 9c284c6..3fe5f33 100644 --- a/PSA/01 PSA.py +++ b/PSA/01 PSA.py @@ -1,11 +1,11 @@ -import firebirdsql as fb +import firebirdsql as fb,os import pandas as pd # TCP to the Firebird 2.5 server. Use the DB path as seen by the *server* (Windows path). 
conn = fb.connect( - host="192.168.1.4", + host="192.168.1.10", port=3050, - database=r"z:\Medicus 3\data\MEDICUS.FDB", # raw string for backslashes + database=r"m:\Medicus\data\MEDICUS.FDB", # raw string for backslashes user="SYSDBA", password="masterkey", charset="WIN1250", # adjust if needed @@ -49,7 +49,7 @@ SELECT FROM dokladd dd WHERE dd.rodcis = kar.rodcis AND (dd.kod = '01130' or dd.kod = '01131' OR dd.kod = '01132' OR dd.kod = '01133' OR dd.kod = '01134') - AND dd.datose BETWEEN vh.datum - 7 AND vh.datum + 7 + AND dd.datose BETWEEN vh.datum - 365 AND vh.datum + 365 ) AS vykodovano, lm.kodtext, lm.nazev, @@ -119,11 +119,23 @@ from openpyxl.formatting.rule import ColorScaleRule from openpyxl.styles import PatternFill from openpyxl.formatting.rule import FormulaRule -# ---- 1) Build timestamped output path ---- -base_path = Path("u:\Dropbox\!!!Days\Downloads Z230") -base_path.mkdir(parents=True, exist_ok=True) # ensure folder exists -timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") -output_file = base_path / f"lab_results_2025_{timestamp}.xlsx" + +base_path = Path(r"z:\Dropbox\Ordinace\Reporty") +base_path.mkdir(parents=True, exist_ok=True) + +# ================= DELETE OLD PSA REPORTS ================== +for fname in os.listdir(base_path): + if fname.endswith("PSA report.xlsx"): + try: + os.remove(base_path / fname) + print(f"🗑️ Deleted old PSA report: {fname}") + except Exception as e: + print(f"⚠️ Could not delete {fname}: {e}") + +# ================= CREATE NEW FILENAME ================== +timestamp = datetime.now().strftime("%Y-%m-%d %H-%M-%S") +output_file = base_path / f"{timestamp} PSA report.xlsx" +print(f"📄 New PSA report will be saved as: {output_file}") # ---- 2) Export DataFrame to Excel ---- # Assumes df_direct already exists (your joined query result) diff --git a/PSA/Reporter PSA.py b/PSA/Reporter PSA.py new file mode 100644 index 0000000..eaf5313 --- /dev/null +++ b/PSA/Reporter PSA.py @@ -0,0 +1,212 @@ +import firebirdsql as fb,os +import pandas as pd + +# TCP to the Firebird 2.5 server. Use the DB path as seen by the *server* (Windows path). 
+conn = fb.connect(
+    host="192.168.1.10",
+    port=3050,
+    database=r"m:\Medicus\data\MEDICUS.FDB",  # raw string for backslashes
+    user="SYSDBA",
+    password="masterkey",
+    charset="WIN1250",  # adjust if needed
+)
+
+# Tiny helper to fetch directly into DataFrame (avoids the pandas/SQLAlchemy warning)
+def query_df(sql, params=None):
+    cur = conn.cursor()
+    cur.execute(sql, params or ())
+    rows = cur.fetchall()
+    cols = [d[0].strip() for d in cur.description]  # Firebird pads column names
+    return pd.DataFrame(rows, columns=cols)
+
+# Smoke test
+print(query_df("SELECT 1 AS ONE FROM RDB$DATABASE"))
+
+# Your table
+df = query_df("SELECT FIRST 100 * FROM kar")
+print(df)
+
+
+
+from datetime import datetime
+start = datetime(2025, 1, 1)
+end = datetime(2026, 1, 1)
+
+sql = """
+SELECT
+    /*vh.idvh,*/
+    vh.idpacient,
+    kar.prijmeni,
+    kar.jmeno,
+    kar.rodcis,
+    vh.datum,
+    /*vh.idhodn,*/
+    /*vd.poradi,*/
+    /*vd.idmetod,*/
+/* NEW: list of matching dokladd entries within ±7 days, one cell */
+    (
+        SELECT LIST(CAST(dd.datose AS VARCHAR(10)) || ' ' || dd.kod, ', ')
+        FROM dokladd dd
+        WHERE dd.rodcis = kar.rodcis
+          AND (dd.kod = '01130' or dd.kod = '01131' OR dd.kod = '01132' OR dd.kod = '01133' OR dd.kod = '01134')
+          AND dd.datose BETWEEN vh.datum - 7 AND vh.datum + 7
+    ) AS vykodovano,
+    lm.kodtext,
+    lm.nazev,
+    vd.vysl,
+    lj.jedn,
+    ls.normdol,
+    ls.normhor
+FROM labvh vh
+JOIN labvd vd ON vd.idvh = vh.idvh
+JOIN kar ON kar.idpac = vh.idpacient
+JOIN labmetod lm ON lm.idmetod = vd.idmetod
+JOIN labjedn lj ON lj.idjedn = vd.idjedn
+JOIN labskaly ls ON ls.idskaly = vd.idskaly
+WHERE vh.datum >= ?
+  AND vh.datum < ?
+  AND lm.nazev CONTAINING 'PSA'
+/*ORDER BY kar.idpac, vh.datum, vd.poradi;*/
+ORDER BY vh.datum desc;
+"""
+
+df_direct = query_df(sql, (start, end))
+
+import re
+import numpy as np
+
+# --- 0) Helper: parse numeric value from string like "5,6", "<0.1", "3.2 mmol/L" ---
+num_re = re.compile(r'[-+]?\d+(?:[.,]\d+)?(?:[eE][-+]?\d+)?')
+
+def to_num(x):
+    if x is None:
+        return np.nan
+    s = str(x).strip()
+    if not s:
+        return np.nan
+    m = num_re.search(s.replace('\u00A0', ' '))  # replace NBSP with a plain space, if any
+    if not m:
+        return np.nan
+    val_str = m.group(0).replace(',', '.')
+    try:
+        val = float(val_str)
+    except ValueError:
+        return np.nan
+    # Heuristic for qualifiers:
+    # "<x" -> take half of x (below detection limit), ">x" -> take x (at least)
+    if s.lstrip().startswith('<'):
+        return val * 0.5
+    if s.lstrip().startswith('>'):
+        return val
+    return val
+
+# --- 1) Prepare numeric columns + ratio in pandas before export ---
+# Assumes df_direct exists with columns 'VYSL' and 'NORMHOR' (case per your SELECT)
+df_direct["VYSL_NUM"] = df_direct["VYSL"].apply(to_num)
+df_direct["NORMHOR_NUM"] = df_direct["NORMHOR"].apply(to_num)
+
+# Avoid division by zero/NaN
+den = df_direct["NORMHOR_NUM"].replace(0, np.nan)
+df_direct["RATIO"] = (df_direct["VYSL_NUM"] / den).clip(lower=0)  # can exceed 1 if over ULN
+
+
+from datetime import datetime
+from pathlib import Path
+from openpyxl import load_workbook
+from openpyxl.utils import get_column_letter
+from openpyxl.styles import Alignment, Border, Side
+from openpyxl.formatting.rule import ColorScaleRule
+from openpyxl.styles import PatternFill
+from openpyxl.formatting.rule import FormulaRule
+
+
+base_path = Path(r"z:\Dropbox\Ordinace\Reporty")
+base_path.mkdir(parents=True, exist_ok=True)
+
+# ================= DELETE OLD PSA REPORTS ==================
+for fname in os.listdir(base_path):
+    if fname.endswith("PSA report.xlsx"):
+        try:
+            os.remove(base_path / 
fname) + print(f"🗑️ Deleted old PSA report: {fname}") + except Exception as e: + print(f"⚠️ Could not delete {fname}: {e}") + +# ================= CREATE NEW FILENAME ================== +timestamp = datetime.now().strftime("%Y-%m-%d %H-%M-%S") +output_file = base_path / f"{timestamp} PSA report.xlsx" +print(f"📄 New PSA report will be saved as: {output_file}") + +# ---- 2) Export DataFrame to Excel ---- +# Assumes df_direct already exists (your joined query result) +df_direct.to_excel(output_file, index=False, sheet_name="PSA") + +# ---- 3) Open with openpyxl for formatting ---- +wb = load_workbook(output_file) +ws = wb["PSA"] + +# Auto width for columns +for col in ws.columns: + max_len = 0 + col_letter = get_column_letter(col[0].column) + for cell in col: + try: + if cell.value is not None: + max_len = max(max_len, len(str(cell.value))) + except Exception: + pass + ws.column_dimensions[col_letter].width = min(max_len + 2, 50) # cap width + +# Thin border style +thin_border = Border( + left=Side(style="thin"), + right=Side(style="thin"), + top=Side(style="thin"), + bottom=Side(style="thin"), +) + +# Apply borders to all cells and center A, B, E +for row in ws.iter_rows(min_row=1, max_row=ws.max_row, min_col=1, max_col=ws.max_column): + for cell in row: + cell.border = thin_border + if cell.column_letter in ["A", "B", "E"]: + cell.alignment = Alignment(horizontal="center") + +# Enable filter on header row and freeze it +ws.auto_filter.ref = ws.dimensions +ws.freeze_panes = "A2" + + +# map headers +hdr = {c.value: i+1 for i, c in enumerate(ws[1])} +vysl_idx = hdr.get("VYSL") +ratio_idx = hdr.get("RATIO") +if not (vysl_idx and ratio_idx): + raise RuntimeError("Missing required columns: VYSL and/or RATIO") + +vysl_col = get_column_letter(vysl_idx) +ratio_col = get_column_letter(ratio_idx) +max_row = ws.max_row +rng_vysl = f"{vysl_col}2:{vysl_col}{max_row}" + +green = PatternFill(start_color="63BE7B", end_color="63BE7B", fill_type="solid") +yellow = PatternFill(start_color="FFEB84", end_color="FFEB84", fill_type="solid") +red = PatternFill(start_color="F8696B", end_color="F8696B", fill_type="solid") + +# Non-overlapping rules; stop when one matches +ws.conditional_formatting.add( + rng_vysl, + FormulaRule(formula=[f"${ratio_col}2<=0.80"], fill=green, stopIfTrue=True) +) +ws.conditional_formatting.add( + rng_vysl, + FormulaRule(formula=[f"AND(${ratio_col}2>0.80, ${ratio_col}2<1)"], fill=yellow, stopIfTrue=True) +) +ws.conditional_formatting.add( + rng_vysl, + FormulaRule(formula=[f"${ratio_col}2>=1"], fill=red, stopIfTrue=True) +) + + +wb.save(output_file) +print(f"Saved: {output_file}") diff --git a/12 Vakcina na samostatnych listech.py b/Vakcíny/12 Vakcina na samostatnych listech.py similarity index 92% rename from 12 Vakcina na samostatnych listech.py rename to Vakcíny/12 Vakcina na samostatnych listech.py index 87172e9..200b177 100644 --- a/12 Vakcina na samostatnych listech.py +++ b/Vakcíny/12 Vakcina na samostatnych listech.py @@ -3,23 +3,23 @@ from pathlib import Path import time -import fdb +import firebirdsql as fb import pandas as pd import re from openpyxl import load_workbook from openpyxl.worksheet.table import Table, TableStyleInfo from openpyxl.styles import Font, PatternFill, Alignment from openpyxl.utils import get_column_letter -from Functions import get_medicus_connection +# from Functions import get_medicus_connection # ================== Výstupní cesta ================== BASE_DIR = Path(r"z:\Dropbox\Ordinace\Reporty") # uprav dle potřeby timestamp = time.strftime("%Y-%m-%d 
%H-%M-%S") -xlsx_name = f"Pacienti očkování {timestamp}.xlsx" +xlsx_name = f"{timestamp} Očkování report.xlsx" xlsx_path = BASE_DIR / xlsx_name # ================== Smazání starých souborů ================== -for old_file in BASE_DIR.glob("Pacienti očkování *.xlsx"): +for old_file in BASE_DIR.glob("*očkování report.xlsx"): try: if old_file != xlsx_path: # skip the file we’re about to create old_file.unlink() @@ -39,7 +39,15 @@ SHEETS = { } # ================== Připojení k DB ================== -con = get_medicus_connection() +# con = get_medicus_connection() +con = fb.connect( + host="192.168.1.10", + port=3050, + database=r"m:\Medicus\data\MEDICUS.FDB", # raw string for backslashes + user="SYSDBA", + password="masterkey", + charset="WIN1250", # adjust if needed +) # ================== SQL dotaz ================== sql = """ SELECT diff --git a/Vakcíny/Reporter Očkování report.py b/Vakcíny/Reporter Očkování report.py new file mode 100644 index 0000000..200b177 --- /dev/null +++ b/Vakcíny/Reporter Očkování report.py @@ -0,0 +1,168 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +from pathlib import Path +import time +import firebirdsql as fb +import pandas as pd +import re +from openpyxl import load_workbook +from openpyxl.worksheet.table import Table, TableStyleInfo +from openpyxl.styles import Font, PatternFill, Alignment +from openpyxl.utils import get_column_letter +# from Functions import get_medicus_connection + +# ================== Výstupní cesta ================== +BASE_DIR = Path(r"z:\Dropbox\Ordinace\Reporty") # uprav dle potřeby +timestamp = time.strftime("%Y-%m-%d %H-%M-%S") +xlsx_name = f"{timestamp} Očkování report.xlsx" +xlsx_path = BASE_DIR / xlsx_name + +# ================== Smazání starých souborů ================== +for old_file in BASE_DIR.glob("*očkování report.xlsx"): + try: + if old_file != xlsx_path: # skip the file we’re about to create + old_file.unlink() + print(f"Smazán starý soubor: {old_file.name}") + except Exception as e: + print(f"⚠️ Nelze smazat {old_file.name}: {e}") + +# ================== Definice skupin vakcín ================== +SHEETS = { + "COVID-19": ["commirnaty", "spikevax", "nuvaxovid"], + "Chřipka": ["vaxigrip", "influvac", "fluarix", "afluria"], + "Klíšťová encefalitida": ["fsme", "encepur"], + "Tetanus": ["tetavax", "boostrix", "adacel"], + "HepA": ["avaxim", "havrix","vaqta"], + "HepB": ["engerix"], + "HepA+B": ["twinrix"], +} + +# ================== Připojení k DB ================== +# con = get_medicus_connection() +con = fb.connect( + host="192.168.1.10", + port=3050, + database=r"m:\Medicus\data\MEDICUS.FDB", # raw string for backslashes + user="SYSDBA", + password="masterkey", + charset="WIN1250", # adjust if needed +) +# ================== SQL dotaz ================== +sql = """ +SELECT + kar.rodcis AS "Rodné číslo", + kar.prijmeni AS "Příjmení", + kar.jmeno AS "Jméno", + ockzaz.datum AS "Datum očkování", + ockzaz.kodmz AS "Kód MZ", + ockzaz.poznamka AS "Šarže", + ockzaz.latka AS "Látka", + ockzaz.nazev AS "Název", + ockzaz.expire AS "Expirace", + ( + SELECT LIST(l.kod, ', ') + FROM lecd l + WHERE l.rodcis = kar.rodcis + AND l.datose = CAST(ockzaz.datum AS DATE) + ) AS "LECD kódy (ten den)", + ( + SELECT LIST(d.kod, ', ') + FROM dokladd d + WHERE d.rodcis = kar.rodcis + AND d.datose = CAST(ockzaz.datum AS DATE) + ) AS "Výkony (ten den)" +FROM registr +JOIN kar ON registr.idpac = kar.idpac +JOIN ockzaz ON registr.idpac = ockzaz.idpac +WHERE + registr.datum_zruseni IS NULL + AND kar.vyrazen <> 'A' + AND kar.rodcis IS NOT NULL + AND 
idicp <> 0
+    AND EXTRACT(YEAR FROM ockzaz.datum) = 2025
+ORDER BY ockzaz.datum DESC
+"""
+
+# ================== Načtení do DataFrame ==================
+df = pd.read_sql(sql, con)
+con.close()
+
+# ================== Datové typy ==================
+for col in ["Kód MZ", "Šarže", "Rodné číslo", "Látka", "Název", "Příjmení", "Jméno", "LECD kódy (ten den)", "Výkony (ten den)"]:
+    if col in df.columns:
+        df[col] = df[col].astype("string")
+
+for dcol in ["Datum očkování", "Expirace"]:
+    if dcol in df.columns:
+        df[dcol] = pd.to_datetime(df[dcol], errors="coerce")
+
+# ================== Uložení do Excelu – více listů ==================
+with pd.ExcelWriter(xlsx_path, engine="openpyxl") as writer:
+    for sheet_name, vakciny in SHEETS.items():
+        pattern = "|".join(re.escape(v) for v in vakciny if v)
+        mask = df["Látka"].astype(str).str.contains(pattern, case=False, na=False)
+        df_filtered = df[mask]
+        if not df_filtered.empty:
+            df_filtered.to_excel(writer, index=False, sheet_name=sheet_name)
+    # navíc celkový přehled všech očkování
+    df.to_excel(writer, index=False, sheet_name="Vše")
+
+# ================== Formátování ==================
+wb = load_workbook(xlsx_path)
+
+def autosize_columns(ws):
+    for col_idx in range(1, ws.max_column + 1):
+        col_letter = get_column_letter(col_idx)
+        max_len = 0
+        for cell in ws[col_letter]:
+            val = "" if cell.value is None else str(cell.value)
+            if len(val) > max_len:
+                max_len = len(val)
+        ws.column_dimensions[col_letter].width = min(max(12, max_len + 2), 60)
+
+def safe_table_name(sheet_name):
+    """Return an Excel-safe, unique table name."""
+    name = re.sub(r"[^0-9A-Za-z_]", "_", sheet_name)
+    return f"tbl_{name[:25]}"
+
+def style_table(ws):
+    max_row = ws.max_row
+    max_col = ws.max_column
+    if max_col == 0:
+        return
+
+    header_fill = PatternFill("solid", fgColor="D9E1F2")
+    for cell in ws[1]:
+        cell.font = Font(bold=True)
+        cell.fill = header_fill
+        cell.alignment = Alignment(vertical="center")
+
+    ws.freeze_panes = "A2"
+
+    if max_row < 2:
+        autosize_columns(ws)
+        return
+
+    ref = f"A1:{get_column_letter(max_col)}{max_row}"
+    tbl = Table(displayName=safe_table_name(ws.title), ref=ref)
+    tbl.tableStyleInfo = TableStyleInfo(
+        name="TableStyleMedium9", showRowStripes=True, showColumnStripes=False
+    )
+    ws.add_table(tbl)
+    autosize_columns(ws)
+
+def format_dates(ws, columns_names):
+    header = [c.value for c in ws[1]]
+    date_cols = [header.index(name) + 1 for name in columns_names if name in header]
+    for col_idx in date_cols:
+        for row in ws.iter_rows(min_row=2, min_col=col_idx, max_col=col_idx, max_row=ws.max_row):
+            row[0].number_format = "DD.MM.YYYY"
+
+for ws in wb.worksheets:
+    style_table(ws)
+    format_dates(ws, ["Datum očkování", "Expirace"])
+
+wb.save(xlsx_path)
+
+print(f"✅ Hotovo. Uloženo do: {xlsx_path.resolve()}")