notebookvb

This commit is contained in:
Vladimir Buzalka
2026-04-29 06:55:23 +02:00
parent a9c143ba24
commit daad4adeab
113 changed files with 16563 additions and 0 deletions
+1
View File
@@ -0,0 +1 @@
ANTHROPIC_API_KEY=REDACTED  # SECURITY: a live API key was committed here — revoke it immediately and load the key from a local, untracked .env file
+16
View File
@@ -0,0 +1,16 @@
# Virtual environment
.venv/
# Python
__pycache__/
*.pyc
# PyCharm / IDE
.idea/
# Claude worktrees
.claude/worktrees/
# OS
.DS_Store
Thumbs.db
# Secrets — never commit tokens or API keys
.env
token.txt
@@ -0,0 +1,203 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Fetch Medevio pending (ACTIVE) patient requests and return a pandas DataFrame.
Reads Bearer token from token.txt (single line, token only).
"""
import requests
import pandas as pd
import time
from pathlib import Path
from typing import List, Dict, Any
# CONFIG ---------------------------------------------------------------------
TOKEN_FILE = str(Path(__file__).resolve().parent.parent / "token.txt") # centralized token
GRAPHQL_URL = "https://app.medevio.cz/graphql"
CLINIC_SLUG = "mudr-buzalkova" # adjust if needed
LOCALE = "cs"
PAGE_SIZE = 50 # how many items to request per page
REQUEST_WAIT = 0.2 # seconds between requests to be polite
# ---------------------------------------------------------------------------
GRAPHQL_QUERY = r"""
query ClinicLegacyRequestList_ListPatientRequestsForClinic(
$clinicSlug: String!,
$queueId: String,
$queueAssignment: QueueAssignmentFilter!,
$state: PatientRequestState,
$pageInfo: PageInfo!,
$locale: Locale!
) {
requests: listPatientRequestsForClinic(
clinicSlug: $clinicSlug,
queueId: $queueId,
queueAssignment: $queueAssignment,
state: $state,
pageInfo: $pageInfo
) {
id
createdAt
dueDate
displayTitle(locale: $locale)
doneAt
removedAt
priority
evaluationResult(locale: $locale) {
fields {
name
value
}
}
clinicId
extendedPatient {
id
identificationNumber
kind
name
note
owner { name surname }
key
status
surname
type
user { id name surname }
isUnknownPatient
}
lastMessage {
createdAt
id
readAt
sender { id name surname clinicId }
text
}
queue { id name }
reservations { id canceledAt done start }
tags(onlyImportant: true) { id name color icon }
priceWhenCreated
currencyWhenCreated
}
}
"""
def read_token(path: str) -> str:
    """Load the API token from *path*, dropping an optional 'Bearer ' prefix."""
    raw = Path(path).read_text(encoding="utf-8").strip()
    if raw.startswith("Bearer "):
        raw = raw[len("Bearer "):]
    return raw
def fetch_requests(token: str,
                   clinic_slug: str = CLINIC_SLUG,
                   locale: str = LOCALE,
                   page_size: int = PAGE_SIZE) -> List[Dict[str, Any]]:
    """Page through all ACTIVE patient requests for *clinic_slug*.

    Uses offset-based pagination (`page_size` items per call) and sleeps
    REQUEST_WAIT seconds between pages. Returns the raw GraphQL records.

    Raises:
        requests.HTTPError: on a non-2xx response.
        RuntimeError: when the server reports GraphQL-level errors.
    """
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    all_items: List[Dict[str, Any]] = []
    offset = 0
    while True:
        variables = {
            "clinicSlug": clinic_slug,
            "queueId": None,
            "queueAssignment": "ANY",
            "state": "ACTIVE",
            "pageInfo": {"first": page_size, "offset": offset},
            "locale": locale,
        }
        payload = {"query": GRAPHQL_QUERY, "variables": variables, "operationName": "ClinicLegacyRequestList_ListPatientRequestsForClinic"}
        r = requests.post(GRAPHQL_URL, json=payload, headers=headers, timeout=30)
        r.raise_for_status()
        js = r.json()
        # Basic error handling
        if "errors" in js:
            raise RuntimeError(f"GraphQL returned errors: {js['errors']}")
        # Fix: servers may send "data": null or "requests": null; the previous
        # chained .get(..., {}) crashed on None in that case.
        items = (js.get("data") or {}).get("requests") or []
        if not items:
            break
        all_items.extend(items)
        # If fewer than requested, we are at the end
        if len(items) < page_size:
            break
        offset += page_size
        time.sleep(REQUEST_WAIT)
    return all_items
def flatten_item(item: Dict[str, Any]) -> Dict[str, Any]:
    """Project one nested GraphQL request record onto a flat dict of scalars.

    Nested patient / lastMessage / queue objects become prefixed columns;
    evaluationResult fields are merged in under "eval_<name>" keys.
    """
    patient = item.get("extendedPatient") or {}
    message = item.get("lastMessage") or {}
    queue_info = item.get("queue") or {}

    # Pass-through scalar columns, in the original column order.
    row: Dict[str, Any] = {
        key: item.get(key)
        for key in (
            "id", "createdAt", "dueDate", "displayTitle",
            "doneAt", "removedAt", "priority", "clinicId",
        )
    }
    row.update(
        patient_id=patient.get("id"),
        patient_identificationNumber=patient.get("identificationNumber"),
        patient_name=patient.get("name"),
        patient_surname=patient.get("surname"),
        patient_status=patient.get("status"),
        lastMessage_id=message.get("id"),
        lastMessage_createdAt=message.get("createdAt"),
        lastMessage_text=message.get("text"),
        queue_id=queue_info.get("id"),
        queue_name=queue_info.get("name"),
        priceWhenCreated=item.get("priceWhenCreated"),
        currencyWhenCreated=item.get("currencyWhenCreated"),
    )
    # Merge evaluation fields (if any), prefixed by "eval_"; unnamed fields are skipped.
    evaluation = item.get("evaluationResult") or {}
    for field in evaluation.get("fields") or []:
        if field.get("name"):
            row[f"eval_{field.get('name')}"] = field.get("value")
    return row
def to_dataframe(items: List[Dict[str, Any]]) -> pd.DataFrame:
    """Flatten raw request records into a DataFrame with parsed datetime columns."""
    frame = pd.DataFrame([flatten_item(entry) for entry in items])
    # Coerce the known timestamp columns; unparseable values become NaT.
    for name in ("createdAt", "dueDate", "doneAt", "lastMessage_createdAt", "removedAt"):
        if name in frame.columns:
            frame[name] = pd.to_datetime(frame[name], errors="coerce")
    return frame
def main():
    """CLI entry point: fetch ACTIVE requests, print a preview, export to Excel."""
    token = read_token(TOKEN_FILE)
    print("Fetching pending (ACTIVE) requests from Medevio...")
    items = fetch_requests(token)
    print(f"Fetched {len(items)} items.")
    df = to_dataframe(items)
    # Widen the pandas display so the console preview is readable.
    pd.set_option("display.max_rows", 20)
    pd.set_option("display.max_colwidth", 160)
    print(df.head(50))
    # optionally save — NOTE(review): to_excel needs an engine (openpyxl); confirm it is installed
    df.to_excel("medevio_pending_requests.xlsx", index=False)
    print("Saved medevio_pending_requests.xlsx")
if __name__ == "__main__":
    main()
@@ -0,0 +1,104 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from pathlib import Path
import requests # 👈 this is new
# --- Settings ----------------------------------------------------
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
SHOW_FULL_TOKEN = False # set True if you want to print the full token
# -----------------------------------------------------------------
GRAPHQL_QUERY = r"""
query ClinicLegacyRequestList_ListPatientRequestsForClinic(
$clinicSlug: String!,
$queueId: String,
$queueAssignment: QueueAssignmentFilter!,
$state: PatientRequestState,
$pageInfo: PageInfo!,
$locale: Locale!
) {
requests: listPatientRequestsForClinic(
clinicSlug: $clinicSlug,
queueId: $queueId,
queueAssignment: $queueAssignment,
state: $state,
pageInfo: $pageInfo
) {
id
createdAt
dueDate
displayTitle(locale: $locale)
doneAt
removedAt
priority
evaluationResult(locale: $locale) { fields { name value } }
clinicId
extendedPatient {
id
identificationNumber
kind
name
surname
status
isUnknownPatient
}
lastMessage { id text createdAt }
queue { id name }
reservations { id canceledAt done start }
tags(onlyImportant: true) { id name color icon }
priceWhenCreated
currencyWhenCreated
}
}
"""
def read_token(p: Path) -> str:
    """Return the token stored in *p*; a leading 'Bearer ' prefix is stripped."""
    value = p.read_text(encoding="utf-8").strip()
    return value[len("Bearer "):] if value.startswith("Bearer ") else value
def main():
    """Diagnostic entry point: run one ACTIVE-requests query and dump raw JSON."""
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    variables = {
        "clinicSlug": CLINIC_SLUG,
        "queueId": None,
        "queueAssignment": "ANY",
        "state": "ACTIVE",  # pending / unresolved ("nevyřízené")
        "pageInfo": {"first": 30, "offset": 0},
        "locale": "cs",
    }
    payload = {
        "operationName": "ClinicLegacyRequestList_ListPatientRequestsForClinic",
        "query": GRAPHQL_QUERY,
        "variables": variables,
    }
    # === Actually call Medevio API ==================================
    print("📡 Querying Medevio GraphQL API...\n")
    url = "https://api.medevio.cz/graphql"
    r = requests.post(url, json=payload, headers=headers)
    print(f"HTTP status: {r.status_code}\n")
    # --- Try to decode JSON; on failure show the raw body for debugging.
    try:
        data = r.json()
        print("=== Raw JSON response ===")
        print(json.dumps(data, indent=2, ensure_ascii=False))
    except Exception as e:
        print("❌ Failed to decode JSON:", e)
        print("Raw text:\n", r.text)
if __name__ == "__main__":
    main()
@@ -0,0 +1,96 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from pathlib import Path
import requests
# --- Settings ----------------------------------------------------
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
# -----------------------------------------------------------------
GRAPHQL_QUERY = r"""
query ClinicLegacyRequestList_ListPatientRequestsForClinic(
$clinicSlug: String!,
$queueId: String,
$queueAssignment: QueueAssignmentFilter!,
$state: PatientRequestState,
$pageInfo: PageInfo!,
$locale: Locale!
) {
requests: listPatientRequestsForClinic(
clinicSlug: $clinicSlug,
queueId: $queueId,
queueAssignment: $queueAssignment,
state: $state,
pageInfo: $pageInfo
) {
id
displayTitle(locale: $locale)
extendedPatient {
name
surname
identificationNumber
}
}
}
"""
def read_token(p: Path) -> str:
    """Read the bearer token from *p*; an optional 'Bearer ' prefix is removed."""
    content = p.read_text(encoding="utf-8").strip()
    if not content.startswith("Bearer "):
        return content
    return content.split(" ", 1)[1]
def main():
    """Fetch up to 30 ACTIVE requests and print a one-line summary per patient."""
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    variables = {
        "clinicSlug": CLINIC_SLUG,
        "queueId": None,
        "queueAssignment": "ANY",
        "state": "ACTIVE",  # pending / unresolved ("nevyřízené")
        "pageInfo": {"first": 30, "offset": 0},
        "locale": "cs",
    }
    payload = {
        "operationName": "ClinicLegacyRequestList_ListPatientRequestsForClinic",
        "query": GRAPHQL_QUERY,
        "variables": variables,
    }
    url = "https://api.medevio.cz/graphql"
    print("📡 Querying Medevio GraphQL API...\n")
    r = requests.post(url, json=payload, headers=headers)
    print(f"HTTP status: {r.status_code}\n")
    # --- Parse JSON safely; bail out with the raw body on failure.
    try:
        data = r.json()
    except Exception as e:
        print("❌ Failed to decode JSON:", e)
        print("Raw text:\n", r.text)
        return
    requests_data = data.get("data", {}).get("requests", [])
    if not requests_data:
        print("⚠️ No requests found or invalid response.")
        return
    print(f"📋 Found {len(requests_data)} active requests:\n")
    for req in requests_data:
        patient = req.get("extendedPatient", {})
        print(f"- {patient.get('surname','')} {patient.get('name','')} "
              f"({patient.get('identificationNumber','')}) "
              f"{req.get('displayTitle','')} [ID: {req.get('id')}]")
if __name__ == "__main__":
    main()
+101
View File
@@ -0,0 +1,101 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from pathlib import Path
import requests
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
# --- Try including `updatedAt` field directly ---
GRAPHQL_QUERY = r"""
query ClinicRequestGrid_ListPatientRequestsForClinic2(
$clinicSlug: String!,
$queueId: String,
$queueAssignment: QueueAssignmentFilter!,
$pageInfo: PageInfo!,
$locale: Locale!
) {
requestsResponse: listPatientRequestsForClinic2(
clinicSlug: $clinicSlug,
queueId: $queueId,
queueAssignment: $queueAssignment,
pageInfo: $pageInfo
) {
count
patientRequests {
id
createdAt
updatedAt # 👈 TESTUJEME, jestli Medevio toto pole podporuje
doneAt
removedAt
displayTitle(locale: $locale)
lastMessage { createdAt }
extendedPatient {
name
surname
identificationNumber
}
}
}
}
"""
def read_token(p: Path) -> str:
    """Token stored in *p*, with any 'Bearer ' prefix dropped."""
    text = p.read_text(encoding="utf-8").strip()
    prefix = "Bearer "
    return text[len(prefix):] if text.startswith(prefix) else text
def main():
    """Schema probe: fetch 3 requests and report whether `updatedAt` is accepted."""
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    variables = {
        "clinicSlug": CLINIC_SLUG,
        "queueId": None,
        "queueAssignment": "ANY",
        "pageInfo": {"first": 3, "offset": 0},
        "locale": "cs",
    }
    payload = {
        "operationName": "ClinicRequestGrid_ListPatientRequestsForClinic2",
        "query": GRAPHQL_QUERY,
        "variables": variables,
    }
    url = "https://api.medevio.cz/graphql"
    print("📡 Querying Medevio GraphQL API (testing `updatedAt` field)...\n")
    r = requests.post(url, json=payload, headers=headers)
    print(f"HTTP status: {r.status_code}\n")
    try:
        data = r.json()
    except Exception as e:
        print("❌ Failed to parse JSON:", e)
        print("Raw text:\n", r.text)
        return
    print("=== JSON response ===")
    print(json.dumps(data, indent=2, ensure_ascii=False))
    # Quick check: did it return an error message about updatedAt?
    errors = data.get("errors")
    if errors:
        print("\n⚠️ Medevio returned GraphQL error:")
        for e in errors:
            print(f"{e.get('message')}")
    else:
        print("\n✅ No errors, `updatedAt` might exist in schema!")
if __name__ == "__main__":
    main()
+85
View File
@@ -0,0 +1,85 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from pathlib import Path
import requests
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
GRAPHQL_QUERY = r"""
query ClinicRequestGrid_ListPatientRequestsForClinic2(
$clinicSlug: String!,
$queueId: String,
$queueAssignment: QueueAssignmentFilter!,
$pageInfo: PageInfo!,
$locale: Locale!
) {
requestsResponse: listPatientRequestsForClinic2(
clinicSlug: $clinicSlug
queueId: $queueId
queueAssignment: $queueAssignment
pageInfo: $pageInfo
) {
count
patientRequests {
id
createdAt
doneAt
displayTitle(locale: $locale)
extendedPatient {
name
surname
identificationNumber
}
}
}
}
"""
def read_token(p: Path) -> str:
    """Read and normalize the bearer token stored in file *p*."""
    stripped = p.read_text(encoding="utf-8").strip()
    if stripped.startswith("Bearer "):
        _, stripped = stripped.split(" ", 1)
    return stripped
def main():
    """Probe the v2 list endpoint with no state filter and dump the raw JSON."""
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    # state is deliberately omitted from the variables (that is the experiment)
    variables = {
        "clinicSlug": CLINIC_SLUG,
        "queueId": None,
        "queueAssignment": "ANY",
        "pageInfo": {"first": 10, "offset": 0},
        "locale": "cs",
    }
    payload = {
        "operationName": "ClinicRequestGrid_ListPatientRequestsForClinic2",
        "query": GRAPHQL_QUERY,
        "variables": variables,
    }
    url = "https://api.medevio.cz/graphql"
    print("📡 Querying Medevio GraphQL API (no state argument)...\n")
    r = requests.post(url, json=payload, headers=headers)
    print(f"HTTP status: {r.status_code}\n")
    try:
        data = r.json()
        print("=== JSON response ===")
        print(json.dumps(data, indent=2, ensure_ascii=False))
    except Exception as e:
        print("❌ Failed to parse JSON:", e)
        print("Raw text:\n", r.text)
if __name__ == "__main__":
    main()
@@ -0,0 +1,182 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import time
import pymysql
import requests
from pathlib import Path
from datetime import datetime
# ================================
# 🔧 CONFIGURATION
# ================================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
BATCH_SIZE = 100
STATES = ["ACTIVE", "DONE"] # optionally add "REMOVED"
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
GRAPHQL_QUERY = r"""
query ClinicRequestGrid_ListPatientRequestsForClinic2(
$clinicSlug: String!,
$queueId: String,
$queueAssignment: QueueAssignmentFilter!,
$pageInfo: PageInfo!,
$locale: Locale!,
$state: PatientRequestState
) {
requestsResponse: listPatientRequestsForClinic2(
clinicSlug: $clinicSlug,
queueId: $queueId,
queueAssignment: $queueAssignment,
pageInfo: $pageInfo,
state: $state
) {
count
patientRequests {
id
displayTitle(locale: $locale)
createdAt
updatedAt
doneAt
removedAt
extendedPatient {
name
surname
identificationNumber
}
}
}
}
"""
# ================================
# 🔑 TOKEN
# ================================
def read_token(p: Path) -> str:
    """Return the API token from *p*, stripping an optional 'Bearer ' prefix."""
    token_text = p.read_text(encoding="utf-8").strip()
    if token_text.startswith("Bearer "):
        return token_text[len("Bearer "):]
    return token_text
# ================================
# 🕒 DATETIME CONVERSION
# ================================
def to_mysql_dt(iso_str):
    """Convert an ISO-8601 timestamp (optionally 'Z'-suffixed) to MySQL DATETIME text.

    Returns None for empty input or anything that does not parse.
    """
    if not iso_str:
        return None
    try:
        parsed = datetime.fromisoformat(iso_str.replace("Z", "+00:00"))
    except Exception:
        return None
    return parsed.strftime("%Y-%m-%d %H:%M:%S")
# ================================
# 💾 UPSERT TO MYSQL
# ================================
def upsert(conn, r):
    """Insert or update one request row in `pozadavky` (keyed on id); commits."""
    patient = (r.get("extendedPatient") or {})
    sql = """
        INSERT INTO pozadavky (
            id, displayTitle, createdAt, updatedAt, doneAt, removedAt,
            pacient_jmeno, pacient_prijmeni, pacient_rodnecislo
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            displayTitle=VALUES(displayTitle),
            updatedAt=VALUES(updatedAt),
            doneAt=VALUES(doneAt),
            removedAt=VALUES(removedAt),
            pacient_jmeno=VALUES(pacient_jmeno),
            pacient_prijmeni=VALUES(pacient_prijmeni),
            pacient_rodnecislo=VALUES(pacient_rodnecislo)
    """
    # Timestamp columns go through to_mysql_dt so MySQL receives DATETIME text.
    row = (
        r.get("id"),
        r.get("displayTitle"),
        to_mysql_dt(r.get("createdAt")),
        to_mysql_dt(r.get("updatedAt")),
        to_mysql_dt(r.get("doneAt")),
        to_mysql_dt(r.get("removedAt")),
        patient.get("name"),
        patient.get("surname"),
        patient.get("identificationNumber"),
    )
    with conn.cursor() as cur:
        cur.execute(sql, row)
    conn.commit()
# ================================
# 📡 FETCH ONE BATCH
# ================================
def fetch_batch(headers, state, offset):
    """Fetch one page of requests in *state* starting at *offset*.

    Returns (patientRequests list, total count reported by the server).

    Raises:
        requests.HTTPError: on a non-2xx response.
        RuntimeError: when the server reports GraphQL-level errors (previously
            these were silently swallowed and looked like an empty result).
    """
    variables = {
        "clinicSlug": CLINIC_SLUG,
        "queueId": None,
        "queueAssignment": "ANY",
        "pageInfo": {"first": BATCH_SIZE, "offset": offset},
        "locale": "cs",
        "state": state,
    }
    payload = {
        "operationName": "ClinicRequestGrid_ListPatientRequestsForClinic2",
        "query": GRAPHQL_QUERY,
        "variables": variables,
    }
    # timeout added so a stalled connection cannot hang the sync forever
    r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
    r.raise_for_status()
    body = r.json()
    if body.get("errors"):
        raise RuntimeError(f"GraphQL returned errors: {body['errors']}")
    # "data" may be null alongside errors; guard before chaining .get()
    data = (body.get("data") or {}).get("requestsResponse") or {}
    return data.get("patientRequests") or [], data.get("count", 0)
# ================================
# 🧠 MAIN
# ================================
def main():
    """Sync all requests in STATES from Medevio into MySQL, page by page."""
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    total_downloaded = 0
    total_upserted = 0
    for state in STATES:
        print(f"\n📡 STATE = {state}")
        offset = 0
        state_total = None  # server-reported total, captured on the first page
        while True:
            batch, count_total = fetch_batch(headers, state, offset)
            if state_total is None:
                state_total = count_total
                print(f" • Total from server: {state_total}")
            if not batch:
                break
            print(f" • Offset {offset:>5}: got {len(batch)}")
            # Row-by-row upsert: one INSERT + commit per record.
            for r in batch:
                upsert(conn, r)
                total_upserted += 1
            total_downloaded += len(batch)
            offset += BATCH_SIZE
            if offset >= state_total:
                break
            time.sleep(0.4)  # respect API
    conn.close()
    print(f"\n✅ Done. Downloaded {total_downloaded} items, upserted {total_upserted} rows (states: {', '.join(STATES)}).")
if __name__ == "__main__":
    main()
@@ -0,0 +1,224 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import time
import pymysql
import requests
from pathlib import Path
from datetime import datetime
# ================================
# 🔧 CONFIGURATION
# ================================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
BATCH_SIZE = 1000
STATES = ["ACTIVE", "DONE"] # optionally add "REMOVED"
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
GRAPHQL_QUERY = r"""
query ClinicRequestGrid_ListPatientRequestsForClinic2(
$clinicSlug: String!,
$queueId: String,
$queueAssignment: QueueAssignmentFilter!,
$pageInfo: PageInfo!,
$locale: Locale!,
$state: PatientRequestState
) {
requestsResponse: listPatientRequestsForClinic2(
clinicSlug: $clinicSlug,
queueId: $queueId,
queueAssignment: $queueAssignment,
pageInfo: $pageInfo,
state: $state
) {
count
patientRequests {
id
displayTitle(locale: $locale)
createdAt
updatedAt
doneAt
removedAt
extendedPatient {
name
surname
identificationNumber
}
}
}
}
"""
# ================================
# 🔑 TOKEN
# ================================
def read_token(p: Path) -> str:
    """Bearer token stored in *p* (the 'Bearer ' prefix, if present, is removed)."""
    contents = p.read_text(encoding="utf-8").strip()
    return contents.split(" ", 1)[1] if contents.startswith("Bearer ") else contents
# ================================
# 🕒 DATETIME CONVERSION
# ================================
def to_mysql_dt(iso_str):
    """ISO-8601 string (optionally ending in 'Z') -> 'YYYY-MM-DD HH:MM:SS' or None."""
    if not iso_str:
        return None
    normalized = iso_str.replace("Z", "+00:00")
    try:
        return datetime.fromisoformat(normalized).strftime("%Y-%m-%d %H:%M:%S")
    except Exception:
        # Unparseable input is mapped to NULL rather than raised.
        return None
# ================================
# 💾 UPSERT TO MYSQL
# ================================
def upsert_many(conn, batch):
    """Upsert every record of *batch* into `pozadavky` in a single commit.

    No-op for an empty batch.
    """
    if not batch:
        return
    sql = """
        INSERT INTO pozadavky (
            id, displayTitle, createdAt, updatedAt, doneAt, removedAt,
            pacient_jmeno, pacient_prijmeni, pacient_rodnecislo
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            displayTitle=VALUES(displayTitle),
            updatedAt=VALUES(updatedAt),
            doneAt=VALUES(doneAt),
            removedAt=VALUES(removedAt),
            pacient_jmeno=VALUES(pacient_jmeno),
            pacient_prijmeni=VALUES(pacient_prijmeni),
            pacient_rodnecislo=VALUES(pacient_rodnecislo)
    """

    def _as_row(record):
        # One parameter tuple per record; timestamps normalized for MySQL.
        patient = record.get("extendedPatient") or {}
        return (
            record.get("id"),
            record.get("displayTitle"),
            to_mysql_dt(record.get("createdAt")),
            to_mysql_dt(record.get("updatedAt")),
            to_mysql_dt(record.get("doneAt")),
            to_mysql_dt(record.get("removedAt")),
            patient.get("name"),
            patient.get("surname"),
            patient.get("identificationNumber"),
        )

    with conn.cursor() as cur:
        cur.executemany(sql, [_as_row(record) for record in batch])
    conn.commit()
def upsert(conn, r):
    """Insert or update one request row in `pozadavky`; commits per row.

    NOTE(review): dead code — main() only calls upsert_many(); this single-row
    variant is never invoked in this script. Kept for reference/manual use.
    """
    p = (r.get("extendedPatient") or {})
    sql = """
    INSERT INTO pozadavky (
        id, displayTitle, createdAt, updatedAt, doneAt, removedAt,
        pacient_jmeno, pacient_prijmeni, pacient_rodnecislo
    ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)
    ON DUPLICATE KEY UPDATE
        displayTitle=VALUES(displayTitle),
        updatedAt=VALUES(updatedAt),
        doneAt=VALUES(doneAt),
        removedAt=VALUES(removedAt),
        pacient_jmeno=VALUES(pacient_jmeno),
        pacient_prijmeni=VALUES(pacient_prijmeni),
        pacient_rodnecislo=VALUES(pacient_rodnecislo)
    """
    # Timestamps are normalized to MySQL DATETIME text (or NULL).
    vals = (
        r.get("id"),
        r.get("displayTitle"),
        to_mysql_dt(r.get("createdAt")),
        to_mysql_dt(r.get("updatedAt")),
        to_mysql_dt(r.get("doneAt")),
        to_mysql_dt(r.get("removedAt")),
        p.get("name"),
        p.get("surname"),
        p.get("identificationNumber"),
    )
    with conn.cursor() as cur:
        cur.execute(sql, vals)
    conn.commit()
# ================================
# 📡 FETCH ONE BATCH
# ================================
def fetch_batch(headers, state, offset):
    """Fetch one page of requests in *state* starting at *offset*.

    Returns (patientRequests list, total count reported by the server).

    Raises:
        requests.HTTPError: on a non-2xx response.
        RuntimeError: when the server reports GraphQL-level errors (previously
            swallowed, which made auth failures look like an empty result).
    """
    variables = {
        "clinicSlug": CLINIC_SLUG,
        "queueId": None,
        "queueAssignment": "ANY",
        "pageInfo": {"first": BATCH_SIZE, "offset": offset},
        "locale": "cs",
        "state": state,
    }
    payload = {
        "operationName": "ClinicRequestGrid_ListPatientRequestsForClinic2",
        "query": GRAPHQL_QUERY,
        "variables": variables,
    }
    # timeout added so a stalled connection cannot hang the sync forever
    r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
    r.raise_for_status()
    body = r.json()
    if body.get("errors"):
        raise RuntimeError(f"GraphQL returned errors: {body['errors']}")
    # "data" may be null alongside errors; guard before chaining .get()
    data = (body.get("data") or {}).get("requestsResponse") or {}
    return data.get("patientRequests") or [], data.get("count", 0)
# ================================
# 🧠 MAIN
# ================================
def main():
    """Sync all requests in STATES into MySQL using one batched upsert per page."""
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    total_downloaded = 0
    total_upserted = 0
    for state in STATES:
        print(f"\n📡 STATE = {state}")
        offset = 0
        state_total = None  # server-reported total, captured on the first page
        while True:
            batch, count_total = fetch_batch(headers, state, offset)
            if state_total is None:
                state_total = count_total
                print(f" • Total from server: {state_total}")
            if not batch:
                break
            print(f" • Offset {offset:>5}: got {len(batch)}")
            # Perform one efficient upsert for the entire batch
            upsert_many(conn, batch)
            total_upserted += len(batch)
            total_downloaded += len(batch)
            offset += BATCH_SIZE
            if offset >= state_total:
                break
            time.sleep(10)  # respect API
    conn.close()
    print(f"\n✅ Done. Downloaded {total_downloaded} items, upserted {total_upserted} rows (states: {', '.join(STATES)}).")
if __name__ == "__main__":
    main()
@@ -0,0 +1,92 @@
import requests
import json
from pathlib import Path
# === Settings ===
# token.txt lives one directory above this script.
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
# Hard-coded single request whose attachments will be downloaded.
REQUEST_ID = "092a0c63-28be-4c6b-ab3b-204e1e2641d4"
OUTPUT_DIR = Path(r"u:\Dropbox\!!!Days\Downloads Z230\Medevio_přílohy")
def read_token(p: Path) -> str:
    """Return the token in *p* with any leading 'Bearer ' marker removed."""
    text = p.read_text(encoding="utf-8").strip()
    if text.startswith("Bearer "):
        text = text[len("Bearer "):]
    return text
# Detail query: list the medical-record attachments of one patient request.
GRAPHQL_QUERY = r"""
query ClinicRequestDetail_GetPatientRequest2(
    $requestId: UUID!,
    $isDoctor: Boolean!
) {
  patientRequestMedicalRecords: listMedicalRecordsForPatientRequest(
    attachmentTypes: [ECRF_FILL_ATTACHMENT, MESSAGE_ATTACHMENT, PATIENT_REQUEST_ATTACHMENT]
    patientRequestId: $requestId
    pageInfo: {first: 100, offset: 0}
  ) {
    attachmentType
    id
    medicalRecord {
      contentType
      description
      downloadUrl
      id
      url
      visibleToPatient @include(if: $isDoctor)
    }
  }
}
"""
variables = {
    "isDoctor": True,
    "requestId": REQUEST_ID,
}
headers = {
    "Authorization": f"Bearer {read_token(TOKEN_PATH)}",
    "Content-Type": "application/json",
    "Accept": "application/json",
}
payload = {
    "operationName": "ClinicRequestDetail_GetPatientRequest2",
    "query": GRAPHQL_QUERY,
    "variables": variables,
}
print("📡 Querying Medevio API for attachments...\n")
r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers)
print(f"HTTP status: {r.status_code}\n")
data = r.json()
records = data.get("data", {}).get("patientRequestMedicalRecords", [])
if not records:
    print("⚠️ No attachments found.")
    exit()
# === Saving ===
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
print(f"📂 Saving {len(records)} attachments to: {OUTPUT_DIR}\n")
for rec in records:
    med = rec.get("medicalRecord", {})
    url = med.get("downloadUrl")
    # NOTE(review): the second argument of .get is only used when the KEY is
    # missing — a null "description" still yields None, then "unknown.pdf".
    name = med.get("description", med.get("id")) or "unknown.pdf"
    if not url:
        print(f"❌ Skipped {name} (no download URL)")
        continue
    # Sanitize path separators so the name cannot escape OUTPUT_DIR.
    safe_name = name.replace("/", "_").replace("\\", "_")
    out_path = OUTPUT_DIR / safe_name
    print(f"⬇️ Downloading: {safe_name}")
    try:
        file_data = requests.get(url, timeout=30)
        file_data.raise_for_status()
        out_path.write_bytes(file_data.content)
        print(f"✅ Saved: {out_path.name} ({len(file_data.content)/1024:.1f} KB)")
    except Exception as e:
        print(f"❌ Error saving {safe_name}: {e}")
print("\n🎉 Done!")
+59
View File
@@ -0,0 +1,59 @@
import requests
import json
from pathlib import Path
# token.txt lives one directory above this script.
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
# Hard-coded request used for this one-off attachment listing probe.
REQUEST_ID = "092a0c63-28be-4c6b-ab3b-204e1e2641d4"
CLINIC_SLUG = "mudr-buzalkova"
def read_token(p: Path) -> str:
    """Token from file *p*; strips the conventional 'Bearer ' prefix if present."""
    data = p.read_text(encoding="utf-8").strip()
    return data if not data.startswith("Bearer ") else data.split(" ", 1)[1]
GRAPHQL_QUERY = r"""
query ClinicRequestDetail_GetPatientRequest2(
$requestId: UUID!,
) {
patientRequestMedicalRecords: listMedicalRecordsForPatientRequest(
attachmentTypes: [ECRF_FILL_ATTACHMENT, MESSAGE_ATTACHMENT, PATIENT_REQUEST_ATTACHMENT]
patientRequestId: $requestId
pageInfo: {first: 100, offset: 0}
) {
attachmentType
id
medicalRecord {
contentType
description
downloadUrl
id
url
visibleToPatient
}
}
}
"""
variables = {
"requestId": REQUEST_ID,
}
headers = {
"Authorization": f"Bearer {read_token(TOKEN_PATH)}",
"Content-Type": "application/json",
"Accept": "application/json",
}
payload = {
"operationName": "ClinicRequestDetail_GetPatientRequest2",
"query": GRAPHQL_QUERY,
"variables": variables,
}
print("📡 Querying Medevio API...\n")
r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers)
print(f"HTTP status: {r.status_code}\n")
print(json.dumps(r.json(), indent=2, ensure_ascii=False))
@@ -0,0 +1,204 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os,zlib
import json
import requests
import pymysql
from pathlib import Path
from datetime import datetime
import time
import shutil
# ==============================
# 🔧 CONFIGURATION
# ==============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
BASE_DIR = Path(r"u:\Dropbox\ordinace\Dokumentace_ke_zpracování\Medevio_přílohy")
BASE_DIR.mkdir(parents=True, exist_ok=True)
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
GRAPHQL_QUERY = r"""
query ClinicRequestDetail_GetPatientRequest2(
$requestId: UUID!,
) {
patientRequestMedicalRecords: listMedicalRecordsForPatientRequest(
attachmentTypes: [ECRF_FILL_ATTACHMENT, MESSAGE_ATTACHMENT, PATIENT_REQUEST_ATTACHMENT]
patientRequestId: $requestId
pageInfo: {first: 100, offset: 0}
) {
attachmentType
id
medicalRecord {
contentType
description
downloadUrl
id
url
visibleToPatient
}
}
}
"""
def short_crc8(uuid_str: str) -> str:
    """Deterministic 8-hex-char digest of *uuid_str* (CRC32, zero-padded)."""
    digest = zlib.crc32(uuid_str.encode("utf-8")) & 0xFFFFFFFF
    return format(digest, "08x")
def extract_filename_from_url(url: str) -> str:
    """Extract the filename from an S3-style URL (last path segment, query removed).

    Fix: the query string is stripped *before* taking the last '/'-separated
    segment. The previous order (split on '/' first) returned a fragment of the
    signature whenever the query string itself contained a '/' — common in
    base64-encoded S3 signatures.

    Returns "unknown_filename" for anything that cannot be processed (e.g. None).
    """
    try:
        return url.split("?", 1)[0].rsplit("/", 1)[-1]
    except Exception:
        return "unknown_filename"
def safe_rename(src: Path, dst: Path, retries: int = 5, delay: float = 3.0):
    """Rename *src* to *dst*, retrying on PermissionError (Dropbox/OneDrive locks).

    Retries up to *retries* times with *delay* seconds between attempts; any
    other exception aborts immediately. Failure is reported, not raised.
    """
    for attempt in range(1, retries + 1):
        try:
            src.rename(dst)
            return  # success
        except PermissionError as e:
            print(f" ⚠️ Rename attempt {attempt}/{retries} failed ({e}) — waiting {delay}s...")
            time.sleep(delay)
        except Exception as e:
            print(f" ❌ Unexpected rename error: {e}")
            break
    # Fix: the failure message used to read "'{src}''{dst}'" with no separator
    # between the two paths.
    print(f" 🚫 Failed to rename '{src}' → '{dst}' after {retries} attempts.")
# ==============================
# 🔑 TOKEN
# ==============================
def read_token(p: Path) -> str:
    """Load the bearer token from *p*, ignoring an optional 'Bearer ' prefix."""
    prefix = "Bearer "
    token_value = p.read_text(encoding="utf-8").strip()
    if token_value.startswith(prefix):
        token_value = token_value[len(prefix):]
    return token_value
# ==============================
# 💾 DOWNLOAD FILE
# ==============================
def download_file(url: str, out_path: Path):
    """Download *url* to *out_path*, creating parent folders; errors are printed,
    not raised (best-effort — the caller counts saved files afterwards).
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        out_path.parent.mkdir(parents=True, exist_ok=True)
        with open(out_path, "wb") as f:
            f.write(r.content)
        # out_path is expected to live under BASE_DIR for this relative_to call.
        print(f" 💾 Saved: {out_path.relative_to(BASE_DIR)}")
    except Exception as e:
        print(f" ⚠️ Failed to download {out_path.name}: {e}")
# ==============================
# 📡 FETCH ATTACHMENTS
# ==============================
def fetch_attachments(headers, request_id):
    """Return the attachment records of one patient request (empty list on error).

    Keeps the original best-effort contract: a non-200 status prints a message
    and returns []. Fix: "data": null in the response body no longer crashes
    the chained .get() calls, and the request now has a timeout.
    """
    variables = {
        "requestId": request_id,
    }
    payload = {
        "operationName": "ClinicRequestDetail_GetPatientRequest2",
        "query": GRAPHQL_QUERY,
        "variables": variables,
    }
    r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
    if r.status_code != 200:
        print(f"❌ HTTP {r.status_code}")
        return []
    data = (r.json().get("data") or {}).get("patientRequestMedicalRecords") or []
    return data
# ==============================
# 🧠 MAIN
# ==============================
def main():
    """Download all attachments of 'Odeslat lékařskou zprávu' requests.

    Reads matching request ids from the `pozadavky` table, fetches each
    request's attachments from the API, saves them under
    BASE_DIR/<surname, name>/<date> <crc8>, and renames the folder to include
    the number of files actually saved.
    """
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    with conn.cursor() as cur:
        cur.execute("""
            SELECT id, displayTitle, pacient_prijmeni, pacient_jmeno, createdAt
            FROM pozadavky
            WHERE displayTitle = 'Odeslat lékařskou zprávu'
        """)
        rows = cur.fetchall()
    print(f"📋 Found {len(rows)} 'Odeslat lékařskou zprávu' requests")
    for i, row in enumerate(rows, 1):
        req_id = row["id"]
        print(req_id)
        prijmeni = row.get("pacient_prijmeni") or "Neznamy"
        jmeno = row.get("pacient_jmeno") or ""
        created = row.get("createdAt")
        # NOTE(review): if createdAt is NULL, created_date stays None and is
        # later interpolated into folder names as the literal "None" — confirm
        # whether that is acceptable.
        created_date = None
        if created:
            try:
                created_date = datetime.strptime(str(created), "%Y-%m-%d %H:%M:%S").strftime("%Y-%m-%d")
            except Exception:
                created_date = "unknown"
        patient_dir = BASE_DIR / f"{prijmeni}, {jmeno}" / created_date
        print(f"\n[{i}/{len(rows)}] 📂 {patient_dir.relative_to(BASE_DIR)}")
        attachments = fetch_attachments(headers, req_id)
        # print(attachments)
        if not attachments:
            print(" ⚠️ No attachments")
            continue
        # build a short CRC32 hash from the request UUID
        uuid_short = short_crc8(str(req_id))
        # temporary folder name without the attachment count
        temp_dir = BASE_DIR / f"{prijmeni}, {jmeno}" / f"{created_date} {uuid_short}"
        temp_dir.mkdir(parents=True, exist_ok=True)
        for a in attachments:
            m = a.get("medicalRecord") or {}
            # fname = m.get("description") or f"{m.get('id')}.bin"
            url = m.get("downloadUrl")
            fname = extract_filename_from_url(url)
            if url:
                out_path = temp_dir / fname
                download_file(url, out_path)
        # after downloading, count the files that were actually saved
        real_count = len([f for f in temp_dir.iterdir() if f.is_file()])
        # rename the folder to its final name, which includes the count
        final_dir = temp_dir.parent / f"{temp_dir.name} ({real_count})"
        if real_count != 0:
            safe_rename(temp_dir, final_dir)
            print(f" 📎 Saved {real_count} attachments → {final_dir.relative_to(BASE_DIR)}")
        else:
            print(f" ⚠️ No attachments for {temp_dir.name}")
            temp_dir.rmdir()  # remove the empty folder
    conn.close()
    print("\n✅ Done!")
if __name__ == "__main__":
    main()
@@ -0,0 +1,209 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Download all 'Odeslat lékařskou zprávu' attachments from Medevio API
and store them (including binary content) directly into MySQL table `medevio_downloads`.
Each attachment (PDF, image, etc.) is fetched once and saved as LONGBLOB.
Duplicate protection is ensured via UNIQUE KEY on `attachment_id`.
"""
import zlib
import json
import requests
import pymysql
from pathlib import Path
from datetime import datetime
import time
# ==============================
# 🔧 CONFIGURATION
# ==============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
GRAPHQL_QUERY = r"""
query ClinicRequestDetail_GetPatientRequest2($requestId: UUID!) {
patientRequestMedicalRecords: listMedicalRecordsForPatientRequest(
attachmentTypes: [ECRF_FILL_ATTACHMENT, MESSAGE_ATTACHMENT, PATIENT_REQUEST_ATTACHMENT]
patientRequestId: $requestId
pageInfo: {first: 100, offset: 0}
) {
attachmentType
id
medicalRecord {
contentType
description
downloadUrl
id
url
visibleToPatient
}
}
}
"""
# ==============================
# 🧮 HELPERS
# ==============================
def short_crc8(uuid_str: str) -> str:
    """Return a deterministic 8-character lowercase hex digest of *uuid_str* (CRC32)."""
    checksum = zlib.crc32(uuid_str.encode("utf-8")) & 0xFFFFFFFF
    return format(checksum, "08x")
def extract_filename_from_url(url: str) -> str:
    """Return the final path segment of *url*, stripped of any query string."""
    try:
        tail = url.rsplit("/", 1)[-1]
        return tail.split("?", 1)[0]
    except Exception:
        # Non-string / malformed input — keep the caller going with a placeholder.
        return "unknown_filename"
def read_token(p: Path) -> str:
    """Load the API token from *p*, stripping whitespace and an optional 'Bearer ' prefix."""
    raw = p.read_text(encoding="utf-8").strip()
    prefix = "Bearer "
    return raw[len(prefix):] if raw.startswith(prefix) else raw
# ==============================
# 📡 FETCH ATTACHMENTS
# ==============================
def fetch_attachments(headers, request_id):
    """Return the list of medical-record attachments for one patient request.

    Posts the module-level GRAPHQL_QUERY to the Medevio GraphQL endpoint.
    Returns [] on any non-200 response (error is printed, not raised).
    """
    variables = {"requestId": request_id}
    payload = {
        "operationName": "ClinicRequestDetail_GetPatientRequest2",
        "query": GRAPHQL_QUERY,
        "variables": variables,
    }
    r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
    if r.status_code != 200:
        print(f"❌ HTTP {r.status_code} for request {request_id}")
        return []
    # Missing "data" key (e.g. GraphQL error payload) degrades to [].
    data = r.json().get("data", {}).get("patientRequestMedicalRecords", [])
    return data
# ==============================
# 💾 SAVE TO MYSQL (with skip)
# ==============================
def insert_download(cur, req_id, a, m, jmeno, prijmeni, created_date, existing_ids):
    """Download one attachment and upsert it (metadata + binary) into `medevio_downloads`.

    Args:
        cur: open pymysql cursor (caller owns the commit).
        req_id: parent patient-request id.
        a: attachment dict from the GraphQL response.
        m: a["medicalRecord"] sub-dict (may be empty).
        jmeno / prijmeni: patient first / last name, stored denormalized.
        created_date: request creation datetime (stored as created_at).
        existing_ids: set of already-stored attachment ids; used as a skip
            list and updated in place after a successful save.

    Network/HTTP failures abort only this attachment, never the whole run.
    """
    attachment_id = a.get("id")
    if attachment_id in existing_ids:
        print(f" ⏭️ Skipping already downloaded attachment {attachment_id}")
        return
    url = m.get("downloadUrl")
    if not url:
        print(" ⚠️ No download URL")
        return
    filename = extract_filename_from_url(url)
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        content = r.content
    except Exception as e:
        print(f" ⚠️ Failed to download {url}: {e}")
        return
    file_size = len(content)
    attachment_type = a.get("attachmentType")
    content_type = m.get("contentType")
    cur.execute("""
        INSERT INTO medevio_downloads (
            request_id, attachment_id, attachment_type, filename,
            content_type, file_size, pacient_jmeno, pacient_prijmeni,
            created_at, file_content
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            file_content = VALUES(file_content),
            file_size = VALUES(file_size),
            downloaded_at = NOW()
    """, (
        req_id,
        attachment_id,
        attachment_type,
        filename,
        content_type,
        file_size,
        jmeno,
        prijmeni,
        created_date,
        content
    ))
    # BUG FIX: report the real filename — the old message hard-coded "(unknown)"
    # even though `filename` was already extracted above.
    print(f" 💾 Saved {filename} ({file_size/1024:.1f} kB)")
    existing_ids.add(attachment_id)  # add to skip list
# ==============================
# 🧠 MAIN
# ==============================
def main():
    """Sync all 'Odeslat lékařskou zprávu' attachments from Medevio into MySQL.

    Flow: read token → preload already-stored attachment ids (dedup across
    runs) → iterate matching `pozadavky` rows → fetch attachment metadata via
    GraphQL → download and upsert each binary into `medevio_downloads`.
    """
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    print("📦 Loading list of already downloaded attachments...")
    with conn.cursor() as cur:
        cur.execute("SELECT attachment_id FROM medevio_downloads")
        # Skip-list: attachments already persisted are not re-downloaded.
        existing_ids = {row["attachment_id"] for row in cur.fetchall()}
    print(f"✅ Found {len(existing_ids)} attachments already saved.")
    with conn.cursor() as cur:
        cur.execute("""
            SELECT id, displayTitle, pacient_prijmeni, pacient_jmeno, createdAt
            FROM pozadavky
            WHERE displayTitle = 'Odeslat lékařskou zprávu'
        """)
        rows = cur.fetchall()
    print(f"📋 Found {len(rows)} 'Odeslat lékařskou zprávu' requests")
    for i, row in enumerate(rows, 1):
        req_id = row["id"]
        prijmeni = row.get("pacient_prijmeni") or "Neznamy"
        jmeno = row.get("pacient_jmeno") or ""
        created = row.get("createdAt")
        try:
            created_date = datetime.strptime(str(created), "%Y-%m-%d %H:%M:%S")
        except Exception:
            # Unparseable/missing createdAt is stored as NULL.
            created_date = None
        print(f"\n[{i}/{len(rows)}] 🧾 {prijmeni}, {jmeno} ({req_id})")
        attachments = fetch_attachments(headers, req_id)
        if not attachments:
            print(" ⚠️ No attachments")
            continue
        # One cursor + one commit per request keeps each request atomic-ish.
        with conn.cursor() as cur:
            for a in attachments:
                m = a.get("medicalRecord") or {}
                insert_download(cur, req_id, a, m, jmeno, prijmeni, created_date, existing_ids)
        conn.commit()
        print(f"✅ {len(attachments)} attachments saved for {prijmeni}, {jmeno}")
        time.sleep(0.5)  # be nice to the API
    conn.close()
    print("\n✅ Done! All attachments stored in MySQL table `medevio_downloads`.")
# ==============================
if __name__ == "__main__":
main()
@@ -0,0 +1,208 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Fetch communication threads (messages) from Medevio API
for pozadavky where communicationprocessed IS NULL or outdated,
optionally filtered by creation date.
Stores results in MySQL table `medevio_messages`.
"""
import requests
import pymysql
from pathlib import Path
from datetime import datetime
import time
# ==============================
# 🔧 CONFIGURATION
# ==============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
GRAPHQL_URL = "https://api.medevio.cz/graphql"
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
# ✅ Optional: Only process requests created after this date
# Leave empty ("") to process all
CREATED_AFTER = "2025-11-09" # 🕓 Adjust freely, or set to "" for no limit
# ==============================
# 🔐 TOKEN
# ==============================
def read_token(p: Path) -> str:
    """Return the token stored in *p*, without an optional 'Bearer ' prefix."""
    contents = p.read_text(encoding="utf-8").strip()
    if contents.startswith("Bearer "):
        _, _, contents = contents.partition(" ")
    return contents
headers = {
"Authorization": f"Bearer {read_token(TOKEN_PATH)}",
"Content-Type": "application/json",
}
# ==============================
# 🧩 GRAPHQL QUERY
# ==============================
GRAPHQL_QUERY = """
query UseMessages_ListMessages($requestId: String!, $updatedSince: DateTime) {
messages: listMessages(
patientRequestId: $requestId
updatedSince: $updatedSince
) {
id
createdAt
text
updatedAt
readAt
sender { id name surname clinicId }
medicalRecord { downloadUrl description contentType }
}
}
"""
# ==============================
# 🧮 HELPERS
# ==============================
def normalize_ts(ts: str):
    """Normalize an ISO-8601 timestamp into a MySQL DATETIME string.

    Drops the 'T' separator, a trailing 'Z', and any fractional seconds.
    Returns None for falsy input.
    """
    if not ts:
        return None
    cleaned = ts.replace("T", " ").replace("Z", "")
    head, _, _ = cleaned.partition(".")
    return head
# ==============================
# 📡 FETCH MESSAGES
# ==============================
def fetch_messages(request_id):
    """Return all messages of one patient request.

    Uses the module-level `headers` for auth. `updatedSince` is always None,
    i.e. the full thread is fetched on every call. Returns [] on non-200
    responses or an empty/missing payload.
    """
    payload = {
        "operationName": "UseMessages_ListMessages",
        "variables": {"requestId": request_id, "updatedSince": None},
        "query": GRAPHQL_QUERY,
    }
    r = requests.post(GRAPHQL_URL, headers=headers, json=payload, timeout=30)
    if r.status_code != 200:
        print(f"❌ HTTP {r.status_code}: {r.text}")
        return []
    # Trailing `or []` guards against an explicit null in the JSON payload.
    return r.json().get("data", {}).get("messages", []) or []
# ==============================
# 💾 CREATE TABLE IF NEEDED
# ==============================
def ensure_table_exists(conn):
    """Create the `medevio_messages` table on first run (no-op afterwards)."""
    with conn.cursor() as cur:
        # Message id is the primary key, so re-fetching a thread overwrites
        # rather than duplicates (see insert_message's REPLACE).
        cur.execute("""
            CREATE TABLE IF NOT EXISTS medevio_messages (
                id VARCHAR(64) PRIMARY KEY,
                request_id VARCHAR(64),
                sender_name VARCHAR(255),
                sender_id VARCHAR(64),
                sender_clinic_id VARCHAR(64),
                text TEXT,
                created_at DATETIME NULL,
                read_at DATETIME NULL,
                updated_at DATETIME NULL,
                attachment_url TEXT,
                attachment_description TEXT,
                attachment_content_type VARCHAR(128),
                inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            ) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
        """)
    conn.commit()
# ==============================
# 💾 INSERT MESSAGE
# ==============================
def insert_message(cur, req_id, msg):
    """Upsert one message row into `medevio_messages` (REPLACE keyed on id)."""
    sender = msg.get("sender") or {}
    medrec = msg.get("medicalRecord") or {}
    sender_full = f"{sender.get('name','')} {sender.get('surname','')}".strip()
    row = (
        msg.get("id"),
        req_id,
        sender_full,
        sender.get("id"),
        sender.get("clinicId"),
        msg.get("text"),
        normalize_ts(msg.get("createdAt")),
        normalize_ts(msg.get("readAt")),
        normalize_ts(msg.get("updatedAt")),
        medrec.get("downloadUrl"),
        medrec.get("description"),
        medrec.get("contentType"),
    )
    cur.execute("""
        REPLACE INTO medevio_messages (
            id, request_id, sender_name, sender_id, sender_clinic_id, text,
            created_at, read_at, updated_at,
            attachment_url, attachment_description, attachment_content_type
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
    """, row)
# ==============================
# 🧠 MAIN
# ==============================
def main():
    """Fetch message threads for stale pozadavky and store them in MySQL.

    A request is "stale" when communicationprocessed is NULL or older than its
    updatedAt; optionally restricted to rows created after CREATED_AFTER.
    """
    conn = pymysql.connect(**DB_CONFIG)
    ensure_table_exists(conn)
    with conn.cursor() as cur:
        sql = """
            SELECT id, createdAt, updatedAt, communicationprocessed
            FROM pozadavky
            WHERE (communicationprocessed IS NULL OR communicationprocessed < updatedAt)
        """
        # Optional date filter keeps full-history reruns cheap.
        if CREATED_AFTER:
            sql += " AND createdAt >= %s"
            cur.execute(sql, (CREATED_AFTER,))
        else:
            cur.execute(sql)
        rows = cur.fetchall()
    if not rows:
        print("✅ No pending communication updates.")
        return
    print(f"📋 Found {len(rows)} requests needing communication check.")
    for i, row in enumerate(rows, 1):
        req_id = row["id"]
        print(f"\n[{i}/{len(rows)}] 🔍 Fetching communication for {req_id} ...")
        messages = fetch_messages(req_id)
        print(f" 💬 {len(messages)} messages found.")
        # Update the watermark even when no messages came back, so the
        # request is not re-checked on every run.
        with conn.cursor() as cur:
            if messages:
                for msg in messages:
                    insert_message(cur, req_id, msg)
            cur.execute("""
                UPDATE pozadavky
                SET communicationprocessed = NOW()
                WHERE id = %s
            """, (req_id,))
        conn.commit()
        print(f" ✅ Processed {len(messages)} messages for {req_id}")
        time.sleep(0.5)  # avoid hammering the API
    conn.close()
    print("\n✅ All communication threads processed and timestamps updated.")
# ==============================
if __name__ == "__main__":
main()
+194
View File
@@ -0,0 +1,194 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Download and store Medevio questionnaires (userNote + eCRF) for all patient requests.
Uses the verified working query "GetPatientRequest2".
"""
import json
import requests
import pymysql
from datetime import datetime
from pathlib import Path
import time
# ==============================
# 🔧 CONFIGURATION
# ==============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
GRAPHQL_URL = "https://api.medevio.cz/graphql"
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
from datetime import datetime
def fix_datetime(dt_str):
    """Parse an ISO-8601 string (optionally suffixed with 'Z' or '+00:00') into a datetime.

    Returns None for falsy or unparseable input.
    """
    if not dt_str:
        return None
    try:
        # Strip the UTC markers fromisoformat (pre-3.11) cannot digest.
        cleaned = dt_str.replace("Z", "").replace("+00:00", "")
        return datetime.fromisoformat(cleaned)
    except Exception:
        return None
# ✅ Optional: limit which requests to process
CREATED_AFTER = "2025-11-09" # set "" to disable
# ==============================
# 🧮 HELPERS
# ==============================
def read_token(p: Path) -> str:
    """Read the Medevio API token from *p*, dropping an optional 'Bearer ' prefix."""
    text = p.read_text(encoding="utf-8").strip()
    if text.startswith("Bearer "):
        return text.split(" ", 1)[1]
    return text
GRAPHQL_QUERY = r"""
query GetPatientRequest2($requestId: UUID!, $clinicSlug: String!, $locale: Locale!) {
request: getPatientRequest2(patientRequestId: $requestId, clinicSlug: $clinicSlug) {
id
displayTitle(locale: $locale)
createdAt
updatedAt
userNote
eventType
extendedPatient(clinicSlug: $clinicSlug) {
name
surname
identificationNumber
}
ecrfFilledData(locale: $locale) {
name
groups {
label
fields {
name
label
type
value
}
}
}
}
}
"""
def fetch_questionnaire(headers, request_id, clinic_slug):
    """Fetch questionnaire data (userNote + eCRF) for one request.

    Returns the GraphQL `request` object, or None on HTTP failure / missing
    data (the error is printed, not raised).
    """
    payload = {
        "operationName": "GetPatientRequest2",
        "query": GRAPHQL_QUERY,
        "variables": {
            "requestId": request_id,
            "clinicSlug": clinic_slug,
            "locale": "cs",
        },
    }
    r = requests.post(GRAPHQL_URL, json=payload, headers=headers, timeout=40)
    if r.status_code != 200:
        print(f"❌ HTTP {r.status_code} for {request_id}: {r.text}")
        return None
    return r.json().get("data", {}).get("request")
def insert_questionnaire(cur, req):
    """Upsert one questionnaire (userNote + serialized eCRF) into medevio_questionnaires."""
    if not req:
        return
    patient = req.get("extendedPatient") or {}
    ecrf_data = req.get("ecrfFilledData")
    row = (
        req.get("id"),
        patient.get("name"),
        patient.get("surname"),
        patient.get("identificationNumber"),
        fix_datetime(req.get("createdAt")),
        fix_datetime(req.get("updatedAt")),
        req.get("userNote"),
        json.dumps(ecrf_data, ensure_ascii=False),
    )
    cur.execute("""
        INSERT INTO medevio_questionnaires (
            request_id, patient_name, patient_surname, patient_identification,
            created_at, updated_at, user_note, ecrf_json
        )
        VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            updated_at = VALUES(updated_at),
            user_note = VALUES(user_note),
            ecrf_json = VALUES(ecrf_json),
            updated_local = NOW()
    """, row)
    print(f" 💾 Stored questionnaire for {patient.get('surname','')} {patient.get('name','')}")
# ==============================
# 🧠 MAIN
# ==============================
def main():
    """Store questionnaires for pozadavky whose questionnaireprocessed is stale.

    Stale = questionnaireprocessed IS NULL or older than updatedAt, optionally
    restricted to rows created after CREATED_AFTER.
    """
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    with conn.cursor() as cur:
        sql = """
            SELECT id, pacient_jmeno, pacient_prijmeni, createdAt, updatedAt, questionnaireprocessed
            FROM pozadavky
            WHERE (questionnaireprocessed IS NULL OR questionnaireprocessed < updatedAt)
        """
        if CREATED_AFTER:
            sql += " AND createdAt >= %s"
            cur.execute(sql, (CREATED_AFTER,))
        else:
            cur.execute(sql)
        rows = cur.fetchall()
    print(f"📋 Found {len(rows)} requests needing questionnaire check.")
    for i, row in enumerate(rows, 1):
        req_id = row["id"]
        print(f"\n[{i}/{len(rows)}] 🔍 Fetching questionnaire for {req_id} ...")
        req = fetch_questionnaire(headers, req_id, CLINIC_SLUG)
        if not req:
            # NOTE: the watermark is NOT updated here, so failed fetches are retried next run.
            print(" ⚠️ No questionnaire data found.")
            continue
        with conn.cursor() as cur:
            insert_questionnaire(cur, req)
            cur.execute("UPDATE pozadavky SET questionnaireprocessed = NOW() WHERE id = %s", (req_id,))
        conn.commit()
        time.sleep(0.4)  # polite pacing
    conn.close()
    print("\n✅ Done! All questionnaires stored in MySQL table `medevio_questionnaires`.")
# ==============================
if __name__ == "__main__":
main()
@@ -0,0 +1,59 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import pymysql
from datetime import datetime, timedelta
# ================================
# ⚙️ CONFIGURATION
# ================================
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
}
# kam uložit výstup
OUTPUT_DIR = r"U:\Dropbox\!!!Days\Downloads Z230"
DAYS_BACK = 700 # posledních X dní
# ================================
# 📘 SQL dotaz
# ================================
SQL = f"""
SELECT
m.id AS Message_ID,
m.request_id AS Request_ID,
m.created_at AS Datum_vytvoření,
m.sender_name AS Odesílatel,
m.text AS Text_zprávy,
m.pacient_jmeno AS Pacient_jméno,
m.pacient_prijmeni AS Pacient_příjmení,
m.pacient_rodnecislo AS Rodné_číslo
FROM medevio_messages m
WHERE m.created_at >= NOW() - INTERVAL {DAYS_BACK} DAY
ORDER BY m.created_at DESC;
"""
# ================================
# 🧠 MAIN
# ================================
def main():
    """Export `medevio_messages` rows from the last DAYS_BACK days to an Excel file."""
    conn = pymysql.connect(**DB_CONFIG)
    # NOTE(review): pd.read_sql with a raw DBAPI connection emits a pandas
    # UserWarning; a SQLAlchemy engine would silence it — results are the same.
    df = pd.read_sql(SQL, conn)
    conn.close()
    today = datetime.now().strftime("%Y-%m-%d")
    # Windows-style path join; OUTPUT_DIR is a raw string configured above.
    output_path = f"{OUTPUT_DIR}\\Medevio_messages_report_{today}.xlsx"
    df.to_excel(output_path, index=False)
    print(f"✅ Export hotov: {output_path}")
    print(f"📄 Počet řádků: {len(df)}")
if __name__ == "__main__":
main()
@@ -0,0 +1,153 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pymysql
import requests
from datetime import datetime
from pathlib import Path
import time, socket
# ===============================
# ⚙️ CONFIG
# ===============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
GRAPHQL_QUERY = r"""
query ClinicRequestDetail_GetMessages(
$clinicSlug: String!,
$requestId: ID!
) {
clinicRequestDetail_GetPatientRequestMessages(
clinicSlug: $clinicSlug,
requestId: $requestId
) {
id
text
createdAt
sender {
id
name
}
extendedPatient {
name
surname
identificationNumber
}
}
}
"""
# ===============================
# 🔑 Token reader
# ===============================
def read_token(path: Path) -> str:
    """Return the token from *path*; a leading 'Bearer ' marker is removed."""
    stored = path.read_text(encoding="utf-8").strip()
    if not stored.startswith("Bearer "):
        return stored
    return stored.partition(" ")[2]
# ===============================
# 🕒 Helper
# ===============================
def to_mysql_dt(iso_str):
    """Convert an ISO-8601 timestamp (optionally 'Z'-suffixed) to 'YYYY-MM-DD HH:MM:SS'.

    Returns None for falsy or unparseable input.
    """
    if not iso_str:
        return None
    try:
        parsed = datetime.fromisoformat(iso_str.replace("Z", "+00:00"))
    except Exception:
        return None
    return parsed.strftime("%Y-%m-%d %H:%M:%S")
# ===============================
# 💾 Upsert
# ===============================
def upsert_message(conn, msg, request_id):
    """Insert or update one message row, denormalizing sender and patient fields.

    Commits immediately (one transaction per message).
    """
    sender = msg.get("sender") or {}
    patient = msg.get("extendedPatient") or {}
    values = (
        msg.get("id"),
        request_id,
        sender.get("name"),
        sender.get("id"),
        msg.get("text"),
        to_mysql_dt(msg.get("createdAt")),
        patient.get("name"),
        patient.get("surname"),
        patient.get("identificationNumber"),
    )
    query = """
    INSERT INTO medevio_messages (
        id, request_id, sender_name, sender_id, text, created_at,
        pacient_jmeno, pacient_prijmeni, pacient_rodnecislo
    ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)
    ON DUPLICATE KEY UPDATE
        text=VALUES(text),
        created_at=VALUES(created_at),
        pacient_jmeno=VALUES(pacient_jmeno),
        pacient_prijmeni=VALUES(pacient_prijmeni),
        pacient_rodnecislo=VALUES(pacient_rodnecislo)
    """
    with conn.cursor() as cur:
        cur.execute(query, values)
    conn.commit()
# ===============================
# 📡 Fetch messages for one request
# ===============================
def fetch_messages(headers, request_id):
    """Fetch all messages of one patient request from the Medevio GraphQL API.

    Raises requests.HTTPError on non-2xx responses (propagated to the caller,
    which logs per-request failures and continues).
    """
    payload = {
        "operationName": "ClinicRequestDetail_GetMessages",
        "query": GRAPHQL_QUERY,
        "variables": {"clinicSlug": CLINIC_SLUG, "requestId": request_id},
    }
    # FIX: add an explicit timeout — without one a stalled connection hangs
    # the sync forever (every sibling script here passes timeout=30).
    r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
    r.raise_for_status()
    data = r.json().get("data", {}).get("clinicRequestDetail_GetPatientRequestMessages", [])
    return data
# ===============================
# 🧠 Main
# ===============================
def main():
    """Synchronize messages for every request id present in `pozadavky`."""
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    cur = conn.cursor()
    # Most recently updated requests are processed first.
    cur.execute("SELECT id FROM pozadavky ORDER BY updatedAt DESC")
    request_ids = [r["id"] for r in cur.fetchall()]
    print(f"📋 Found {len(request_ids)} požadavků.")
    for i, rid in enumerate(request_ids, 1):
        try:
            msgs = fetch_messages(headers, rid)
            for msg in msgs:
                upsert_message(conn, msg, rid)
            print(f"[{i}/{len(request_ids)}] {rid} → {len(msgs)} zpráv uloženo.")
            time.sleep(0.4)
        except Exception as e:
            # Per-request failures (network, auth, bad data) are logged and skipped.
            print(f"❌ Chyba při načítání {rid}: {e}")
    conn.close()
    print("\n✅ Hotovo, všechny zprávy synchronizovány.")
if __name__ == "__main__":
main()
+179
View File
@@ -0,0 +1,179 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pymysql
import requests
from pathlib import Path
from datetime import datetime
import time
# ================================
# ⚙️ CONFIGURATION
# ================================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
BATCH_SIZE = 100
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
GRAPHQL_QUERY = r"""
query ClinicRequestGrid_ListPatientRequestsForClinic2(
$clinicSlug: String!,
$queueId: String,
$queueAssignment: QueueAssignmentFilter!,
$pageInfo: PageInfo!,
$locale: Locale!,
$state: PatientRequestState
) {
requestsResponse: listPatientRequestsForClinic2(
clinicSlug: $clinicSlug,
queueId: $queueId,
queueAssignment: $queueAssignment,
pageInfo: $pageInfo,
state: $state
) {
count
patientRequests {
id
displayTitle(locale: $locale)
createdAt
updatedAt
doneAt
removedAt
extendedPatient {
name
surname
identificationNumber
}
}
}
}
"""
# ================================
# 🔑 TOKEN
# ================================
def read_token(p: Path) -> str:
    """Read the API token from *p*; an optional 'Bearer ' prefix is stripped."""
    value = p.read_text(encoding="utf-8").strip()
    marker = "Bearer "
    return value[len(marker):] if value.startswith(marker) else value
# ================================
# 📡 FETCH FUNCTION
# ================================
def fetch_requests(headers, state, offset=0):
    """Fetch one batch of patient requests for a given state.

    Retries up to 3 times on network errors with a 5 s back-off.
    Returns (patientRequests list, server-reported total count);
    ([], 0) after three failed attempts.
    """
    variables = {
        "clinicSlug": CLINIC_SLUG,
        "queueId": None,
        "queueAssignment": "ANY",
        "pageInfo": {"first": BATCH_SIZE, "offset": offset},
        "locale": "cs",
        "state": state,
    }
    payload = {
        "operationName": "ClinicRequestGrid_ListPatientRequestsForClinic2",
        "query": GRAPHQL_QUERY,
        "variables": variables,
    }
    for attempt in range(3):  # up to 3 attempts
        try:
            r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
            r.raise_for_status()
            resp = r.json().get("data", {}).get("requestsResponse", {})
            return resp.get("patientRequests", []), resp.get("count", 0)
        except requests.exceptions.RequestException as e:
            print(f"⚠️ Chyba při načítání (pokus {attempt+1}/3): {e}")
            time.sleep(5)
    return [], 0
# ================================
# 💾 UPDATE ALL MESSAGES BY PATIENT DATA
# ================================
def update_all_messages(conn, patient):
    """Backfill patient identity columns onto every message of one request.

    Returns the number of message rows affected (0 when the request carries
    no extendedPatient data). Commits immediately.
    """
    details = patient.get("extendedPatient") or {}
    if not details:
        return 0
    params = (
        details.get("name"),
        details.get("surname"),
        details.get("identificationNumber"),
        patient.get("id"),
    )
    sql = """
    UPDATE medevio_messages
    SET pacient_jmeno=%s,
        pacient_prijmeni=%s,
        pacient_rodnecislo=%s
    WHERE request_id=%s
    """
    with conn.cursor() as cur:
        cur.execute(sql, params)
        affected = cur.rowcount
    conn.commit()
    return affected
# ================================
# 🧠 MAIN
# ================================
def process_state(conn, headers, state):
    """Walk all requests in *state* page by page and backfill patient data
    onto their messages.

    Returns (requests processed, message rows updated).
    """
    print(f"\n=== 🟦 Zpracovávám {state} požadavky ===")
    offset = 0
    total_processed = 0
    total_updated = 0
    while True:
        batch, total_count = fetch_requests(headers, state, offset)
        if not batch:
            break
        print(f"📦 Dávka od offsetu {offset} ({len(batch)} záznamů z {total_count})")
        for r in batch:
            updated = update_all_messages(conn, r)
            total_processed += 1
            total_updated += updated
            if updated:
                print(f"✅ {r.get('id')} → {updated} zpráv aktualizováno")
        offset += BATCH_SIZE
        # Stop once we've paged past the server-reported total.
        if offset >= total_count:
            break
        time.sleep(0.4)
    print(f"✅ {state}: zpracováno {total_processed} požadavků, aktualizováno {total_updated} zpráv.")
    return total_processed, total_updated
# ================================
# 🚀 ENTRY POINT
# ================================
def main():
    """Backfill patient identity onto messages for ACTIVE and DONE requests,
    then print a summary of both passes."""
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    print(f"\n=== Medevio mass patient sync @ {datetime.now():%Y-%m-%d %H:%M:%S} ===")
    total_p, total_u = process_state(conn, headers, "ACTIVE")
    done_p, done_u = process_state(conn, headers, "DONE")
    conn.close()
    print("\n=== 🧾 SOUHRN ===")
    print(f"ACTIVE: {total_p} požadavků, {total_u} zpráv aktualizováno")
    print(f"DONE: {done_p} požadavků, {done_u} zpráv aktualizováno")
    print("===========================================")
    print(f"CELKEM: {total_p + done_p} požadavků, {total_u + done_u} zpráv aktualizováno ✅")
main()
@@ -0,0 +1,228 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Download all attachments for pozadavky where attachmentsProcessed IS NULL
and (optionally) createdAt is newer than a configurable cutoff date.
Store them in MySQL table `medevio_downloads`, and update pozadavky.attachmentsProcessed = NOW().
"""
import zlib
import json
import requests
import pymysql
from pathlib import Path
from datetime import datetime
import time
# ==============================
# 🔧 CONFIGURATION
# ==============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"  # shared token file
CLINIC_SLUG = "mudr-buzalkova"
# SECURITY NOTE(review): database credentials are hard-coded here (and in the
# sibling scripts). Move them to environment variables or a config file that
# is excluded from version control.
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3307,
    "user": "root",
    "password": "Vlado9674+",
    "database": "medevio",
    "charset": "utf8mb4",
    "cursorclass": pymysql.cursors.DictCursor,
}
# ✅ Optional: Only process requests created after this date
# Leave empty ("") to process all
CREATED_AFTER = "2025-01-01"  # 🕓 Adjust freely, or set to "" for no limit
GRAPHQL_QUERY = r"""
query ClinicRequestDetail_GetPatientRequest2($requestId: UUID!) {
patientRequestMedicalRecords: listMedicalRecordsForPatientRequest(
attachmentTypes: [ECRF_FILL_ATTACHMENT, MESSAGE_ATTACHMENT, PATIENT_REQUEST_ATTACHMENT]
patientRequestId: $requestId
pageInfo: {first: 100, offset: 0}
) {
attachmentType
id
medicalRecord {
contentType
description
downloadUrl
id
url
visibleToPatient
}
}
}
"""
# ==============================
# 🧮 HELPERS
# ==============================
def short_crc8(uuid_str: str) -> str:
    """Deterministic 8-hex-digit CRC32 digest of *uuid_str*."""
    digest = zlib.crc32(uuid_str.encode("utf-8"))
    return f"{digest & 0xFFFFFFFF:08x}"
def extract_filename_from_url(url: str) -> str:
    """Return the last path segment of *url* without its query string."""
    try:
        last_segment = url.rsplit("/", 1)[-1]
        return last_segment.partition("?")[0]
    except Exception:
        # Defensive: None or non-string input yields a placeholder.
        return "unknown_filename"
def read_token(p: Path) -> str:
    """Read the Bearer token from *p* (the 'Bearer ' prefix itself is dropped)."""
    stored = p.read_text(encoding="utf-8").strip()
    if not stored.startswith("Bearer "):
        return stored
    return stored.split(" ", 1)[1]
# ==============================
# 📡 FETCH ATTACHMENTS
# ==============================
def fetch_attachments(headers, request_id):
    """Return the list of medical-record attachments for one patient request.

    Posts the module-level GRAPHQL_QUERY; returns [] on non-200 responses
    (the error is printed, not raised).
    """
    variables = {"requestId": request_id}
    payload = {
        "operationName": "ClinicRequestDetail_GetPatientRequest2",
        "query": GRAPHQL_QUERY,
        "variables": variables,
    }
    r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
    if r.status_code != 200:
        print(f"❌ HTTP {r.status_code} for request {request_id}")
        return []
    # A GraphQL error payload without "data" degrades to [].
    data = r.json().get("data", {}).get("patientRequestMedicalRecords", [])
    return data
# ==============================
# 💾 SAVE TO MYSQL (with skip)
# ==============================
def insert_download(cur, req_id, a, m, jmeno, prijmeni, created_date, existing_ids):
    """Download one attachment and upsert it (metadata + binary) into `medevio_downloads`.

    Args:
        cur: open pymysql cursor (caller owns the commit).
        req_id: parent patient-request id.
        a: attachment dict from the GraphQL response.
        m: a["medicalRecord"] sub-dict (may be empty).
        jmeno / prijmeni: patient first / last name, stored denormalized.
        created_date: request creation datetime (stored as created_at).
        existing_ids: set of already-stored attachment ids; used as a skip
            list and updated in place after a successful save.

    Returns:
        True when a new row was written, False when skipped or failed.
    """
    attachment_id = a.get("id")
    if attachment_id in existing_ids:
        print(f" ⏭️ Skipping already downloaded attachment {attachment_id}")
        return False
    url = m.get("downloadUrl")
    if not url:
        print(" ⚠️ No download URL")
        return False
    filename = extract_filename_from_url(url)
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        content = r.content
    except Exception as e:
        print(f" ⚠️ Failed to download {url}: {e}")
        return False
    file_size = len(content)
    attachment_type = a.get("attachmentType")
    content_type = m.get("contentType")
    cur.execute("""
        INSERT INTO medevio_downloads (
            request_id, attachment_id, attachment_type, filename,
            content_type, file_size, pacient_jmeno, pacient_prijmeni,
            created_at, file_content
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            file_content = VALUES(file_content),
            file_size = VALUES(file_size),
            downloaded_at = NOW()
    """, (
        req_id,
        attachment_id,
        attachment_type,
        filename,
        content_type,
        file_size,
        jmeno,
        prijmeni,
        created_date,
        content
    ))
    existing_ids.add(attachment_id)
    # BUG FIX: report the real filename — the old message hard-coded "(unknown)"
    # even though `filename` was already extracted above.
    print(f" 💾 Saved {filename} ({file_size/1024:.1f} kB)")
    return True
# ==============================
# 🧠 MAIN
# ==============================
def main():
    """Process pozadavky with attachmentsProcessed IS NULL: download each
    attachment into `medevio_downloads`, then stamp the row as processed.

    Requests with zero attachments are stamped too, so they are not re-scanned.
    """
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    print("📦 Loading list of already downloaded attachments...")
    with conn.cursor() as cur:
        cur.execute("SELECT attachment_id FROM medevio_downloads")
        # Skip-list of attachment ids already persisted (dedup across runs).
        existing_ids = {row["attachment_id"] for row in cur.fetchall()}
    print(f"✅ Found {len(existing_ids)} attachments already saved.")
    # ✅ Dynamic SQL with optional createdAt filter
    sql = """
        SELECT id, displayTitle, pacient_prijmeni, pacient_jmeno, createdAt
        FROM pozadavky
        WHERE attachmentsProcessed IS NULL
    """
    params = []
    if CREATED_AFTER:
        sql += " AND createdAt >= %s"
        params.append(CREATED_AFTER)
    with conn.cursor() as cur:
        cur.execute(sql, params)
        rows = cur.fetchall()
    print(f"📋 Found {len(rows)} pozadavky to process (attachmentsProcessed IS NULL"
          + (f", created >= {CREATED_AFTER}" if CREATED_AFTER else "") + ")")
    for i, row in enumerate(rows, 1):
        time.sleep(1)  # polite API delay
        req_id = row["id"]
        prijmeni = row.get("pacient_prijmeni") or "Neznamy"
        jmeno = row.get("pacient_jmeno") or ""
        created = row.get("createdAt")
        try:
            created_date = datetime.strptime(str(created), "%Y-%m-%d %H:%M:%S")
        except Exception:
            # Unparseable/missing createdAt is stored as NULL.
            created_date = None
        print(f"\n[{i}/{len(rows)}] 🧾 {prijmeni}, {jmeno} ({req_id})")
        attachments = fetch_attachments(headers, req_id)
        if not attachments:
            print(" ⚠️ No attachments found")
            # Stamp even empty requests so they are not re-fetched every run.
            with conn.cursor() as cur:
                cur.execute("UPDATE pozadavky SET attachmentsProcessed = NOW() WHERE id = %s", (req_id,))
            conn.commit()
            continue
        with conn.cursor() as cur:
            for a in attachments:
                m = a.get("medicalRecord") or {}
                insert_download(cur, req_id, a, m, jmeno, prijmeni, created_date, existing_ids)
        conn.commit()
        # ✅ mark processed
        with conn.cursor() as cur:
            cur.execute("UPDATE pozadavky SET attachmentsProcessed = NOW() WHERE id = %s", (req_id,))
        conn.commit()
        print(f"✅ {len(attachments)} attachments processed for {prijmeni}, {jmeno}")
        time.sleep(0.3)  # polite API delay
    conn.close()
    print("\n✅ Done! All new attachments processed and pozadavky updated.")
# ==============================
if __name__ == "__main__":
main()
@@ -0,0 +1,240 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pymysql
import requests
from pathlib import Path
from datetime import datetime, timezone
import time
from dateutil import parser
# Force UTF-8 output even under Windows Task Scheduler
import sys
try:
sys.stdout.reconfigure(encoding='utf-8')
sys.stderr.reconfigure(encoding='utf-8')
except AttributeError:
# Python < 3.7 fallback (not needed for you, but safe)
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
# ================================
# 🔧 CONFIGURATION
# ================================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
BATCH_SIZE = 100
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
# ⭐ NOVÝ TESTOVANÝ DOTAZ obsahuje lastMessage.createdAt
GRAPHQL_QUERY = r"""
query ClinicRequestList2(
$clinicSlug: String!,
$queueId: String,
$queueAssignment: QueueAssignmentFilter!,
$state: PatientRequestState,
$pageInfo: PageInfo!,
$locale: Locale!
) {
requestsResponse: listPatientRequestsForClinic2(
clinicSlug: $clinicSlug,
queueId: $queueId,
queueAssignment: $queueAssignment,
state: $state,
pageInfo: $pageInfo
) {
count
patientRequests {
id
displayTitle(locale: $locale)
createdAt
updatedAt
doneAt
removedAt
extendedPatient {
name
surname
identificationNumber
}
lastMessage {
createdAt
}
}
}
}
"""
# ================================
# 🧿 SAFE DATETIME PARSER (ALWAYS UTC → LOCAL)
# ================================
def to_mysql_dt_utc(iso_str):
    """Parse a Medevio ISO-8601 timestamp into a MySQL DATETIME string.

    Timestamps WITHOUT an explicit timezone are assumed to be UTC, then the
    value is converted to the host's local timezone before formatting, so the
    rows in MySQL line up with local wall-clock time.

    Returns None for empty input or unparseable strings.
    """
    if not iso_str:
        return None
    try:
        dt = parser.isoparse(iso_str)
        # If tz is missing → assume UTC
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=timezone.utc)
        # Convert to the local timezone for storage
        return dt.astimezone().strftime("%Y-%m-%d %H:%M:%S")
    except (ValueError, OverflowError, TypeError, OSError):
        # Narrowed from a bare `except:` — only swallow parse/conversion
        # failures, never KeyboardInterrupt/SystemExit or programming errors.
        return None
# ================================
# 🔑 TOKEN
# ================================
def read_token(path: Path) -> str:
    """Return the bare API token from *path*, dropping an optional 'Bearer ' prefix."""
    raw = path.read_text(encoding="utf-8").strip()
    return raw.split(" ", 1)[1] if raw.startswith("Bearer ") else raw
# ================================
# 💾 UPSERT (včetně správného updatedAt)
# ================================
def upsert(conn, r):
    """Insert or update one patient-request row in `pozadavky`.

    The stored `updatedAt` is the newer of the request's own updatedAt and the
    last message's createdAt, so new messages also bump the row. Lexicographic
    max is chronologically correct because the format is fixed-width
    "%Y-%m-%d %H:%M:%S". Commits after the statement.
    """
    patient = r.get("extendedPatient") or {}

    request_updated = to_mysql_dt_utc(r.get("updatedAt"))
    message_updated = to_mysql_dt_utc((r.get("lastMessage") or {}).get("createdAt"))

    # newest non-empty candidate; None when both are missing
    candidates = [t for t in (request_updated, message_updated) if t]
    final_updated = max(candidates) if candidates else None

    sql = """
    INSERT INTO pozadavky (
        id, displayTitle, createdAt, updatedAt, doneAt, removedAt,
        pacient_jmeno, pacient_prijmeni, pacient_rodnecislo
    ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)
    ON DUPLICATE KEY UPDATE
        displayTitle=VALUES(displayTitle),
        updatedAt=VALUES(updatedAt),
        doneAt=VALUES(doneAt),
        removedAt=VALUES(removedAt),
        pacient_jmeno=VALUES(pacient_jmeno),
        pacient_prijmeni=VALUES(pacient_prijmeni),
        pacient_rodnecislo=VALUES(pacient_rodnecislo)
    """
    row = (
        r.get("id"),
        r.get("displayTitle"),
        to_mysql_dt_utc(r.get("createdAt")),
        final_updated,
        to_mysql_dt_utc(r.get("doneAt")),
        to_mysql_dt_utc(r.get("removedAt")),
        patient.get("name"),
        patient.get("surname"),
        patient.get("identificationNumber"),
    )
    with conn.cursor() as cur:
        cur.execute(sql, row)
    conn.commit()
# ================================
# 📡 FETCH ACTIVE PAGE
# ================================
def fetch_active(headers, offset):
    """Fetch one page of ACTIVE requests; return (patientRequests, total_count)."""
    variables = {
        "clinicSlug": CLINIC_SLUG,
        "queueId": None,
        "queueAssignment": "ANY",
        "pageInfo": {"first": BATCH_SIZE, "offset": offset},
        "locale": "cs",
        "state": "ACTIVE",
    }
    payload = {
        "operationName": "ClinicRequestList2",
        "query": GRAPHQL_QUERY,
        "variables": variables,
    }
    resp = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers)
    resp.raise_for_status()
    # Missing keys (e.g. GraphQL error payloads) collapse to empty results.
    body = resp.json().get("data", {}).get("requestsResponse", {})
    return body.get("patientRequests", []), body.get("count", 0)
# ================================
# 🧠 MAIN
# ================================
def main():
    """Sync all ACTIVE Medevio requests into the `pozadavky` MySQL table.

    Pages through the GraphQL API in BATCH_SIZE chunks and upserts each
    request. Prints progress to stdout (intended for a scheduled task log).
    """
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    print(f"\n=== Sync ACTIVE požadavků @ {datetime.now():%Y-%m-%d %H:%M:%S} ===")
    offset = 0
    total_processed = 0
    total_count = None
    while True:
        batch, count = fetch_active(headers, offset)
        if total_count is None:
            # Total is captured from the first page only; later pages may
            # report a different count if requests change mid-sync.
            total_count = count
            print(f"📡 Celkem ACTIVE v Medevio: {count}")
        if not batch:
            break
        for r in batch:
            upsert(conn, r)
        total_processed += len(batch)
        print(f"{total_processed}/{total_count} ACTIVE processed")
        # Stop when the current window reaches the reported total.
        if offset + BATCH_SIZE >= count:
            break
        offset += BATCH_SIZE
        time.sleep(0.4)  # be polite to the API between pages
    conn.close()
    print("\n✅ ACTIVE sync hotovo!\n")
# ================================
if __name__ == "__main__":
main()
@@ -0,0 +1,210 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pymysql
import requests
from pathlib import Path
from datetime import datetime
from dateutil import parser
# ================================
# 🔧 CONFIGURATION
# ================================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
LIMIT = 500 # batch size / number of records
FULL_DOWNLOAD = False # 🔥 TOGGLE: False = last X, True = ALL batches
# NOTE(review): plaintext DB credentials are committed in source — consider
# moving them to environment variables or an untracked config file.
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3307,
    "user": "root",
    "password": "Vlado9674+",
    "database": "medevio",
    "charset": "utf8mb4",
    "cursorclass": pymysql.cursors.DictCursor,  # rows come back as dicts
}
# ⭐ Query with lastMessage
GRAPHQL_QUERY = r"""
query ClinicRequestList2(
$clinicSlug: String!,
$queueId: String,
$queueAssignment: QueueAssignmentFilter!,
$state: PatientRequestState,
$pageInfo: PageInfo!,
$locale: Locale!
) {
requestsResponse: listPatientRequestsForClinic2(
clinicSlug: $clinicSlug,
queueId: $queueId,
queueAssignment: $queueAssignment,
state: $state,
pageInfo: $pageInfo
) {
count
patientRequests {
id
displayTitle(locale: $locale)
createdAt
updatedAt
doneAt
removedAt
extendedPatient {
name
surname
identificationNumber
}
lastMessage {
createdAt
}
}
}
}
"""
# ================================
# TOKEN
# ================================
def read_token(path: Path) -> str:
    """Read the auth token file, dropping a leading 'Bearer ' prefix if present."""
    text = path.read_text(encoding="utf-8").strip()
    if text.startswith("Bearer "):
        return text.split(" ", 1)[1]
    return text
# ================================
# DATETIME PARSER (UTC → MySQL)
# ================================
def to_mysql_dt(iso_str):
    """Parse an ISO-8601 timestamp and format it for MySQL in local time.

    Aware timestamps are converted to the host timezone via astimezone();
    returns None for empty or unparseable input.
    """
    if not iso_str:
        return None
    try:
        dt = parser.isoparse(iso_str)  # ISO8601 → datetime
        return dt.astimezone().strftime("%Y-%m-%d %H:%M:%S")
    except (ValueError, OverflowError, TypeError, OSError):
        # Narrowed from a bare `except:` so real errors are not hidden.
        return None
# ================================
# UPSERT REQUEST
# ================================
def upsert(conn, r):
    """Insert or update one DONE request row in `pozadavky`.

    `updatedAt` is the newer of the request's updatedAt and the last message's
    createdAt (string max is chronologically correct for the fixed-width
    "%Y-%m-%d %H:%M:%S" format). Commits after the statement.
    """
    p = r.get("extendedPatient") or {}
    api_updated = to_mysql_dt(r.get("updatedAt"))
    last_msg = r.get("lastMessage") or {}
    msg_at = to_mysql_dt(last_msg.get("createdAt"))
    # Newest of the two candidates; either may be None.
    def max_dt(a, b):
        if a and b:
            return max(a, b)
        return a or b
    final_updated = max_dt(api_updated, msg_at)
    # createdAt is intentionally not in the ON DUPLICATE KEY UPDATE list —
    # it is written once on insert and never overwritten.
    sql = """
    INSERT INTO pozadavky (
        id, displayTitle, createdAt, updatedAt, doneAt, removedAt,
        pacient_jmeno, pacient_prijmeni, pacient_rodnecislo
    ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)
    ON DUPLICATE KEY UPDATE
        displayTitle=VALUES(displayTitle),
        updatedAt=VALUES(updatedAt),
        doneAt=VALUES(doneAt),
        removedAt=VALUES(removedAt),
        pacient_jmeno=VALUES(pacient_jmeno),
        pacient_prijmeni=VALUES(pacient_prijmeni),
        pacient_rodnecislo=VALUES(pacient_rodnecislo)
    """
    vals = (
        r["id"],
        r.get("displayTitle"),
        to_mysql_dt(r.get("createdAt")),
        final_updated,
        to_mysql_dt(r.get("doneAt")),
        to_mysql_dt(r.get("removedAt")),
        p.get("name"),
        p.get("surname"),
        p.get("identificationNumber"),
    )
    with conn.cursor() as cur:
        cur.execute(sql, vals)
    conn.commit()
# ================================
# FETCH DONE REQUESTS (one batch)
# ================================
def fetch_done(headers, offset):
    """Fetch one page of DONE requests; return (patientRequests, total_count)."""
    variables = {  # renamed from `vars`, which shadowed the builtin
        "clinicSlug": CLINIC_SLUG,
        "queueId": None,
        "queueAssignment": "ANY",
        "pageInfo": {"first": LIMIT, "offset": offset},
        "locale": "cs",
        "state": "DONE",
    }
    payload = {
        "operationName": "ClinicRequestList2",
        "query": GRAPHQL_QUERY,
        "variables": variables,
    }
    r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers)
    r.raise_for_status()
    # Tolerate missing keys (e.g. GraphQL error payloads) instead of raising
    # KeyError — consistent with the ACTIVE-sync script's fetch_active.
    data = (r.json().get("data") or {}).get("requestsResponse") or {}
    return data.get("patientRequests", []), data.get("count", 0)
# ================================
# MAIN
# ================================
def main():
    """Sync DONE (closed) Medevio requests into `pozadavky`.

    With FULL_DOWNLOAD=False only the first LIMIT records are processed;
    with True it pages through the entire DONE backlog.
    """
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    print(f"\n=== Sync CLOSED requests @ {datetime.now():%Y-%m-%d %H:%M:%S} ===")
    offset = 0
    total_count = None
    total_processed = 0
    while True:
        batch, count = fetch_done(headers, offset)
        if total_count is None:
            # Total captured from the first page only.
            total_count = count
            print(f"📡 Total DONE in Medevio: {count}")
        if not batch:
            break
        print(f" • Processing batch offset={offset} size={len(batch)}")
        for r in batch:
            upsert(conn, r)
        total_processed += len(batch)
        if not FULL_DOWNLOAD:
            # process only last LIMIT records
            break
        # FULL DOWNLOAD → fetch next batch
        offset += LIMIT
        if offset >= count:
            break
    conn.close()
    print(f"\n✅ DONE — {total_processed} requests synced.\n")
if __name__ == "__main__":
main()
@@ -0,0 +1,227 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Download and store Medevio questionnaires (userNote + eCRF) for all patient requests.
Uses the verified working query "GetPatientRequest2".
"""
import json
import requests
import pymysql
from datetime import datetime
from pathlib import Path
import time
import sys
# Force UTF-8 output even under Windows Task Scheduler
import sys
try:
sys.stdout.reconfigure(encoding='utf-8')
sys.stderr.reconfigure(encoding='utf-8')
except AttributeError:
# Python < 3.7 fallback (not needed for you, but safe)
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
# ==============================
# 🛡 SAFE PRINT FOR CP1250 / EMOJI
# ==============================
def safe_print(text: str):
    """Print *text*, degrading gracefully on non-UTF-8 consoles (e.g. CP1250)."""
    encoding = sys.stdout.encoding or ""
    if not encoding.lower().startswith("utf"):
        # Non-UTF console: drop emoji and anything outside the BMP.
        text = "".join(c for c in text if ord(c) < 65536)
    try:
        print(text)
    except UnicodeEncodeError:
        # Last resort: keep plain ASCII only.
        print("".join(c for c in text if ord(c) < 128))
# ==============================
# 🔧 CONFIGURATION
# ==============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
GRAPHQL_URL = "https://api.medevio.cz/graphql"
# NOTE(review): plaintext DB credentials are committed in source — consider
# moving them to environment variables or an untracked config file.
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3307,
    "user": "root",
    "password": "Vlado9674+",
    "database": "medevio",
    "charset": "utf8mb4",
    "cursorclass": pymysql.cursors.DictCursor,  # rows come back as dicts
}
# ==============================
# 🕒 DATETIME FIXER
# ==============================
def fix_datetime(dt_str):
    """Convert an ISO-8601 string into a naive UTC datetime for MySQL.

    'Z' and explicit offsets are honoured: aware timestamps are normalised to
    UTC wall-clock and the tzinfo dropped. (Previously non-UTC offsets were
    left as aware datetimes while 'Z'/'+00:00' were stripped textually, so
    mixed-offset inputs produced inconsistent values.)

    Returns None for empty or unparseable input.
    """
    if not dt_str:
        return None
    try:
        dt = datetime.fromisoformat(dt_str.replace("Z", "+00:00"))
    except Exception:
        return None
    if dt.tzinfo is not None:
        # Shift to UTC wall-clock, then store naive (matches 'Z' inputs).
        dt = (dt - dt.utcoffset()).replace(tzinfo=None)
    return dt
# Optional filter
CREATED_AFTER = "2025-01-01"
# ==============================
# 🧮 HELPERS
# ==============================
def read_token(p: Path) -> str:
    """Return the token from *p* without any leading 'Bearer ' prefix."""
    token = p.read_text(encoding="utf-8").strip()
    prefix = "Bearer "
    return token[len(prefix):] if token.startswith(prefix) else token
GRAPHQL_QUERY = r"""
query GetPatientRequest2($requestId: UUID!, $clinicSlug: String!, $locale: Locale!) {
request: getPatientRequest2(patientRequestId: $requestId, clinicSlug: $clinicSlug) {
id
displayTitle(locale: $locale)
createdAt
updatedAt
userNote
eventType
extendedPatient(clinicSlug: $clinicSlug) {
name
surname
identificationNumber
}
ecrfFilledData(locale: $locale) {
name
groups {
label
fields {
name
label
type
value
}
}
}
}
}
"""
def fetch_questionnaire(headers, request_id, clinic_slug):
    """POST the GetPatientRequest2 query; return the request node or None on HTTP error."""
    variables = {
        "requestId": request_id,
        "clinicSlug": clinic_slug,
        "locale": "cs",
    }
    payload = {
        "operationName": "GetPatientRequest2",
        "query": GRAPHQL_QUERY,
        "variables": variables,
    }
    r = requests.post(GRAPHQL_URL, json=payload, headers=headers, timeout=40)
    if r.status_code != 200:
        safe_print(f"❌ HTTP {r.status_code} for {request_id}: {r.text}")
        return None
    return r.json().get("data", {}).get("request")
def insert_questionnaire(cur, req):
    """Upsert one questionnaire (userNote + eCRF JSON) into medevio_questionnaires.

    The eCRF structure is serialized with ensure_ascii=False so Czech text
    stays readable in the database. No-op when *req* is falsy.
    """
    if not req:
        return
    patient = req.get("extendedPatient") or {}
    row = (
        req.get("id"),
        fix_datetime(req.get("createdAt")),
        fix_datetime(req.get("updatedAt")),
        req.get("userNote"),
        json.dumps(req.get("ecrfFilledData"), ensure_ascii=False),
    )
    cur.execute("""
        INSERT INTO medevio_questionnaires (
            request_id, created_at, updated_at, user_note, ecrf_json
        )
        VALUES (%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            updated_at = VALUES(updated_at),
            user_note = VALUES(user_note),
            ecrf_json = VALUES(ecrf_json),
            updated_local = NOW()
    """, row)
    safe_print(f" 💾 Stored questionnaire for {patient.get('surname','')} {patient.get('name','')}")
# ==============================
# 🧠 MAIN
# ==============================
def main():
    """Download questionnaires for requests whose questionnaireprocessed
    marker is missing or older than updatedAt, and store them in MySQL.
    """
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    # load list of requests
    with conn.cursor() as cur:
        # Parenthesized WHERE so the optional createdAt filter below applies
        # to the whole condition.
        sql = """
        SELECT id, pacient_jmeno, pacient_prijmeni, createdAt, updatedAt, questionnaireprocessed
        FROM pozadavky
        WHERE (questionnaireprocessed IS NULL OR questionnaireprocessed < updatedAt)
        """
        if CREATED_AFTER:
            sql += " AND createdAt >= %s"
            cur.execute(sql, (CREATED_AFTER,))
        else:
            cur.execute(sql)
        rows = cur.fetchall()
    safe_print(f"📋 Found {len(rows)} requests needing questionnaire check.")
    # process each one
    for i, row in enumerate(rows, 1):
        req_id = row["id"]
        safe_print(f"\n[{i}/{len(rows)}] 🔍 Fetching questionnaire for {req_id} ...")
        req = fetch_questionnaire(headers, req_id, CLINIC_SLUG)
        if not req:
            safe_print(" ⚠️ No questionnaire data found.")
            continue
        with conn.cursor() as cur:
            insert_questionnaire(cur, req)
            # Mark as processed only after a successful store.
            cur.execute(
                "UPDATE pozadavky SET questionnaireprocessed = NOW() WHERE id = %s",
                (req_id,)
            )
        conn.commit()
        time.sleep(0.6)  # polite API delay
    conn.close()
    safe_print("\n✅ Done! All questionnaires stored in MySQL table `medevio_questionnaires`.")
# ==============================
if __name__ == "__main__":
main()
@@ -0,0 +1,287 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Stáhne konverzaci pro požadavky, kde:
messagesProcessed IS NULL OR messagesProcessed < updatedAt.
Vloží do medevio_conversation a přílohy do medevio_downloads.
"""
import zlib
import json
import requests
import pymysql
from pathlib import Path
from datetime import datetime
import time
import sys
# Force UTF-8 output even under Windows Task Scheduler
import sys
try:
sys.stdout.reconfigure(encoding='utf-8')
sys.stderr.reconfigure(encoding='utf-8')
except AttributeError:
# Python < 3.7 fallback (not needed for you, but safe)
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
# ==============================
# 🛡 SAFE PRINT FOR CP1250 / EMOJI
# ==============================
def safe_print(text: str):
    """Print *text*; strip non-BMP chars on non-UTF consoles, fall back to ASCII."""
    enc = (sys.stdout.encoding or "").lower()
    if not enc.startswith("utf"):
        # Task Scheduler consoles (CP1250) cannot render emoji — drop them.
        text = "".join(ch for ch in text if ord(ch) < 65536)
    try:
        print(text)
    except UnicodeEncodeError:
        print("".join(ch for ch in text if ord(ch) < 128))
# ==============================
# 🔧 CONFIGURATION
# ==============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
# NOTE(review): plaintext DB credentials are committed in source — consider
# moving them to environment variables or an untracked config file.
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3307,
    "user": "root",
    "password": "Vlado9674+",
    "database": "medevio",
    "charset": "utf8mb4",
    "cursorclass": pymysql.cursors.DictCursor,  # rows come back as dicts
}
GRAPHQL_QUERY_MESSAGES = r"""
query UseMessages_ListMessages($requestId: String!, $updatedSince: DateTime) {
messages: listMessages(patientRequestId: $requestId, updatedSince: $updatedSince) {
id
createdAt
updatedAt
readAt
text
type
sender {
id
name
surname
clinicId
}
medicalRecord {
id
description
contentType
url
downloadUrl
token
createdAt
updatedAt
}
}
}
"""
# ==============================
# ⏱ DATETIME PARSER
# ==============================
def parse_dt(s):
    """Parse an ISO-8601 timestamp ('Z' accepted); fall back to a fixed-width
    parse of the first 19 characters; return None when both fail.
    """
    if not s:
        return None
    try:
        return datetime.fromisoformat(s.replace("Z", "+00:00"))
    except (ValueError, TypeError):
        pass  # fall through to the lenient fixed-width parse
    try:
        return datetime.strptime(s[:19], "%Y-%m-%dT%H:%M:%S")
    except (ValueError, TypeError):
        # Narrowed from two bare `except:` clauses.
        return None
# ==============================
# 🔐 TOKEN
# ==============================
def read_token(path: Path) -> str:
    """Return the token from token.txt, stripping only a leading 'Bearer ' prefix.

    The previous str.replace removed 'Bearer ' anywhere in the string, which
    would corrupt a token that happened to contain that substring; the other
    sync scripts already use the prefix-only form.
    """
    tok = path.read_text(encoding="utf-8").strip()
    return tok.split(" ", 1)[1] if tok.startswith("Bearer ") else tok
# ==============================
# 📡 FETCH MESSAGES
# ==============================
def fetch_messages(headers, request_id):
    """Return all messages for one patient request; empty list on HTTP error."""
    payload = {
        "operationName": "UseMessages_ListMessages",
        "query": GRAPHQL_QUERY_MESSAGES,
        "variables": {"requestId": request_id, "updatedSince": None},
    }
    r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
    if r.status_code != 200:
        safe_print(f"❌ HTTP {r.status_code} for request {request_id}")
        return []
    body = r.json().get("data", {})
    # `or []` guards against an explicit null in the GraphQL response.
    return body.get("messages", []) or []
# ==============================
# 💾 SAVE MESSAGE
# ==============================
def insert_message(cur, req_id, msg):
    """Upsert one message row (plus attachment metadata) into medevio_conversation."""
    sender = msg.get("sender") or {}
    name_parts = [sender.get("name"), sender.get("surname")]
    sender_name = " ".join(p for p in name_parts if p) or None
    mr = msg.get("medicalRecord") or {}
    sql = """
    INSERT INTO medevio_conversation (
        id, request_id,
        sender_name, sender_id, sender_clinic_id,
        text, created_at, read_at, updated_at,
        attachment_url, attachment_description, attachment_content_type
    ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
    ON DUPLICATE KEY UPDATE
        sender_name = VALUES(sender_name),
        sender_id = VALUES(sender_id),
        sender_clinic_id = VALUES(sender_clinic_id),
        text = VALUES(text),
        created_at = VALUES(created_at),
        read_at = VALUES(read_at),
        updated_at = VALUES(updated_at),
        attachment_url = VALUES(attachment_url),
        attachment_description = VALUES(attachment_description),
        attachment_content_type = VALUES(attachment_content_type)
    """
    row = (
        msg.get("id"),
        req_id,
        sender_name,
        sender.get("id"),
        sender.get("clinicId"),
        msg.get("text"),
        parse_dt(msg.get("createdAt")),
        parse_dt(msg.get("readAt")),
        parse_dt(msg.get("updatedAt")),
        mr.get("downloadUrl") or mr.get("url"),
        mr.get("description"),
        mr.get("contentType")
    )
    cur.execute(sql, row)
# ==============================
# 💾 DOWNLOAD MESSAGE ATTACHMENT
# ==============================
def insert_download(cur, req_id, msg, existing_ids):
    """Download a message attachment once and store its bytes in medevio_downloads.

    Attachments already present in *existing_ids* are skipped; successfully
    stored IDs are added to the set so later calls dedupe against them.
    """
    mr = msg.get("medicalRecord") or {}
    attachment_id = mr.get("id")
    if not attachment_id or attachment_id in existing_ids:
        return
    url = mr.get("downloadUrl") or mr.get("url")
    if not url:
        return
    try:
        resp = requests.get(url, timeout=30)
        resp.raise_for_status()
        data = resp.content
    except Exception as e:
        safe_print(f"⚠️ Failed to download: {e}")
        return
    # Last path segment, without the query string.
    filename = url.split("/")[-1].split("?")[0]
    cur.execute("""
        INSERT INTO medevio_downloads (
            request_id, attachment_id, attachment_type,
            filename, content_type, file_size, created_at, file_content
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            file_content = VALUES(file_content),
            file_size = VALUES(file_size),
            downloaded_at = NOW()
    """, (
        req_id,
        attachment_id,
        "MESSAGE_ATTACHMENT",
        filename,
        mr.get("contentType"),
        len(data),
        parse_dt(msg.get("createdAt")),
        data
    ))
    existing_ids.add(attachment_id)
# ==============================
# 🧠 MAIN
# ==============================
def main():
    """Full message sync: pull conversations and attachments for every request
    whose messagesProcessed marker is missing or older than updatedAt.
    """
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    # ---- Load existing attachments
    with conn.cursor() as cur:
        cur.execute("SELECT attachment_id FROM medevio_downloads")
        existing_ids = {row["attachment_id"] for row in cur.fetchall()}
    safe_print(f"📦 Already downloaded attachments: {len(existing_ids)}\n")
    # ---- Select pozadavky needing message sync
    sql = """
        SELECT id
        FROM pozadavky
        WHERE messagesProcessed IS NULL
           OR messagesProcessed < updatedAt
    """
    with conn.cursor() as cur:
        cur.execute(sql)
        requests_to_process = cur.fetchall()
    safe_print(f"📋 Found {len(requests_to_process)} pozadavků requiring message sync.\n")
    # ---- Process each record
    for idx, row in enumerate(requests_to_process, 1):
        req_id = row["id"]
        safe_print(f"[{idx}/{len(requests_to_process)}] Processing {req_id}")
        messages = fetch_messages(headers, req_id)
        with conn.cursor() as cur:
            for msg in messages:
                insert_message(cur, req_id, msg)
                insert_download(cur, req_id, msg, existing_ids)
        conn.commit()
        # Mark processed only after the batch above committed.
        with conn.cursor() as cur:
            cur.execute("UPDATE pozadavky SET messagesProcessed = NOW() WHERE id = %s", (req_id,))
        conn.commit()
        safe_print(f"{len(messages)} messages saved\n")
        time.sleep(0.25)  # polite API delay
    conn.close()
    safe_print("🎉 Done!")
if __name__ == "__main__":
main()
@@ -0,0 +1,293 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Delta sync Medevio communication.
Stáhne pouze zprávy změněné po messagesProcessed pro každý požadavek.
"""
import json
import requests
import pymysql
from pathlib import Path
from datetime import datetime
import time
import sys
# ==============================
# UTF-8 SAFE OUTPUT
# ==============================
try:
sys.stdout.reconfigure(encoding='utf-8')
sys.stderr.reconfigure(encoding='utf-8')
except AttributeError:
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def safe_print(text: str):
    """Console-safe print: trims non-BMP characters unless stdout is UTF-*."""
    if not (sys.stdout.encoding or "").lower().startswith("utf"):
        text = "".join(c for c in text if ord(c) < 65536)
    try:
        print(text)
    except UnicodeEncodeError:
        # ASCII-only fallback for stubborn consoles.
        print("".join(c for c in text if ord(c) < 128))
# ==============================
# CONFIG
# ==============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
# NOTE(review): plaintext DB credentials are committed in source — consider
# moving them to environment variables or an untracked config file.
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3307,
    "user": "root",
    "password": "Vlado9674+",
    "database": "medevio",
    "charset": "utf8mb4",
    "cursorclass": pymysql.cursors.DictCursor,  # rows come back as dicts
}
GRAPHQL_QUERY_MESSAGES = r"""
query UseMessages_ListMessages($requestId: String!, $updatedSince: DateTime) {
messages: listMessages(
patientRequestId: $requestId,
updatedSince: $updatedSince
) {
id
createdAt
updatedAt
readAt
text
type
sender {
id
name
surname
clinicId
}
medicalRecord {
id
description
contentType
url
downloadUrl
createdAt
updatedAt
}
}
}
"""
# ==============================
# HELPERS
# ==============================
def parse_dt(s):
    """Parse an ISO-8601 timestamp ('Z' accepted); None when empty or invalid."""
    if not s:
        return None
    normalized = s.replace("Z", "+00:00")
    try:
        return datetime.fromisoformat(normalized)
    except Exception:
        return None
def read_token(path: Path) -> str:
    """Read token.txt; strip only a leading 'Bearer ' prefix.

    Replaces the previous str.replace, which removed 'Bearer ' anywhere in
    the string and could corrupt tokens containing that substring.
    """
    tok = path.read_text(encoding="utf-8").strip()
    if tok.startswith("Bearer "):
        return tok.split(" ", 1)[1]
    return tok
# ==============================
# FETCH MESSAGES (DELTA)
# ==============================
def fetch_messages(headers, request_id, updated_since):
    """Fetch messages updated after *updated_since* (None = all); [] on any error."""
    variables = {
        "requestId": request_id,
        "updatedSince": updated_since,
    }
    payload = {
        "operationName": "UseMessages_ListMessages",
        "query": GRAPHQL_QUERY_MESSAGES,
        "variables": variables,
    }
    r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
    if r.status_code != 200:
        safe_print(f"❌ HTTP {r.status_code} for request {request_id}")
        return []
    j = r.json()
    # GraphQL reports failures in-band — surface them instead of KeyError-ing.
    if "errors" in j:
        safe_print(f"❌ GraphQL error for {request_id}: {j['errors']}")
        return []
    return j.get("data", {}).get("messages", []) or []
# ==============================
# INSERT MESSAGE
# ==============================
def insert_message(cur, req_id, msg):
    """Upsert one message row into medevio_conversation.

    Attachment metadata (URL/description/content type) from the message's
    medicalRecord is stored on the same row; the binary content itself is
    handled separately by insert_download. Caller commits.
    """
    sender = msg.get("sender") or {}
    # "Name Surname", or None when both parts are missing.
    sender_name = " ".join(
        x for x in [sender.get("name"), sender.get("surname")] if x
    ) or None
    mr = msg.get("medicalRecord") or {}
    sql = """
    INSERT INTO medevio_conversation (
        id, request_id,
        sender_name, sender_id, sender_clinic_id,
        text, created_at, read_at, updated_at,
        attachment_url, attachment_description, attachment_content_type
    ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
    ON DUPLICATE KEY UPDATE
        sender_name = VALUES(sender_name),
        sender_id = VALUES(sender_id),
        sender_clinic_id = VALUES(sender_clinic_id),
        text = VALUES(text),
        created_at = VALUES(created_at),
        read_at = VALUES(read_at),
        updated_at = VALUES(updated_at),
        attachment_url = VALUES(attachment_url),
        attachment_description = VALUES(attachment_description),
        attachment_content_type = VALUES(attachment_content_type)
    """
    cur.execute(sql, (
        msg.get("id"),
        req_id,
        sender_name,
        sender.get("id"),
        sender.get("clinicId"),
        msg.get("text"),
        parse_dt(msg.get("createdAt")),
        parse_dt(msg.get("readAt")),
        parse_dt(msg.get("updatedAt")),
        mr.get("downloadUrl") or mr.get("url"),
        mr.get("description"),
        mr.get("contentType")
    ))
# ==============================
# INSERT ATTACHMENT (DEDUP)
# ==============================
def insert_download(cur, req_id, msg, existing_ids):
    """Download a message attachment (deduplicated) into medevio_downloads.

    Skips messages without an attachment, attachments already in
    *existing_ids*, and attachments without a usable URL. Adds stored IDs
    to *existing_ids*. Caller commits.
    """
    mr = msg.get("medicalRecord") or {}
    attachment_id = mr.get("id")
    if not attachment_id or attachment_id in existing_ids:
        return
    url = mr.get("downloadUrl") or mr.get("url")
    if not url:
        return
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        data = r.content
    except Exception as e:
        # Best-effort: log and move on; the row can be retried next run.
        safe_print(f"⚠️ Attachment download failed: {e}")
        return
    # Last path segment without the query string.
    filename = url.split("/")[-1].split("?")[0]
    cur.execute("""
        INSERT INTO medevio_downloads (
            request_id, attachment_id, attachment_type,
            filename, content_type, file_size, created_at, file_content
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            file_content = VALUES(file_content),
            file_size = VALUES(file_size),
            downloaded_at = NOW()
    """, (
        req_id,
        attachment_id,
        "MESSAGE_ATTACHMENT",
        filename,
        mr.get("contentType"),
        len(data),
        parse_dt(msg.get("createdAt")),
        data
    ))
    existing_ids.add(attachment_id)
# ==============================
# MAIN
# ==============================
def main():
    """Delta message sync: fetch only messages updated since each request's
    messagesProcessed marker, then bump the marker.
    """
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    # existing attachments
    with conn.cursor() as cur:
        cur.execute("SELECT attachment_id FROM medevio_downloads")
        existing_ids = {r["attachment_id"] for r in cur.fetchall()}
    # select requests needing sync
    with conn.cursor() as cur:
        cur.execute("""
            SELECT id, messagesProcessed
            FROM pozadavky
            WHERE messagesProcessed IS NULL
               OR messagesProcessed < updatedAt
        """)
        rows = cur.fetchall()
    safe_print(f"📋 Found {len(rows)} requests for message delta-sync\n")
    for i, row in enumerate(rows, 1):
        req_id = row["id"]
        updated_since = row["messagesProcessed"]
        if updated_since:
            # NOTE(review): messagesProcessed is written with MySQL NOW()
            # (server-local time) but sent here with a "Z" suffix, which the
            # API will read as UTC — confirm server timezone, otherwise the
            # delta window may be shifted.
            updated_since = updated_since.replace(microsecond=0).isoformat() + "Z"
        safe_print(f"[{i}/{len(rows)}] {req_id}")
        messages = fetch_messages(headers, req_id, updated_since)
        if not messages:
            safe_print(" ⏭ No new messages")
        else:
            with conn.cursor() as cur:
                for msg in messages:
                    insert_message(cur, req_id, msg)
                    insert_download(cur, req_id, msg, existing_ids)
            conn.commit()
            safe_print(f"{len(messages)} new/updated messages")
        # Marker is advanced even when no messages were returned.
        with conn.cursor() as cur:
            cur.execute(
                "UPDATE pozadavky SET messagesProcessed = NOW() WHERE id = %s",
                (req_id,)
            )
        conn.commit()
        time.sleep(0.25)  # polite API delay
    conn.close()
    safe_print("\n🎉 Delta message sync DONE")
# ==============================
if __name__ == "__main__":
main()
@@ -0,0 +1,246 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Download all attachments for pozadavky where attachmentsProcessed IS NULL
and (optionally) createdAt is newer than a cutoff date.
Store them in MySQL table `medevio_downloads`, and update pozadavky.attachmentsProcessed.
"""
import zlib
import json
import requests
import pymysql
from pathlib import Path
from datetime import datetime
import time
import sys
# Force UTF-8 output even under Windows Task Scheduler
import sys
try:
sys.stdout.reconfigure(encoding='utf-8')
sys.stderr.reconfigure(encoding='utf-8')
except AttributeError:
# Python < 3.7 fallback (not needed for you, but safe)
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
# ==============================
# 🛡 SAFE PRINT FOR CP1250 / EMOJI
# ==============================
def safe_print(text: str):
enc = sys.stdout.encoding or ""
if not enc or not enc.lower().startswith("utf"):
# strip emoji + characters outside BMP
text = ''.join(ch for ch in text if ord(ch) < 65536)
try:
print(text)
except UnicodeEncodeError:
# ASCII fallback
text = ''.join(ch for ch in text if ord(ch) < 128)
print(text)
# ==============================
# 🔧 CONFIGURATION
# ==============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
CREATED_AFTER = "2024-12-01" # optional filter
GRAPHQL_QUERY = r"""
query ClinicRequestDetail_GetPatientRequest2($requestId: UUID!) {
patientRequestMedicalRecords: listMedicalRecordsForPatientRequest(
attachmentTypes: [ECRF_FILL_ATTACHMENT, MESSAGE_ATTACHMENT, PATIENT_REQUEST_ATTACHMENT]
patientRequestId: $requestId
pageInfo: {first: 100, offset: 0}
) {
attachmentType
id
medicalRecord {
contentType
description
downloadUrl
id
url
visibleToPatient
}
}
}
"""
# ==============================
# 🧮 HELPERS
# ==============================
def extract_filename_from_url(url: str) -> str:
    """Return the last path segment of *url*, without its query string.

    Falls back to "unknown_filename" when *url* is not a string (e.g. None).
    """
    try:
        return url.split("/")[-1].split("?")[0]
    except (AttributeError, TypeError):
        # Narrowed from a bare `except:` — only non-string inputs can fail here.
        return "unknown_filename"
def read_token(p: Path) -> str:
tok = p.read_text(encoding="utf-8").strip()
return tok.split(" ", 1)[1] if tok.startswith("Bearer ") else tok
# ==============================
# 📡 FETCH ATTACHMENTS
# ==============================
def fetch_attachments(headers, request_id):
    """Return the medical-record attachment list for one request; [] on HTTP error."""
    payload = {
        "operationName": "ClinicRequestDetail_GetPatientRequest2",
        "query": GRAPHQL_QUERY,
        "variables": {"requestId": request_id},
    }
    r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
    if r.status_code != 200:
        safe_print(f"❌ HTTP {r.status_code} for request {request_id}")
        return []
    body = r.json().get("data", {})
    return body.get("patientRequestMedicalRecords", [])
# ==============================
# 💾 SAVE TO MYSQL
# ==============================
def insert_download(cur, req_id, a, m, created_date, existing_ids):
    """Download one attachment and upsert it into medevio_downloads.

    *a* is the attachment wrapper (id, attachmentType), *m* its medicalRecord
    (downloadUrl, contentType). Returns True on success, False when skipped
    or failed. Adds stored IDs to *existing_ids*. Caller commits.
    """
    attachment_id = a.get("id")
    if attachment_id in existing_ids:
        safe_print(f" ⏭️ Already downloaded {attachment_id}")
        return False
    url = m.get("downloadUrl")
    if not url:
        safe_print(" ⚠️ Missing download URL")
        return False
    filename = extract_filename_from_url(url)
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        content = r.content
    except Exception as e:
        # Best-effort: log and let a later run retry this attachment.
        safe_print(f" ⚠️ Download failed {url}: {e}")
        return False
    file_size = len(content)
    attachment_type = a.get("attachmentType")
    content_type = m.get("contentType")
    cur.execute("""
        INSERT INTO medevio_downloads (
            request_id, attachment_id, attachment_type,
            filename, content_type, file_size,
            created_at, file_content
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            file_content = VALUES(file_content),
            file_size = VALUES(file_size),
            downloaded_at = NOW()
    """, (
        req_id,
        attachment_id,
        attachment_type,
        filename,
        content_type,
        file_size,
        created_date,
        content,
    ))
    existing_ids.add(attachment_id)
    # Bug fix: previously printed the literal "(unknown)" instead of the
    # filename that was just computed and stored.
    safe_print(f" 💾 Saved {filename} ({file_size/1024:.1f} kB)")
    return True
# ==============================
# 🧠 MAIN
# ==============================
def main():
    """Download attachments for pozadavky needing processing and mark them done.

    Selects rows where attachmentsProcessed is missing or older than
    updatedAt, optionally restricted to rows created after CREATED_AFTER.
    """
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    # Load existing attachments
    with conn.cursor() as cur:
        cur.execute("SELECT attachment_id FROM medevio_downloads")
        existing_ids = {row["attachment_id"] for row in cur.fetchall()}
    safe_print(f"{len(existing_ids)} attachments already saved.")
    # Build query for pozadavky.
    # Bug fix: the OR condition must be parenthesized — otherwise the
    # appended "AND createdAt >= %s" bound only to the second branch
    # (A OR (B AND C)), so the date filter silently skipped rows with
    # attachmentsProcessed IS NULL. Matches the questionnaire script's query.
    sql = """
        SELECT id, pacient_prijmeni, pacient_jmeno, createdAt, updatedAt, attachmentsProcessed
        FROM pozadavky
        WHERE (attachmentsProcessed IS NULL
           OR updatedAt > attachmentsProcessed)
    """
    params = []
    if CREATED_AFTER:
        sql += " AND createdAt >= %s"
        params.append(CREATED_AFTER)
    with conn.cursor() as cur:
        cur.execute(sql, params)
        req_rows = cur.fetchall()
    safe_print(f"📋 Found {len(req_rows)} pozadavky to process.")
    # Process each pozadavek
    for i, row in enumerate(req_rows, 1):
        req_id = row["id"]
        prijmeni = row.get("pacient_prijmeni") or "Neznamy"
        jmeno = row.get("pacient_jmeno") or ""
        created_date = row.get("createdAt") or datetime.now()
        safe_print(f"\n[{i}/{len(req_rows)}] 🧾 {prijmeni}, {jmeno} ({req_id})")
        attachments = fetch_attachments(headers, req_id)
        if not attachments:
            # No attachments: still mark processed so the row isn't retried forever.
            safe_print(" ⚠️ No attachments found")
            with conn.cursor() as cur:
                cur.execute("UPDATE pozadavky SET attachmentsProcessed = NOW() WHERE id = %s", (req_id,))
            conn.commit()
            continue
        with conn.cursor() as cur:
            for a in attachments:
                m = a.get("medicalRecord") or {}
                insert_download(cur, req_id, a, m, created_date, existing_ids)
        conn.commit()
        with conn.cursor() as cur:
            cur.execute("UPDATE pozadavky SET attachmentsProcessed = NOW() WHERE id = %s", (req_id,))
        conn.commit()
        safe_print(f" ✅ Done ({len(attachments)} attachments)")
        time.sleep(0.3)  # polite API delay
    conn.close()
    safe_print("\n🎯 All attachments processed.")
# ==============================
if __name__ == "__main__":
main()
@@ -0,0 +1,252 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import shutil
import pymysql
import re
from pathlib import Path
from datetime import datetime
import time
import sys
# Force UTF-8 output even under Windows Task Scheduler
import sys
# Force stdout/stderr to UTF-8 so emoji in log output survive when the
# script runs under Windows Task Scheduler (default console cp1250).
try:
    sys.stdout.reconfigure(encoding='utf-8')
    sys.stderr.reconfigure(encoding='utf-8')
except AttributeError:
    # Python < 3.7 fallback (reconfigure() not available): rewrap the
    # underlying binary buffers with UTF-8 text wrappers instead.
    import io
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
# ==============================
# 🛡 SAFE PRINT FOR CP1250 / EMOJI
# ==============================
def safe_print(text: str = ""):
    """Print *text*, degrading gracefully on consoles that are not UTF-8.

    On non-UTF streams (e.g. cp1250 under Task Scheduler) characters
    outside the Basic Multilingual Plane (emoji) are dropped first;
    if printing still fails, fall back to pure ASCII.
    """
    encoding = (sys.stdout.encoding or "").lower()
    if not encoding.startswith("utf"):
        text = "".join(c for c in text if ord(c) < 0x10000)
    try:
        print(text)
    except UnicodeEncodeError:
        print("".join(c for c in text if ord(c) < 0x80))
# ==============================
# ⚙️ CONFIGURATION
# ==============================
# MySQL connection settings for the local `medevio` mirror database.
# NOTE(review): credentials are hard-coded in source — consider moving
# them to environment variables or a secrets file outside the repository.
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3307,
    "user": "root",
    "password": "Vlado9674+",
    "database": "medevio",
    "charset": "utf8mb4",
}
# Dropbox target folder; one subfolder per patient request is created here.
BASE_DIR = Path(r"u:\Dropbox\Ordinace\Dokumentace_ke_zpracování\MP")
BASE_DIR.mkdir(parents=True, exist_ok=True)
def sanitize_name(name: str) -> str:
    """Return *name* with characters illegal in Windows filenames replaced by '_'."""
    cleaned = re.sub(r'[<>:"/\\|?*\x00-\x1F]', "_", name)
    return cleaned.strip()
def make_abbrev(title: str) -> str:
    """Build an uppercase abbreviation of *title*.

    Alphabetic words contribute their first letter, purely numeric words
    are kept whole; e.g. "Test 123 Report" -> "T123R".
    """
    if not title:
        return ""
    parts = [w if w.isdigit() else w[0]
             for w in re.findall(r"[A-Za-zÁ-Žá-ž0-9]+", title)]
    return "".join(parts).upper()
# ==============================
# 🧹 DELETE UNEXPECTED FILES
# ==============================
def clean_folder(folder: Path, valid_files: set):
    """Delete files in *folder* that are not listed in *valid_files*.

    Files whose name starts with the "▲" review flag are always kept.
    A missing folder is silently ignored. Names are compared through
    sanitize_name() so on-disk names match the DB-derived forms.
    """
    if not folder.exists():
        return
    for f in folder.iterdir():
        if f.is_file():
            # BUGFIX: the flag character had been lost in the source
            # (startswith("") is always True), which made this function
            # a no-op. Restored the "▲" flag per the surrounding comments.
            if f.name.startswith("▲"):
                continue
            sanitized = sanitize_name(f.name)
            if sanitized not in valid_files:
                safe_print(f"🗑️ Removing unexpected file: {f.name}")
                try:
                    f.unlink()
                except Exception as e:
                    safe_print(f"⚠️ Could not delete {f}: {e}")
# ==============================
# 📦 DB CONNECTION
# ==============================
# Open one connection with two cursors: a dict cursor for metadata rows
# and a plain cursor reserved for BLOB fetches later in the main loop.
conn = pymysql.connect(**DB_CONFIG)
cur_meta = conn.cursor(pymysql.cursors.DictCursor)
cur_blob = conn.cursor()
safe_print("🔍 Loading metadata from DB (FAST)…")
# Load metadata only (no file_content BLOBs) for every stored attachment,
# joined with its parent request; newest requests first.
cur_meta.execute("""
    SELECT d.id AS download_id,
           d.request_id,
           d.filename,
           d.created_at,
           p.updatedAt AS req_updated_at,
           p.pacient_jmeno AS jmeno,
           p.pacient_prijmeni AS prijmeni,
           p.displayTitle
    FROM medevio_downloads d
    JOIN pozadavky p ON d.request_id = p.id
    ORDER BY p.updatedAt DESC
""")
rows = cur_meta.fetchall()
safe_print(f"📋 Found {len(rows)} attachment records.\n")
# ==============================
# 🧠 MAIN LOOP WITH PROGRESS
# ==============================
# Deduplicate request IDs while preserving the updatedAt DESC order.
unique_request_ids = []
seen = set()
for r in rows:
    req_id = r["request_id"]
    if req_id not in seen:
        unique_request_ids.append(req_id)
        seen.add(req_id)
total_requests = len(unique_request_ids)
safe_print(f"🔄 Processing {total_requests} unique requests...\n")
processed_requests = set()
current_index = 0
# Per-request export: build/locate the request folder, merge duplicate
# folders, remove unexpected files, write any missing attachments from
# the DB BLOBs, and clear the "▲" (already-reviewed) flag when new
# content arrives.
# BUGFIX: the "▲" flag character had been lost in the source (empty
# strings), making the marked-file check and flag-removal logic no-ops;
# restored per the surrounding comments.
for r in rows:
    req_id = r["request_id"]
    if req_id in processed_requests:
        continue
    processed_requests.add(req_id)
    current_index += 1
    percent = (current_index / total_requests) * 100
    safe_print(f"\n[ {percent:5.1f}% ] Processing request {current_index} / {total_requests}{req_id}")
    # ========== FETCH VALID FILENAMES ==========
    cur_meta.execute(
        "SELECT filename FROM medevio_downloads WHERE request_id=%s",
        (req_id,)
    )
    valid_files = {sanitize_name(row["filename"]) for row in cur_meta.fetchall()}
    # ========== BUILD FOLDER NAME ==========
    updated_at = r["req_updated_at"] or datetime.now()
    date_str = updated_at.strftime("%Y-%m-%d")
    prijmeni = sanitize_name(r["prijmeni"] or "Unknown")
    jmeno = sanitize_name(r["jmeno"] or "")
    title = r.get("displayTitle") or ""
    abbr = make_abbrev(title)
    clean_folder_name = sanitize_name(
        f"{date_str} {prijmeni}, {jmeno} [{abbr}] {req_id}"
    )
    # ========== DETECT EXISTING FOLDER ==========
    # Any folder containing the request UUID counts as this request's folder.
    existing_folder = None
    for f in BASE_DIR.iterdir():
        if f.is_dir() and req_id in f.name:
            existing_folder = f
            break
    main_folder = existing_folder if existing_folder else BASE_DIR / clean_folder_name
    # ========== MERGE DUPLICATES ==========
    possible_dups = [
        f for f in BASE_DIR.iterdir()
        if f.is_dir() and req_id in f.name and f != main_folder
    ]
    for dup in possible_dups:
        safe_print(f"♻️ Merging duplicate folder: {dup.name}")
        clean_folder(dup, valid_files)
        main_folder.mkdir(parents=True, exist_ok=True)
        for f in dup.iterdir():
            if f.is_file():
                target = main_folder / f.name
                if not target.exists():
                    f.rename(target)
        shutil.rmtree(dup, ignore_errors=True)
    # ========== CLEAN MAIN FOLDER ==========
    clean_folder(main_folder, valid_files)
    # ========== DOWNLOAD MISSING FILES ==========
    added_new_file = False
    main_folder.mkdir(parents=True, exist_ok=True)
    for filename in valid_files:
        dest_plain = main_folder / filename
        dest_marked = main_folder / ("▲" + filename)
        # Skip if the file already exists, plain or flagged.
        if dest_plain.exists() or dest_marked.exists():
            continue
        added_new_file = True
        cur_blob.execute(
            "SELECT file_content FROM medevio_downloads "
            "WHERE request_id=%s AND filename=%s",
            (req_id, filename)
        )
        row = cur_blob.fetchone()
        if not row:
            continue
        content = row[0]
        if not content:
            continue
        with open(dest_plain, "wb") as f:
            f.write(content)
        safe_print(f"💾 Wrote: {dest_plain.relative_to(BASE_DIR)}")
    # ========== REMOVE ▲ FLAG IF NEW FILES ADDED ==========
    if added_new_file and "▲" in main_folder.name:
        new_name = main_folder.name.replace("▲", "").strip()
        new_path = main_folder.parent / new_name
        if new_path != main_folder:
            try:
                main_folder.rename(new_path)
                safe_print(f"🔄 Folder flag ▲ removed → {new_name}")
                main_folder = new_path
            except Exception as e:
                safe_print(f"⚠️ Could not rename folder: {e}")
safe_print("\n🎯 Export complete.\n")
cur_blob.close()
cur_meta.close()
conn.close()
@@ -0,0 +1,224 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import shutil
import pymysql
import re
from pathlib import Path
from datetime import datetime
from collections import defaultdict
# ==============================
# ⚙️ CONFIGURATION
# ==============================
# MySQL connection settings for the `medevio` mirror database.
# NOTE(review): credentials are hard-coded in source — move to env vars.
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3307,
    "user": "root",
    "password": "Vlado9674+",
    "database": "medevio",
    "charset": "utf8mb4",
}
# Dropbox export root; per-request subfolders live here.
BASE_DIR = Path(r"u:\Dropbox\Ordinace\Dokumentace_ke_zpracování\MP")
BASE_DIR.mkdir(parents=True, exist_ok=True)
# ==============================
# 🔧 HELPERS
# ==============================
def sanitize_name(name: str) -> str:
    """Replace characters that are illegal in Windows filenames with '_'."""
    safe = re.sub(r'[<>:"/\\|?*\x00-\x1F]', "_", name)
    return safe.strip()
def make_abbrev(title: str) -> str:
    """Abbreviate *title*: first letter of each word, digits kept whole, uppercased."""
    if not title:
        return ""
    tokens = re.findall(r"[A-Za-zÁ-Žá-ž0-9]+", title)
    return "".join(t if t.isdigit() else t[0] for t in tokens).upper()
def clean_folder(folder: Path, valid_files: set):
    """Remove unexpected files from *folder*, keeping "▲"-flagged files.

    Files whose sanitized name is not in *valid_files* are deleted;
    a missing folder is ignored.
    """
    if not folder.exists():
        return
    for f in folder.iterdir():
        if f.is_file():
            # BUGFIX: the "▲" flag character had been lost in the source
            # (startswith("") is always True → function never deleted
            # anything). Restored per the docstring/comments.
            if f.name.startswith("▲"):
                continue
            sanitized = sanitize_name(f.name)
            if sanitized not in valid_files:
                print(f"🗑️ Removing unexpected file: {f.name}")
                try:
                    f.unlink()
                except Exception as e:
                    print(f"⚠️ Could not delete {f}: {e}")
# ==============================
# 📦 DB CONNECTION
# ==============================
# One connection, two cursors: dict cursor for metadata, plain cursor
# reserved for later BLOB fetches.
conn = pymysql.connect(**DB_CONFIG)
cur_meta = conn.cursor(pymysql.cursors.DictCursor)
cur_blob = conn.cursor()
print("🔍 Loading only requests with NEW attachments…")
# Select only requests never processed, or whose newest attachment is
# newer than the last processing timestamp (incremental run).
cur_meta.execute("""
    SELECT
        p.id AS request_id,
        p.displayTitle,
        p.pacient_jmeno,
        p.pacient_prijmeni,
        p.updatedAt,
        p.attachmentsProcessed,
        d.filename,
        d.created_at
    FROM pozadavky p
    JOIN medevio_downloads d ON d.request_id = p.id
    LEFT JOIN (
        SELECT request_id, MAX(created_at) AS last_attachment_ts
        FROM medevio_downloads
        GROUP BY request_id
    ) x ON x.request_id = p.id
    WHERE p.attachmentsProcessed IS NULL
       OR p.attachmentsProcessed < x.last_attachment_ts
    ORDER BY p.updatedAt DESC;
""")
rows = cur_meta.fetchall()
print(f"📋 Found {len(rows)} attachment rows belonging to requests needing processing.\n")
# ==============================
# 🧠 PREPARE REQUEST GROUPING
# ==============================
# Group attachment rows by their parent request id.
grouped = defaultdict(list)
for r in rows:
    grouped[r["request_id"]].append(r)
unique_request_ids = list(grouped.keys())
total_requests = len(unique_request_ids)
print(f"🔄 Processing {total_requests} requests needing updates…\n")
# ==============================
# 🧠 MAIN LOOP
# ==============================
# Incremental export: for each request with new attachments, locate or
# create its folder, clean unexpected files, write missing BLOBs, clear
# the "▲" folder flag when new content arrives, then stamp the request
# as processed.
# BUGFIX: the "▲" flag character had been lost in the source (empty
# strings made the flagged-file check and the folder-rename condition
# no-ops); restored per the surrounding comments.
index = 0
for req_id in unique_request_ids:
    index += 1
    pct = (index / total_requests) * 100
    print(f"\n[ {pct:5.1f}% ] Processing request {index}/{total_requests}{req_id}")
    req_rows = grouped[req_id]
    first = req_rows[0]
    # Build folder name
    updated_at = first["updatedAt"] or datetime.now()
    date_str = updated_at.strftime("%Y-%m-%d")
    prijmeni = sanitize_name(first["pacient_prijmeni"] or "Unknown")
    jmeno = sanitize_name(first["pacient_jmeno"] or "")
    abbr = make_abbrev(first["displayTitle"])
    desired_folder_name = sanitize_name(f"{date_str} {prijmeni}, {jmeno} [{abbr}] {req_id}")
    # Detect existing folder for request (any folder containing the UUID).
    main_folder = None
    for f in BASE_DIR.iterdir():
        if f.is_dir() and req_id in f.name:
            main_folder = f
            break
    if not main_folder:
        main_folder = BASE_DIR / desired_folder_name
        main_folder.mkdir(parents=True, exist_ok=True)
    # Build valid filename set
    valid_files = {sanitize_name(r["filename"]) for r in req_rows}
    # Clean unexpected non-▲ files
    clean_folder(main_folder, valid_files)
    # Track if ANY new files were downloaded
    added_new_file = False
    # DOWNLOAD MISSING FILES
    for r in req_rows:
        filename = sanitize_name(r["filename"])
        dest_plain = main_folder / filename
        dest_flag = main_folder / ("▲" + filename)
        # Skip if file already exists (plain or ▲-flagged)
        if dest_plain.exists() or dest_flag.exists():
            continue
        # Fetch content (BLOB loaded only when actually needed)
        cur_blob.execute("""
            SELECT file_content
            FROM medevio_downloads
            WHERE request_id=%s AND filename=%s
        """, (req_id, r["filename"]))
        row = cur_blob.fetchone()
        if not row or not row[0]:
            continue
        with open(dest_plain, "wb") as f:
            f.write(row[0])
        print(f"💾 Wrote: {dest_plain.relative_to(BASE_DIR)}")
        added_new_file = True
    # ------------------------------------
    # 🟦 FOLDER ▲ LOGIC (IMPORTANT)
    # ------------------------------------
    if added_new_file:
        # If folder contains ▲ in its name → remove it (new content means
        # the folder needs review again).
        if "▲" in main_folder.name:
            new_name = main_folder.name.replace("▲", "").strip()
            new_path = main_folder.parent / new_name
            try:
                main_folder.rename(new_path)
                print(f"🔄 Folder flag ▲ removed → {new_name}")
                main_folder = new_path
            except Exception as e:
                print(f"⚠️ Could not rename folder: {e}")
    else:
        # NO new files → NEVER rename folder
        pass
    # Mark request as processed so the next run skips it.
    cur_meta.execute(
        "UPDATE pozadavky SET attachmentsProcessed = NOW() WHERE id=%s",
        (req_id,)
    )
    conn.commit()
# ==============================
# 🏁 DONE
# ==============================
print("\n🎯 Export complete.\n")
cur_blob.close()
cur_meta.close()
conn.close()
@@ -0,0 +1,193 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import shutil
import pymysql
import re
from pathlib import Path
from datetime import datetime
# ==============================
# ⚙️ CONFIGURATION
# ==============================
# MySQL connection settings for the `medevio` mirror database.
# NOTE(review): credentials are hard-coded in source — move to env vars.
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3307,
    "user": "root",
    "password": "Vlado9674+",
    "database": "medevio",
    "charset": "utf8mb4",
}
# Dropbox export root; per-request subfolders live here.
BASE_DIR = Path(r"u:\Dropbox\Ordinace\Dokumentace_ke_zpracování\MP")
BASE_DIR.mkdir(parents=True, exist_ok=True)
def sanitize_name(name: str) -> str:
    """Replace invalid filename characters with underscore."""
    bad_chars = r'[<>:"/\\|?*\x00-\x1F]'
    return re.sub(bad_chars, "_", name).strip()
def make_abbrev(title: str) -> str:
    """Create abbreviation from displayTitle."""
    if not title:
        return ""
    pieces = []
    for word in re.findall(r"[A-Za-zÁ-Žá-ž0-9]+", title):
        if word.isdigit():
            pieces.append(word)
        else:
            pieces.append(word[0])
    return "".join(pieces).upper()
# ==============================
# 🧹 DELETE UNEXPECTED FILES
# ==============================
def clean_folder(folder: Path, valid_files: set):
    """Delete files not present in *valid_files*; keep "▲"-flagged files.

    Missing folders are ignored; delete failures are logged, not raised.
    """
    if not folder.exists():
        return
    for f in folder.iterdir():
        if f.is_file():
            # BUGFIX: lost "▲" restored — startswith("") was always True,
            # so the cleanup never removed anything.
            if f.name.startswith("▲"):
                continue
            sanitized = sanitize_name(f.name)
            if sanitized not in valid_files:
                print(f"🗑️ Removing unexpected file: {f.name}")
                try:
                    f.unlink()
                except Exception as e:
                    print(f"⚠️ Could not delete {f}: {e}")
# ==============================
# 📦 DB CONNECTION
# ==============================
# One connection, two cursors: dict cursor for metadata, plain cursor for
# on-demand BLOB fetches in the main loop.
conn = pymysql.connect(**DB_CONFIG)
cur_meta = conn.cursor(pymysql.cursors.DictCursor)
cur_blob = conn.cursor()
print("🔍 Loading ALL metadata without file_content…")
# ⭐ Load ALL metadata once (NO BLOBs) — keeps the initial query cheap.
cur_meta.execute("""
    SELECT
        d.request_id,
        d.filename,
        d.created_at,
        p.updatedAt AS req_updated_at,
        p.pacient_jmeno AS jmeno,
        p.pacient_prijmeni AS prijmeni,
        p.displayTitle
    FROM medevio_downloads d
    JOIN pozadavky p ON d.request_id = p.id
    ORDER BY p.updatedAt DESC;
""")
rows = cur_meta.fetchall()
print(f"📋 Found {len(rows)} metadata rows.\n")
# ==============================
# 🧠 PRE-GROUP METADATA
# ==============================
# Build dictionary: request_id → all metadata rows for that request
grouped = {}
for row in rows:
    grouped.setdefault(row["request_id"], []).append(row)
unique_request_ids = list(grouped.keys())
total_requests = len(unique_request_ids)
print(f"🔄 Processing {total_requests} unique requests…\n")
# ==============================
# 🧠 MAIN LOOP
# ==============================
# Per-request export with lazy BLOB loading: locate/create the folder,
# clean unexpected files, write missing attachments, clear the "▲" flag
# when new content arrived.
# BUGFIX: the "▲" flag character had been lost in the source (empty
# strings), disabling the flagged-file skip and the folder-flag removal;
# restored per the surrounding comments.
for idx, req_id in enumerate(unique_request_ids, start=1):
    pct = (idx / total_requests) * 100
    req_rows = grouped[req_id]
    first = req_rows[0]
    print(f"\n[ {pct:5.1f}% ] Processing request {idx}/{total_requests}{req_id}")
    # ======================
    # Build folder name
    # ======================
    updated_at = first["req_updated_at"] or datetime.now()
    date_str = updated_at.strftime("%Y-%m-%d")
    prijmeni = sanitize_name(first["prijmeni"] or "Unknown")
    jmeno = sanitize_name(first["jmeno"] or "")
    abbr = make_abbrev(first["displayTitle"] or "")
    clean_folder_name = sanitize_name(f"{date_str} {prijmeni}, {jmeno} [{abbr}] {req_id}")
    # Detect existing folder (any directory containing the request UUID).
    existing_folder = None
    for f in BASE_DIR.iterdir():
        if f.is_dir() and req_id in f.name:
            existing_folder = f
            break
    main_folder = existing_folder if existing_folder else BASE_DIR / clean_folder_name
    main_folder.mkdir(parents=True, exist_ok=True)
    # ======================
    # Valid files for this request
    # ======================
    valid_files = {sanitize_name(r["filename"]) for r in req_rows}
    # Clean unexpected files
    clean_folder(main_folder, valid_files)
    # ======================
    # DOWNLOAD MISSING FILES → only now load BLOBs
    # ======================
    added_new_file = False
    for r in req_rows:
        filename = sanitize_name(r["filename"])
        dest_plain = main_folder / filename
        dest_marked = main_folder / ("▲" + filename)
        if dest_plain.exists() or dest_marked.exists():
            continue
        added_new_file = True
        # ⭐ Load BLOB only when needed
        cur_blob.execute("""
            SELECT file_content
            FROM medevio_downloads
            WHERE request_id=%s AND filename=%s
        """, (req_id, r["filename"]))
        row = cur_blob.fetchone()
        if not row or not row[0]:
            continue
        with open(dest_plain, "wb") as f:
            f.write(row[0])
        print(f"💾 Wrote: {dest_plain.relative_to(BASE_DIR)}")
    # ======================
    # Folder-level ▲ logic
    # ======================
    if added_new_file and "▲" in main_folder.name:
        new_name = main_folder.name.replace("▲", "").strip()
        new_path = main_folder.parent / new_name
        try:
            main_folder.rename(new_path)
            main_folder = new_path
            print(f"🔄 Folder flag ▲ removed → {new_name}")
        except Exception as e:
            print(f"⚠️ Could not rename folder: {e}")
cur_blob.close()
cur_meta.close()
conn.close()
print("\n🎯 Export complete.\n")
@@ -0,0 +1,146 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import shutil
import pymysql
import re
from pathlib import Path
from datetime import datetime
# ==============================
# ⚙️ CONFIGURATION
# ==============================
# MySQL connection settings for the `medevio` mirror database.
# NOTE(review): credentials are hard-coded in source — move to env vars.
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3307,
    "user": "root",
    "password": "Vlado9674+",
    "database": "medevio",
    "charset": "utf8mb4",
}
# Dropbox export root; per-request subfolders live here.
BASE_DIR = Path(r"u:\Dropbox\Ordinace\Dokumentace_ke_zpracování\MP")
BASE_DIR.mkdir(parents=True, exist_ok=True)
def sanitize_name(name: str) -> str:
    """Replace characters illegal in Windows filenames with '_' and trim whitespace."""
    replaced = re.sub(r'[<>:"/\\|?*\x00-\x1F]', "_", name)
    return replaced.strip()
def clean_folder(folder: Path, valid_files: set):
    """Remove files that do NOT exist in MySQL for this request."""
    if not folder.exists():
        return
    for entry in folder.iterdir():
        if not entry.is_file():
            continue
        if sanitize_name(entry.name) in valid_files:
            continue
        print(f"🗑️ Removing unexpected file: {entry.name}")
        try:
            entry.unlink()
        except Exception as exc:
            print(f"⚠️ Cannot delete {entry}: {exc}")
# ==============================
# 📥 LOAD EVERYTHING IN ONE QUERY
# ==============================
# Single-query variant: pulls metadata AND every file_content BLOB at once.
# NOTE(review): this loads all attachment binaries into memory — fine for
# small datasets, but memory use grows with the table.
conn = pymysql.connect(**DB_CONFIG)
cur = conn.cursor(pymysql.cursors.DictCursor)
print("📥 Loading ALL metadata + BLOBs with ONE MySQL query…")
cur.execute("""
    SELECT
        d.id AS download_id,
        d.request_id,
        d.filename,
        d.file_content,
        p.updatedAt AS req_updated_at,
        p.pacient_jmeno AS jmeno,
        p.pacient_prijmeni AS prijmeni
    FROM medevio_downloads d
    JOIN pozadavky p ON d.request_id = p.id
    ORDER BY p.updatedAt DESC, d.created_at ASC
""")
rows = cur.fetchall()
print(f"📦 Loaded {len(rows)} total file rows.\n")
conn.close()
# ==============================
# 🔄 ORGANIZE ROWS PER REQUEST
# ==============================
# NOTE(review): `requests` here shadows the common HTTP library name
# (not imported in this script, so harmless — but rename if requests
# is ever needed).
requests = {}  # req_id → list of file dicts
for r in rows:
    req_id = r["request_id"]
    if req_id not in requests:
        requests[req_id] = []
    requests[req_id].append(r)
print(f"📌 Unique requests: {len(requests)}\n")
# ==============================
# 🧠 MAIN LOOP SAME LOGIC AS BEFORE
# ==============================
# Per-request export: build the folder name, merge any duplicate folders
# for the same request UUID, delete unexpected files, then write every
# attachment whose file is not already on disk.
for req_id, filelist in requests.items():
    # ========== GET UPDATEDAT (same logic) ==========
    any_row = filelist[0]
    updated_at = any_row["req_updated_at"] or datetime.now()
    date_str = updated_at.strftime("%Y-%m-%d")
    prijmeni = sanitize_name(any_row["prijmeni"] or "Unknown")
    jmeno = sanitize_name(any_row["jmeno"] or "")
    folder_name = sanitize_name(f"{date_str} {prijmeni}, {jmeno} {req_id}")
    main_folder = BASE_DIR / folder_name
    # ========== VALID FILES ==========
    valid_files = {sanitize_name(r["filename"]) for r in filelist}
    # ========== FIND OLD FOLDERS ==========
    # Any other directory containing the request UUID is a duplicate.
    possible_dups = [
        f for f in BASE_DIR.iterdir()
        if f.is_dir() and req_id in f.name and f != main_folder
    ]
    # ========== MERGE OLD FOLDERS ==========
    for dup in possible_dups:
        print(f"♻️ Merging folder: {dup.name}")
        clean_folder(dup, valid_files)
        main_folder.mkdir(parents=True, exist_ok=True)
        for f in dup.iterdir():
            if f.is_file():
                target = main_folder / f.name
                if not target.exists():
                    f.rename(target)
        shutil.rmtree(dup, ignore_errors=True)
    # ========== CLEAN MAIN FOLDER ==========
    main_folder.mkdir(parents=True, exist_ok=True)
    clean_folder(main_folder, valid_files)
    # ========== SAVE FILES (fast now) ==========
    # BLOBs were already fetched in the single bulk query above.
    for r in filelist:
        filename = sanitize_name(r["filename"])
        dest = main_folder / filename
        if dest.exists():
            continue
        content = r["file_content"]
        if not content:
            continue
        with open(dest, "wb") as f:
            f.write(content)
        print(f"💾 Saved: {dest.relative_to(BASE_DIR)}")
print("\n🎯 Export complete.\n")
@@ -0,0 +1,108 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import importlib.util
import sys
from pathlib import Path
# Load FunctionsLoader
# Load FunctionsLoader.py by absolute path (it is not on sys.path) and
# register it under its own module name so later imports reuse it.
FUNCTIONS_LOADER_PATH = Path(r"C:\Reporting\Functions\FunctionsLoader.py")
spec = importlib.util.spec_from_file_location("FunctionsLoader", FUNCTIONS_LOADER_PATH)
FunctionsLoader = importlib.util.module_from_spec(spec)
sys.modules["FunctionsLoader"] = FunctionsLoader
spec.loader.exec_module(FunctionsLoader)
"""
Spustí všechny PRAVIDELNÉ skripty v daném pořadí:
0) PRAVIDELNE_0_READ_ALL_ACTIVE_POZADAVKY.py
1) PRAVIDELNE_1_ReadLast300DonePozadavku.py
2) PRAVIDELNE_2_ReadPoznamky.py
3) PRAVIDELNE_3_StahniKomunikaci.py
4) PRAVIDELNE_4_StahniPrilohyUlozDoMySQL.py
5) PRAVIDELNE_5_SaveToFileSystem incremental.py
"""
import time, socket
for _ in range(30):
try:
socket.create_connection(("192.168.1.76", 3307), timeout=3).close()
break
except OSError:
time.sleep(10)
import sys
import subprocess
from pathlib import Path
from datetime import datetime
# Folder containing this orchestrator and all PRAVIDELNE_*.py sub-scripts.
BASE_DIR = Path(__file__).resolve().parent
# Scripts are executed strictly in this order.
SCRIPTS_IN_ORDER = [
    "PRAVIDELNE_0_READ_ALL_ACTIVE_POZADAVKY.py",
    "PRAVIDELNE_1_ReadLast300DonePozadavku.py",
    "PRAVIDELNE_2_ReadPoznamky.py",
    "PRAVIDELNE_3_StahniKomunikaci.py",
    "PRAVIDELNE_4_StahniPrilohyUlozDoMySQL.py",
    "PRAVIDELNE_5_SaveToFileSystem incremental.py",  # filename contains a space, fine inside the list
]
LOG_FILE = BASE_DIR / "PRAVIDELNE_log.txt"
def log(msg: str):
    """Write *msg* with a timestamp to the log file and to the console."""
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    entry = f"[{stamp}] {msg}"
    print(entry)
    try:
        with LOG_FILE.open("a", encoding="utf-8") as handle:
            handle.write(entry + "\n")
    except Exception:
        # Logging must never take down the run.
        pass
def main():
    """Run every script in SCRIPTS_IN_ORDER sequentially, logging results."""
    log("=== START pravidelného běhu ===")
    for script_name in SCRIPTS_IN_ORDER:
        script_path = BASE_DIR / script_name
        if not script_path.exists():
            log(f"❌ Skript nenalezen: {script_path}")
            continue
        log(f"▶ Spouštím: {script_path.name}")
        # Run with the same interpreter that runs this orchestrator.
        try:
            result = subprocess.run(
                [sys.executable, str(script_path)],
                cwd=str(BASE_DIR),
                capture_output=True,
                text=True,
                encoding="utf-8",
                errors="ignore",  # never crash on bytes that are invalid in cp1252
            )
        except Exception as e:
            log(f"   💥 Chyba při spouštění {script_path.name}: {e}")
            continue
        # Log the return code.
        log(f"   ↳ return code: {result.returncode}")
        # If the script wrote anything to stderr, log it.
        if result.stderr:
            log(f"   ⚠ stderr {script_path.name}:\n{result.stderr.strip()}")
        # stdout could also be logged if desired (can be very verbose):
        # if result.stdout:
        #     log(f"   stdout {script_path.name}:\n{result.stdout.strip()}")
    log("=== KONEC pravidelného běhu ===\n")
if __name__ == "__main__":
    main()
@@ -0,0 +1,29 @@
[2025-12-01 06:37:41] === START pravidelného běhu ===
[2025-12-01 06:37:42] ▶ Spouštím: PRAVIDELNE_0_READ_ALL_ACTIVE_POZADAVKY.py
[2025-12-01 06:37:44] ↳ PRAVIDELNE_0_READ_ALL_ACTIVE_POZADAVKY.py return code: 0
[2025-12-01 06:37:44] ▶ Spouštím: PRAVIDELNE_1_ReadLast300DonePozadavku.py
[2025-12-01 06:37:48] ↳ PRAVIDELNE_1_ReadLast300DonePozadavku.py return code: 0
[2025-12-01 06:37:48] ▶ Spouštím: PRAVIDELNE_2_ReadPoznamky.py
[2025-12-01 06:37:49] ↳ PRAVIDELNE_2_ReadPoznamky.py return code: 0
[2025-12-01 06:37:50] ▶ Spouštím: PRAVIDELNE_3_StahniKomunikaci.py
[2025-12-01 06:37:51] ↳ PRAVIDELNE_3_StahniKomunikaci.py return code: 0
[2025-12-01 06:37:52] ▶ Spouštím: PRAVIDELNE_4_StahniPrilohyUlozDoMySQL.py
[2025-12-01 06:37:53] ↳ PRAVIDELNE_4_StahniPrilohyUlozDoMySQL.py return code: 0
[2025-12-01 06:37:53] ▶ Spouštím: PRAVIDELNE_5_SaveToFileSystem incremental.py
[2025-12-01 06:38:42] ↳ PRAVIDELNE_5_SaveToFileSystem incremental.py return code: 0
[2025-12-01 06:38:43] === KONEC pravidelného běhu ===
[2025-12-02 07:04:34] === START pravidelného běhu ===
[2025-12-02 07:04:34] ▶ Spouštím: PRAVIDELNE_0_READ_ALL_ACTIVE_POZADAVKY.py
[2025-12-02 07:04:35] ↳ return code: 0
[2025-12-02 07:04:35] ▶ Spouštím: PRAVIDELNE_1_ReadLast300DonePozadavku.py
[2025-12-02 07:04:39] ↳ return code: 0
[2025-12-02 07:04:39] ▶ Spouštím: PRAVIDELNE_2_ReadPoznamky.py
[2025-12-02 07:04:40] ↳ return code: 0
[2025-12-02 07:04:40] ▶ Spouštím: PRAVIDELNE_3_StahniKomunikaci.py
[2025-12-02 07:04:40] ↳ return code: 0
[2025-12-02 07:04:40] ▶ Spouštím: PRAVIDELNE_4_StahniPrilohyUlozDoMySQL.py
[2025-12-02 07:04:40] ↳ return code: 0
[2025-12-02 07:04:40] ▶ Spouštím: PRAVIDELNE_5_SaveToFileSystem incremental.py
[2025-12-02 07:05:28] ↳ return code: 0
[2025-12-02 07:05:28] === KONEC pravidelného běhu ===
+136
View File
@@ -0,0 +1,136 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Orchestrator for all PRAVIDELNE scripts in exact order.
"""
import time, socket
# Wait up to ~5 minutes (30 × 10 s) for the MySQL server to become
# reachable before running the sub-scripts.
for _ in range(30):
    try:
        socket.create_connection(("192.168.1.76", 3307), timeout=3).close()
        break
    except OSError:
        time.sleep(10)
import sys
import subprocess
from pathlib import Path
from datetime import datetime
# =====================================================================
# Import EXACT Functions.py from: C:\Reporting\Fio\Functions.py
# This bypasses all other Functions.py files in the system.
# =====================================================================
import importlib.util
# Import the exact Functions.py from C:\Reporting\Fio by absolute path,
# bypassing any other Functions.py on sys.path.
FUNCTIONS_FILE = Path(r"C:\Reporting\Fio\Functions.py")
spec = importlib.util.spec_from_file_location("Functions_FIO", FUNCTIONS_FILE)
Functions_FIO = importlib.util.module_from_spec(spec)
sys.modules["Functions_FIO"] = Functions_FIO
spec.loader.exec_module(Functions_FIO)
# Bind the WhatsApp send function from that module.
SendWhatsAppMessage = Functions_FIO.SendWhatsAppMessage
# =====================================================================
# General Orchestrator Settings
# =====================================================================
# folder where orchestrator + sub-scripts live
# Folder containing this orchestrator and all PRAVIDELNE_*.py sub-scripts.
BASE_DIR = Path(__file__).resolve().parent
# Scripts are executed strictly in this order.
SCRIPTS_IN_ORDER = [
    "PRAVIDELNE_0_READ_ALL_ACTIVE_POZADAVKY.py",
    "PRAVIDELNE_1_ReadLast300DonePozadavku.py",
    "PRAVIDELNE_2_ReadPoznamky.py",
    "PRAVIDELNE_3_StahniKomunikaci.py",
    "PRAVIDELNE_4_StahniPrilohyUlozDoMySQL.py",
    "PRAVIDELNE_5_SaveToFileSystem incremental.py",
]
LOG_FILE = BASE_DIR / "PRAVIDELNE_log.txt"
# =====================================================================
# Logging + WhatsApp wrappers
# =====================================================================
def log(msg: str):
    """Append a timestamped line to LOG_FILE and echo it to stdout.

    Logging failures are swallowed so they can never abort the run.
    """
    ts = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    line = f"[{ts}] {msg}"
    print(line)
    try:
        with LOG_FILE.open("a", encoding="utf-8") as f:
            f.write(line + "\n")
    except Exception:
        # BUGFIX: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        pass
def whatsapp_notify(text: str):
    """WhatsApp message wrapper — never allowed to crash orchestrator"""
    try:
        SendWhatsAppMessage(text)
    except Exception:
        # BUGFIX: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        pass
# =====================================================================
# Main orchestrator
# =====================================================================
def main():
    """Run every script in SCRIPTS_IN_ORDER, logging and mirroring
    start/end, return codes and stderr to WhatsApp."""
    log("=== START pravidelného běhu ===")
    whatsapp_notify("🏁 *PRAVIDELNÉ skripty: START*")
    for script_name in SCRIPTS_IN_ORDER:
        script_path = BASE_DIR / script_name
        if not script_path.exists():
            err = f"❌ Skript nenalezen: {script_path}"
            log(err)
            whatsapp_notify(err)
            continue
        log(f"▶ Spouštím: {script_path.name}")
        whatsapp_notify(f"▶ *Spouštím:* {script_path.name}")
        # Run with the same interpreter as this orchestrator; ignore bytes
        # the child emits that are invalid in the chosen encoding.
        try:
            result = subprocess.run(
                [sys.executable, str(script_path)],
                cwd=str(BASE_DIR),
                capture_output=True,
                text=True,
                encoding="utf-8",
                errors="ignore",
            )
        except Exception as e:
            err = f"💥 Chyba při spouštění {script_path.name}: {e}"
            log(err)
            whatsapp_notify(err)
            continue
        # return code
        rc_msg = f"{script_path.name} return code: {result.returncode}"
        log(rc_msg)
        whatsapp_notify(rc_msg)
        # stderr (warnings/errors)
        if result.stderr:
            err_msg = f"⚠ stderr v {script_path.name}:\n{result.stderr.strip()}"
            log(err_msg)
            whatsapp_notify(err_msg)
    log("=== KONEC pravidelného běhu ===")
    whatsapp_notify("✅ *PRAVIDELNÉ skripty: KONEC*\n")
# =====================================================================
# Entry point
# =====================================================================
if __name__ == "__main__":
    main()
@@ -0,0 +1,196 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Download all 'Odeslat lékařskou zprávu' attachments from Medevio API
and store them (including binary content) directly into MySQL table `medevio_downloads`.
Each attachment (PDF, image, etc.) is fetched once and saved as LONGBLOB.
Duplicate protection is ensured via UNIQUE KEY on `attachment_id`.
"""
import zlib
import json
import requests
import pymysql
from pathlib import Path
from datetime import datetime
import time
# ==============================
# 🔧 CONFIGURATION
# ==============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
GRAPHQL_QUERY = r"""
query ClinicRequestDetail_GetPatientRequest2($requestId: UUID!) {
patientRequestMedicalRecords: listMedicalRecordsForPatientRequest(
attachmentTypes: [ECRF_FILL_ATTACHMENT, MESSAGE_ATTACHMENT, PATIENT_REQUEST_ATTACHMENT]
patientRequestId: $requestId
pageInfo: {first: 100, offset: 0}
) {
attachmentType
id
medicalRecord {
contentType
description
downloadUrl
id
url
visibleToPatient
}
}
}
"""
# ==============================
# 🧮 HELPERS
# ==============================
def short_crc8(uuid_str: str) -> str:
    """Return deterministic 8-char hex string from any input string (CRC32)."""
    checksum = zlib.crc32(uuid_str.encode("utf-8")) & 0xFFFFFFFF
    return format(checksum, "08x")
def extract_filename_from_url(url: str) -> str:
    """Extracts filename from S3-style URL (between last '/' and first '?')."""
    try:
        tail = url.rsplit("/", 1)[-1]
        return tail.partition("?")[0]
    except Exception:
        return "unknown_filename"
def read_token(p: Path) -> str:
    """Read Bearer token from file."""
    raw = p.read_text(encoding="utf-8").strip()
    if raw.startswith("Bearer "):
        return raw.split(" ", 1)[1]
    return raw
# ==============================
# 📡 FETCH ATTACHMENTS
# ==============================
def fetch_attachments(headers, request_id):
    """Return the attachment metadata list for one patient request.

    Posts GRAPHQL_QUERY to the Medevio API. On any network failure or
    non-200 response the error is printed and [] is returned so the
    caller's loop keeps running.
    """
    payload = {
        "operationName": "ClinicRequestDetail_GetPatientRequest2",
        "query": GRAPHQL_QUERY,
        "variables": {"requestId": request_id},
    }
    try:
        r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
    except requests.RequestException as e:
        # Robustness fix: a timeout/connection error used to propagate
        # and abort the whole run.
        print(f"❌ Network error for request {request_id}: {e}")
        return []
    if r.status_code != 200:
        print(f"❌ HTTP {r.status_code} for request {request_id}")
        return []
    # GraphQL returns "data": null on errors; guard with `or {}`.
    return (r.json().get("data") or {}).get("patientRequestMedicalRecords", [])
# ==============================
# 💾 SAVE TO MYSQL
# ==============================
def insert_download(cur, req_id, a, m, jmeno, prijmeni, created_date):
    """Download one attachment binary and upsert it into medevio_downloads.

    `a` is the attachment metadata dict, `m` its nested medicalRecord
    dict. Rows are keyed by attachment_id (UNIQUE); on conflict the BLOB,
    size and downloaded_at are refreshed. Download/URL failures are
    printed and the row is skipped — never raised.
    """
    url = m.get("downloadUrl")
    if not url:
        print("   ⚠️ No download URL")
        return
    try:
        r = requests.get(url, timeout=45)
        r.raise_for_status()
        content = r.content
    except Exception as e:
        print(f"   ⚠️ Failed to download {url}: {e}")
        return
    file_size = len(content)
    filename = extract_filename_from_url(url)
    attachment_id = a.get("id")
    attachment_type = a.get("attachmentType")
    content_type = m.get("contentType")
    # Upsert keyed on attachment_id (UNIQUE KEY, per the module docstring).
    cur.execute("""
        INSERT INTO medevio_downloads (
            request_id, attachment_id, attachment_type, filename,
            content_type, file_size, pacient_jmeno, pacient_prijmeni,
            created_at, file_content
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            file_content = VALUES(file_content),
            file_size = VALUES(file_size),
            downloaded_at = NOW()
    """, (
        req_id,
        attachment_id,
        attachment_type,
        filename,
        content_type,
        file_size,
        jmeno,
        prijmeni,
        created_date,
        content
    ))
    # NOTE(review): "(unknown)" looks like a placeholder — probably meant
    # to print the filename; confirm intended message.
    print(f"   💾 Saved (unknown) ({file_size/1024:.1f} kB)")
# ==============================
# 🧠 MAIN
# ==============================
def main():
    """Download attachments for every 'Odeslat lékařskou zprávu' request
    and store them in the medevio_downloads table."""
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    # Select only requests of the one display-title type this script handles.
    with conn.cursor() as cur:
        cur.execute("""
            SELECT id, displayTitle, pacient_prijmeni, pacient_jmeno, createdAt
            FROM pozadavky
            WHERE displayTitle = 'Odeslat lékařskou zprávu'
        """)
        rows = cur.fetchall()
    print(f"📋 Found {len(rows)} 'Odeslat lékařskou zprávu' requests")
    for i, row in enumerate(rows, 1):
        req_id = row["id"]
        prijmeni = row.get("pacient_prijmeni") or "Neznamy"
        jmeno = row.get("pacient_jmeno") or ""
        created = row.get("createdAt")
        # createdAt may already be a datetime or a string; fall back to None.
        try:
            created_date = datetime.strptime(str(created), "%Y-%m-%d %H:%M:%S")
        except Exception:
            created_date = None
        print(f"\n[{i}/{len(rows)}] 🧾 {prijmeni}, {jmeno} ({req_id})")
        attachments = fetch_attachments(headers, req_id)
        if not attachments:
            print("   ⚠️ No attachments")
            continue
        with conn.cursor() as cur:
            for a in attachments:
                m = a.get("medicalRecord") or {}
                insert_download(cur, req_id, a, m, jmeno, prijmeni, created_date)
        conn.commit()
        print(f"   ✅ {len(attachments)} attachments saved for {prijmeni}, {jmeno}")
        time.sleep(0.5)  # be nice to the API
    conn.close()
    print("\n✅ Done! All attachments stored in MySQL table `medevio_downloads`.")
# ==============================
if __name__ == "__main__":
    main()
@@ -0,0 +1,239 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pymysql
import requests
from pathlib import Path
from datetime import datetime
from dateutil import parser
import time
import sys
# ================================
# UTF-8 SAFE OUTPUT (Windows friendly)
# ================================
# Force UTF-8 on stdout/stderr so emoji in the progress messages don't
# crash under Windows consoles that default to cp1250.
try:
    sys.stdout.reconfigure(encoding='utf-8')
    sys.stderr.reconfigure(encoding='utf-8')
except AttributeError:
    # reconfigure() requires Python 3.7+ TextIOWrapper streams; fall back
    # to re-wrapping the raw byte buffers otherwise.
    import io
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def safe_print(text: str):
    """Print *text*, degrading gracefully on non-UTF-8 consoles.

    On a non-UTF stdout, characters outside the BMP are dropped first;
    if printing still fails, the text is reduced to plain ASCII.
    """
    encoding = sys.stdout.encoding or ""
    if not encoding.lower().startswith("utf"):
        text = ''.join(filter(lambda ch: ord(ch) < 65536, text))
    try:
        print(text)
    except UnicodeEncodeError:
        print(''.join(ch for ch in text if ord(ch) < 128))
# ================================
# 🔧 CONFIG
# ================================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
BATCH_SIZE = 500
STATES = ["ACTIVE", "DONE"] # explicitně jinak API vrací jen ACTIVE
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
GRAPHQL_QUERY = r"""
query ClinicRequestList2(
$clinicSlug: String!,
$queueId: String,
$queueAssignment: QueueAssignmentFilter!,
$state: PatientRequestState,
$pageInfo: PageInfo!,
$locale: Locale!
) {
requestsResponse: listPatientRequestsForClinic2(
clinicSlug: $clinicSlug,
queueId: $queueId,
queueAssignment: $queueAssignment,
state: $state,
pageInfo: $pageInfo
) {
count
patientRequests {
id
displayTitle(locale: $locale)
createdAt
updatedAt
doneAt
removedAt
extendedPatient {
name
surname
identificationNumber
}
lastMessage {
createdAt
}
}
}
}
"""
# ================================
# TOKEN
# ================================
def read_token(path: Path) -> str:
    """Return the bearer token stored in *path*, without any 'Bearer ' prefix."""
    raw = path.read_text(encoding="utf-8").strip()
    if raw.startswith("Bearer "):
        raw = raw.split(" ", 1)[1]
    return raw
# ================================
# DATETIME PARSER
# ================================
def to_mysql_dt(iso_str):
    """Convert an ISO-8601 timestamp into a local-time MySQL DATETIME string.

    Naive inputs are assumed to be in the local timezone. Returns None for
    empty or unparseable input.
    """
    if not iso_str:
        return None
    try:
        moment = parser.isoparse(iso_str)
        if moment.tzinfo is None:
            moment = moment.replace(tzinfo=datetime.now().astimezone().tzinfo)
        return moment.astimezone().strftime("%Y-%m-%d %H:%M:%S")
    except Exception:
        return None
# ================================
# UPSERT
# ================================
def upsert(conn, r):
    """Insert or refresh one patient-request row in `pozadavky`.

    `updatedAt` becomes the later of the request's own updatedAt and the
    timestamp of its last message, so message activity bumps the row.
    """
    patient = r.get("extendedPatient") or {}
    candidates = [
        to_mysql_dt(r.get("updatedAt")),
        to_mysql_dt((r.get("lastMessage") or {}).get("createdAt")),
    ]
    final_updated = max((c for c in candidates if c), default=None)
    sql = """
        INSERT INTO pozadavky (
            id, displayTitle, createdAt, updatedAt, doneAt, removedAt,
            pacient_jmeno, pacient_prijmeni, pacient_rodnecislo
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            displayTitle=VALUES(displayTitle),
            updatedAt=VALUES(updatedAt),
            doneAt=VALUES(doneAt),
            removedAt=VALUES(removedAt),
            pacient_jmeno=VALUES(pacient_jmeno),
            pacient_prijmeni=VALUES(pacient_prijmeni),
            pacient_rodnecislo=VALUES(pacient_rodnecislo)
    """
    params = (
        r.get("id"),
        r.get("displayTitle"),
        to_mysql_dt(r.get("createdAt")),
        final_updated,
        to_mysql_dt(r.get("doneAt")),
        to_mysql_dt(r.get("removedAt")),
        patient.get("name"),
        patient.get("surname"),
        patient.get("identificationNumber"),
    )
    with conn.cursor() as cur:
        cur.execute(sql, params)
    conn.commit()
# ================================
# FETCH PAGE (per state)
# ================================
def fetch_state(headers, state, offset):
    """Fetch one page of requests in *state*; returns (items, server total)."""
    payload = {
        "operationName": "ClinicRequestList2",
        "query": GRAPHQL_QUERY,
        "variables": {
            "clinicSlug": CLINIC_SLUG,
            "queueId": None,
            "queueAssignment": "ANY",
            "state": state,
            "pageInfo": {"first": BATCH_SIZE, "offset": offset},
            "locale": "cs",
        },
    }
    resp = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers)
    resp.raise_for_status()
    body = resp.json()["data"]["requestsResponse"]
    return body.get("patientRequests", []), body.get("count", 0)
# ================================
# MAIN
# ================================
def main():
    """Full resync: page through ACTIVE and DONE requests and upsert each
    one into `pozadavky`. Intended as a read-all catch-up job."""
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    safe_print(f"\n=== FULL Medevio READ-ALL sync @ {datetime.now():%Y-%m-%d %H:%M:%S} ===")
    grand_total = 0
    for state in STATES:
        safe_print(f"\n🔁 STATE = {state}")
        offset = 0
        total = None  # server-reported count, learned from the first page
        processed = 0
        while True:
            batch, count = fetch_state(headers, state, offset)
            if total is None:
                total = count
                safe_print(f"📡 {state}: celkem {total}")
            if not batch:
                break
            for r in batch:
                upsert(conn, r)
            processed += len(batch)
            safe_print(f"   {processed}/{total}")
            offset += BATCH_SIZE
            # stop once we have paged past the server-reported count
            if offset >= count:
                break
            time.sleep(0.4)  # throttle between pages
        grand_total += processed
    conn.close()
    safe_print(f"\n✅ HOTOVO celkem zpracováno {grand_total} požadavků\n")
# ================================
if __name__ == "__main__":
    main()
@@ -0,0 +1,279 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Fetches messages from Medevio API.
Modes:
- Incremental (default): Only requests where messagesProcessed IS NULL or < updatedAt
- Full resync (--full): Fetches ALL messages for ALL pozadavky
"""
import zlib
import json
import requests
import pymysql
from pathlib import Path
from datetime import datetime
import time
import argparse
# ==============================
# 🔧 CONFIGURATION
# ==============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
GRAPHQL_QUERY_MESSAGES = r"""
query UseMessages_ListMessages($requestId: String!, $updatedSince: DateTime) {
messages: listMessages(patientRequestId: $requestId, updatedSince: $updatedSince) {
id
createdAt
updatedAt
readAt
text
type
sender {
id
name
surname
clinicId
}
medicalRecord {
id
description
contentType
url
downloadUrl
token
createdAt
updatedAt
}
}
}
"""
# ==============================
# ⏱ DATETIME PARSER
# ==============================
def parse_dt(s):
    """Parse an ISO-8601 timestamp into a datetime.

    A trailing 'Z' is accepted (converted to '+00:00'). If full ISO parsing
    fails, the leading 'YYYY-MM-DDTHH:MM:SS' portion is tried. Returns None
    for empty or unparseable input.
    """
    if not s:
        return None
    try:
        return datetime.fromisoformat(s.replace("Z", "+00:00"))
    except (ValueError, TypeError, AttributeError):
        # invalid full ISO string (or not a string at all) — try the prefix.
        # The previous bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        pass
    try:
        return datetime.strptime(s[:19], "%Y-%m-%dT%H:%M:%S")
    except (ValueError, TypeError):
        return None
# ==============================
# 🔐 TOKEN
# ==============================
def read_token(path: Path) -> str:
    """Read the API token from *path*, stripping an optional 'Bearer ' prefix.

    Only a *leading* prefix is removed; the previous `str.replace` would
    also mangle a token that happened to contain 'Bearer ' internally, and
    is inconsistent with the sibling scripts' prefix handling.
    """
    tok = path.read_text(encoding="utf-8").strip()
    if tok.startswith("Bearer "):
        return tok.split(" ", 1)[1]
    return tok
# ==============================
# 📡 FETCH MESSAGES
# ==============================
def fetch_messages(headers, request_id):
    """Return every message of one patient request (empty list on HTTP error)."""
    payload = {
        "operationName": "UseMessages_ListMessages",
        "query": GRAPHQL_QUERY_MESSAGES,
        "variables": {"requestId": request_id, "updatedSince": None},
    }
    resp = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
    if resp.status_code != 200:
        print("❌ HTTP", resp.status_code, "for request", request_id)
        return []
    return resp.json().get("data", {}).get("messages", []) or []
# ==============================
# 💾 SAVE MESSAGE
# ==============================
def insert_message(cur, req_id, msg):
    """Upsert one chat message into `medevio_conversation` via cursor *cur*."""
    sender = msg.get("sender") or {}
    name_parts = [sender.get("name"), sender.get("surname")]
    sender_name = " ".join(x for x in name_parts if x) or None
    mr = msg.get("medicalRecord") or {}
    sql = """
        INSERT INTO medevio_conversation (
            id, request_id,
            sender_name, sender_id, sender_clinic_id,
            text, created_at, read_at, updated_at,
            attachment_url, attachment_description, attachment_content_type
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            sender_name = VALUES(sender_name),
            sender_id = VALUES(sender_id),
            sender_clinic_id = VALUES(sender_clinic_id),
            text = VALUES(text),
            created_at = VALUES(created_at),
            read_at = VALUES(read_at),
            updated_at = VALUES(updated_at),
            attachment_url = VALUES(attachment_url),
            attachment_description = VALUES(attachment_description),
            attachment_content_type = VALUES(attachment_content_type)
    """
    params = (
        msg.get("id"),
        req_id,
        sender_name,
        sender.get("id"),
        sender.get("clinicId"),
        msg.get("text"),
        parse_dt(msg.get("createdAt")),
        parse_dt(msg.get("readAt")),
        parse_dt(msg.get("updatedAt")),
        mr.get("downloadUrl") or mr.get("url"),
        mr.get("description"),
        mr.get("contentType"),
    )
    cur.execute(sql, params)
# ==============================
# 💾 DOWNLOAD MESSAGE ATTACHMENT
# ==============================
def insert_download(cur, req_id, msg, existing_ids):
    """Download the message's attachment (if any, and not seen before) and
    upsert it into `medevio_downloads`; records the id in *existing_ids*."""
    record = msg.get("medicalRecord") or {}
    attachment_id = record.get("id")
    if not attachment_id or attachment_id in existing_ids:
        return  # nothing attached, or already stored
    url = record.get("downloadUrl") or record.get("url")
    if not url:
        return
    try:
        resp = requests.get(url, timeout=30)
        resp.raise_for_status()
        data = resp.content
    except Exception as e:
        print("⚠️ Failed to download:", e)
        return
    filename = url.split("/")[-1].split("?")[0]
    cur.execute("""
        INSERT INTO medevio_downloads (
            request_id, attachment_id, attachment_type,
            filename, content_type, file_size, created_at, file_content
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            file_content = VALUES(file_content),
            file_size = VALUES(file_size),
            downloaded_at = NOW()
    """, (
        req_id,
        attachment_id,
        "MESSAGE_ATTACHMENT",
        filename,
        record.get("contentType"),
        len(data),
        parse_dt(msg.get("createdAt")),
        data
    ))
    existing_ids.add(attachment_id)
# ==============================
# 🧠 MAIN
# ==============================
def main():
    """Sync messages (and their attachments) for pozadavky rows into MySQL.

    Supports --full (all pozadavky) vs. incremental (only rows whose
    messagesProcessed watermark is stale) — but see the NOTE below: full
    mode is currently hard-wired on.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--full", action="store_true", help="Load messages for ALL pozadavky")
    # Force full mode ON
    # NOTE(review): parse_args(args=["--full"]) ignores the real command line,
    # so the --full flag and the incremental branch are dead code until the
    # commented-out line below is restored.
    args = parser.parse_args(args=["--full"])
    # args = parser.parse_args()
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    # ---- Load existing attachments
    # Known attachment ids let insert_download skip re-downloading blobs.
    with conn.cursor() as cur:
        cur.execute("SELECT attachment_id FROM medevio_downloads")
        existing_ids = {row["attachment_id"] for row in cur.fetchall()}
    print(f"📦 Already downloaded attachments: {len(existing_ids)}\n")
    # ---- Select pozadavky to process
    with conn.cursor() as cur:
        if args.full:
            print("🔁 FULL REFRESH MODE: Fetching messages for ALL pozadavky!\n")
            cur.execute("SELECT id FROM pozadavky")
        else:
            print("📥 Incremental mode: Only syncing updated pozadavky.\n")
            cur.execute("""
                SELECT id FROM pozadavky
                WHERE messagesProcessed IS NULL
                   OR messagesProcessed < updatedAt
            """)
        requests_to_process = cur.fetchall()
    # =================================
    # ⏩ SKIP FIRST 3100 AS YESTERDAY
    # =================================
    # NOTE(review): one-off resume hack — it assumes row order is stable
    # between runs, which a plain `SELECT id FROM pozadavky` does not
    # guarantee. Remove once the backlog run is finished.
    SKIP = 3100
    if len(requests_to_process) > SKIP:
        print(f"⏩ Skipping first {SKIP} pozadavky (already processed yesterday).")
        requests_to_process = requests_to_process[SKIP:]
    else:
        print("⚠️ Not enough pozadavky to skip!")
    print(f"📋 Requests to process: {len(requests_to_process)}\n")
    # ---- Process each request
    for idx, row in enumerate(requests_to_process, 1):
        req_id = row["id"]
        print(f"[{idx}/{len(requests_to_process)}] Processing {req_id}")
        messages = fetch_messages(headers, req_id)
        with conn.cursor() as cur:
            for msg in messages:
                insert_message(cur, req_id, msg)
                insert_download(cur, req_id, msg, existing_ids)
        conn.commit()
        # stamp the watermark so incremental mode can skip this row next run
        with conn.cursor() as cur:
            cur.execute("UPDATE pozadavky SET messagesProcessed = NOW() WHERE id = %s", (req_id,))
        conn.commit()
        print(f"   ✅ {len(messages)} messages saved\n")
        time.sleep(0.25)  # throttle API calls
    conn.close()
    print("🎉 Done!")
if __name__ == "__main__":
    main()
@@ -0,0 +1 @@
{"cookies": [{"name": "gateway-access-token", "value": "YwBgkf8McREDKs7vCZj0EZD2fJsuV8RyDPtYx7WiDoz0nFJ9kxId8kcNEPBLFSwM+Tiz80+SOdFwo+oj", "domain": "my.medevio.cz", "path": "/", "expires": 1763372319, "httpOnly": false, "secure": false, "sameSite": "Lax"}, {"name": "aws-waf-token", "value": "b6a1d4eb-4350-40e5-8e52-1f5f9600fbb8:CgoAr9pC8c6zAAAA:OYwXLY5OyitSQPl5v2oIlS+hIxsrb5LxV4VjCyE2gJCFFE5PQu+0Zbxse2ZIofrNv5QKs0TYUDTmxPhZyTr9Qtjnq2gsVQxWHXzrbebv3Z7RbzB63u6Ymn3Fo8IbDev3CfCNcNuxCKltFEXLqSCjI2vqNY+7HZkgQBIqy2wMgzli3aSLq0w8lWYtZzyyot7q8RPXWMGTfaBUo2reY0SOSffm9rAivE9PszNfPid71CvNrGAAoxRbwb25eVujlyIcDVWe5vZ9Iw==", "domain": ".my.medevio.cz", "path": "/", "expires": 1761125920, "httpOnly": false, "secure": true, "sameSite": "Lax"}], "origins": [{"origin": "https://my.medevio.cz", "localStorage": [{"name": "awswaf_token_refresh_timestamp", "value": "1760780309860"}, {"name": "awswaf_session_storage", "value": "b6a1d4eb-4350-40e5-8e52-1f5f9600fbb8:CgoAr9pC8c+zAAAA:+vw//1NzmePjPpbGCJzUB+orCRivtJd098DbDX4AnABiGRw/+ql6ShqvFY4YdCY7w2tegb5mEPBdAmc4sNi22kNR9BuEoAgCUiMhkU1AZWfzM51zPfTh7SveCrREZ7xdvxcqKPMmfVLRYX5E4+UWh22z/LKQ7+d9VERp3J+wWCUW3dFFirkezy3N7b2FVjTlY/RxsZwhejQziTG/L3CkIFFP3mOReNgBvDpj7aKoM1knY4IL4TZ8E7zNv3nTsvzACLYvnUutVOUcofN1TfOzwZshSKsEXsMzrQn8PzLccX1jM5VSzce7gfEzl0zSPsT8NB3Sna+rhMIttDNYgvbW1HsfG2LIeKMR27Zf8hkslDRVVkcU/Kp2jLOEdhhrBKGjKY2o9/uX3NExdzh5MEKQSSRtmue01BpWYILPH23rMsz4YSmF+Ough5OeQoC95rkcYwVXMhwvUN9Zfp9UZ4xCNfFUex5dOrg9aJntYRnaceeocGUttNI5AdT0i3+osV6XHXzKxeqO8zLCS9BIsCzxaHfdqqem5DorMceuGKz+QqksatIQAA=="}, {"name": "Application.Intl.locale", "value": "cs"}, {"name": "Password.prefill", "value": "{\"username\":\"vladimir.buzalka@buzalka.cz\",\"type\":\"email\"}"}]}]}
+214
View File
@@ -0,0 +1,214 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pymysql
import requests
from pathlib import Path
from datetime import datetime, timezone
import time
from dateutil import parser
import sys
# Force UTF-8 output
# Ensures emoji in log lines survive Windows consoles defaulting to cp1250.
try:
    sys.stdout.reconfigure(encoding='utf-8')
    sys.stderr.reconfigure(encoding='utf-8')
except AttributeError:
    # reconfigure() is only available on Python 3.7+ streams; re-wrap buffers.
    import io
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
# ================================
# 🔧 CONFIGURATION
# ================================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
BATCH_SIZE = 100
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3306,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
# ⭐ NOVÝ TESTOVANÝ DOTAZ obsahuje lastMessage.createdAt
GRAPHQL_QUERY = r"""
query ClinicRequestList2(
$clinicSlug: String!,
$queueId: String,
$queueAssignment: QueueAssignmentFilter!,
$state: PatientRequestState,
$pageInfo: PageInfo!,
$locale: Locale!
) {
requestsResponse: listPatientRequestsForClinic2(
clinicSlug: $clinicSlug,
queueId: $queueId,
queueAssignment: $queueAssignment,
state: $state,
pageInfo: $pageInfo
) {
count
patientRequests {
id
displayTitle(locale: $locale)
createdAt
updatedAt
doneAt
removedAt
extendedPatient {
name
surname
identificationNumber
}
lastMessage {
createdAt
}
}
}
}
"""
# ================================
# 🧿 SAFE DATETIME PARSER (ALWAYS UTC → LOCAL)
# ================================
def to_mysql_dt_utc(iso_str):
    """Convert an ISO-8601 timestamp into a local-time MySQL DATETIME string.

    Naive timestamps are assumed to be UTC (then converted to local time).
    Returns None for empty or unparseable input.
    """
    if not iso_str:
        return None
    try:
        dt = parser.isoparse(iso_str)
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=timezone.utc)
        return dt.astimezone().strftime("%Y-%m-%d %H:%M:%S")
    except (ValueError, OverflowError, TypeError, OSError):
        # only conversion failures map to None; the previous bare `except:`
        # also swallowed SystemExit/KeyboardInterrupt
        return None
# ================================
# 🔑 TOKEN
# ================================
def read_token(path: Path) -> str:
    """Return the token from *path*; a leading 'Bearer ' prefix is dropped."""
    content = path.read_text(encoding="utf-8").strip()
    prefix = "Bearer "
    return content[len(prefix):] if content.startswith(prefix) else content
# ================================
# 💾 UPSERT
# ================================
def upsert(conn, r):
    """Upsert one ACTIVE request into `pozadavky` (UTC-aware timestamps).

    updatedAt is the later of the request's own updatedAt and its last
    message timestamp.
    """
    patient = r.get("extendedPatient") or {}
    api_updated = to_mysql_dt_utc(r.get("updatedAt"))
    msg_updated = to_mysql_dt_utc((r.get("lastMessage") or {}).get("createdAt"))
    stamps = [s for s in (api_updated, msg_updated) if s]
    final_updated = max(stamps) if stamps else None
    sql = """
        INSERT INTO pozadavky (
            id, displayTitle, createdAt, updatedAt, doneAt, removedAt,
            pacient_jmeno, pacient_prijmeni, pacient_rodnecislo
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            displayTitle=VALUES(displayTitle),
            updatedAt=VALUES(updatedAt),
            doneAt=VALUES(doneAt),
            removedAt=VALUES(removedAt),
            pacient_jmeno=VALUES(pacient_jmeno),
            pacient_prijmeni=VALUES(pacient_prijmeni),
            pacient_rodnecislo=VALUES(pacient_rodnecislo)
    """
    row = (
        r.get("id"),
        r.get("displayTitle"),
        to_mysql_dt_utc(r.get("createdAt")),
        final_updated,
        to_mysql_dt_utc(r.get("doneAt")),
        to_mysql_dt_utc(r.get("removedAt")),
        patient.get("name"),
        patient.get("surname"),
        patient.get("identificationNumber"),
    )
    with conn.cursor() as cur:
        cur.execute(sql, row)
    conn.commit()
# ================================
# 📡 FETCH ACTIVE PAGE
# ================================
def fetch_active(headers, offset):
    """Fetch one page of ACTIVE requests; returns (items, total_count)."""
    variables = {
        "clinicSlug": CLINIC_SLUG,
        "queueId": None,
        "queueAssignment": "ANY",
        "pageInfo": {"first": BATCH_SIZE, "offset": offset},
        "locale": "cs",
        "state": "ACTIVE",
    }
    reply = requests.post(
        "https://api.medevio.cz/graphql",
        json={
            "operationName": "ClinicRequestList2",
            "query": GRAPHQL_QUERY,
            "variables": variables,
        },
        headers=headers,
    )
    reply.raise_for_status()
    section = reply.json().get("data", {}).get("requestsResponse", {})
    return section.get("patientRequests", []), section.get("count", 0)
# ================================
# 🧠 MAIN
# ================================
def main():
    """Frequent sync job: page through ACTIVE requests only and upsert them."""
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    print(f"\n=== Sync ACTIVE požadavků @ {datetime.now():%Y-%m-%d %H:%M:%S} ===")
    offset = 0
    total_processed = 0
    total_count = None  # server-reported total, learned from the first page
    while True:
        batch, count = fetch_active(headers, offset)
        if total_count is None:
            total_count = count
            print(f"📡 Celkem ACTIVE v Medevio: {count}")
        if not batch:
            break
        for r in batch:
            upsert(conn, r)
        total_processed += len(batch)
        print(f"   ✅ {total_processed}/{total_count} ACTIVE processed")
        # last page reached once the next offset would exceed the total
        if offset + BATCH_SIZE >= count:
            break
        offset += BATCH_SIZE
        time.sleep(0.4)  # throttle between pages
    conn.close()
    print("\n✅ ACTIVE sync hotovo!\n")
if __name__ == "__main__":
    main()
+239
View File
@@ -0,0 +1,239 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pymysql
import requests
from pathlib import Path
from datetime import datetime
from dateutil import parser
import time
import sys
# ================================
# UTF-8 SAFE OUTPUT (Windows friendly)
# ================================
# Switch stdout/stderr to UTF-8 so emoji logging works on cp1250 consoles.
try:
    sys.stdout.reconfigure(encoding='utf-8')
    sys.stderr.reconfigure(encoding='utf-8')
except AttributeError:
    # streams without reconfigure() (pre-3.7): re-wrap the raw buffers
    import io
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def safe_print(text: str):
    """Console-safe print: strips non-BMP characters on non-UTF terminals
    and falls back to plain ASCII if encoding still fails."""
    if not (sys.stdout.encoding or "").lower().startswith("utf"):
        text = ''.join([ch for ch in text if ord(ch) < 65536])
    try:
        print(text)
    except UnicodeEncodeError:
        print(''.join([ch for ch in text if ord(ch) < 128]))
# ================================
# 🔧 CONFIG
# ================================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
BATCH_SIZE = 500
STATES = ["ACTIVE", "DONE"] # explicitně jinak API vrací jen ACTIVE
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3306,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
GRAPHQL_QUERY = r"""
query ClinicRequestList2(
$clinicSlug: String!,
$queueId: String,
$queueAssignment: QueueAssignmentFilter!,
$state: PatientRequestState,
$pageInfo: PageInfo!,
$locale: Locale!
) {
requestsResponse: listPatientRequestsForClinic2(
clinicSlug: $clinicSlug,
queueId: $queueId,
queueAssignment: $queueAssignment,
state: $state,
pageInfo: $pageInfo
) {
count
patientRequests {
id
displayTitle(locale: $locale)
createdAt
updatedAt
doneAt
removedAt
extendedPatient {
name
surname
identificationNumber
}
lastMessage {
createdAt
}
}
}
}
"""
# ================================
# TOKEN
# ================================
def read_token(path: Path) -> str:
    """Load the bearer token, tolerating an optional 'Bearer ' prefix."""
    token = path.read_text(encoding="utf-8").strip()
    if not token.startswith("Bearer "):
        return token
    return token.split(" ", 1)[1]
# ================================
# DATETIME PARSER
# ================================
def to_mysql_dt(iso_str):
    """ISO-8601 → local 'YYYY-MM-DD HH:MM:SS' string, or None on failure.

    Naive inputs are treated as local time.
    """
    if not iso_str:
        return None
    try:
        stamp = parser.isoparse(iso_str)
        if stamp.tzinfo is None:
            local_tz = datetime.now().astimezone().tzinfo
            stamp = stamp.replace(tzinfo=local_tz)
        return stamp.astimezone().strftime("%Y-%m-%d %H:%M:%S")
    except Exception:
        return None
# ================================
# UPSERT
# ================================
def upsert(conn, r):
    """Insert/refresh one request row; updatedAt also reflects the last message."""
    patient = r.get("extendedPatient") or {}
    api_updated = to_mysql_dt(r.get("updatedAt"))
    msg_updated = to_mysql_dt((r.get("lastMessage") or {}).get("createdAt"))
    if api_updated and msg_updated:
        final_updated = max(api_updated, msg_updated)
    else:
        final_updated = api_updated or msg_updated
    sql = """
        INSERT INTO pozadavky (
            id, displayTitle, createdAt, updatedAt, doneAt, removedAt,
            pacient_jmeno, pacient_prijmeni, pacient_rodnecislo
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            displayTitle=VALUES(displayTitle),
            updatedAt=VALUES(updatedAt),
            doneAt=VALUES(doneAt),
            removedAt=VALUES(removedAt),
            pacient_jmeno=VALUES(pacient_jmeno),
            pacient_prijmeni=VALUES(pacient_prijmeni),
            pacient_rodnecislo=VALUES(pacient_rodnecislo)
    """
    record = (
        r.get("id"),
        r.get("displayTitle"),
        to_mysql_dt(r.get("createdAt")),
        final_updated,
        to_mysql_dt(r.get("doneAt")),
        to_mysql_dt(r.get("removedAt")),
        patient.get("name"),
        patient.get("surname"),
        patient.get("identificationNumber"),
    )
    with conn.cursor() as cur:
        cur.execute(sql, record)
    conn.commit()
# ================================
# FETCH PAGE (per state)
# ================================
def fetch_state(headers, state, offset):
    """Request one page of patient requests for *state*; returns (items, total)."""
    page = {"first": BATCH_SIZE, "offset": offset}
    variables = {
        "clinicSlug": CLINIC_SLUG,
        "queueId": None,
        "queueAssignment": "ANY",
        "state": state,
        "pageInfo": page,
        "locale": "cs",
    }
    response = requests.post(
        "https://api.medevio.cz/graphql",
        json={"operationName": "ClinicRequestList2", "query": GRAPHQL_QUERY, "variables": variables},
        headers=headers,
    )
    response.raise_for_status()
    payload = response.json()["data"]["requestsResponse"]
    return payload.get("patientRequests", []), payload.get("count", 0)
# ================================
# MAIN
# ================================
def main():
    """Read-all catch-up job: page through ACTIVE and DONE states and upsert
    every request into `pozadavky`."""
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    safe_print(f"\n=== FULL Medevio READ-ALL sync @ {datetime.now():%Y-%m-%d %H:%M:%S} ===")
    grand_total = 0
    for state in STATES:
        safe_print(f"\n🔁 STATE = {state}")
        offset = 0
        total = None  # reported count, captured from the first page
        processed = 0
        while True:
            batch, count = fetch_state(headers, state, offset)
            if total is None:
                total = count
                safe_print(f"📡 {state}: celkem {total}")
            if not batch:
                break
            for r in batch:
                upsert(conn, r)
            processed += len(batch)
            safe_print(f"   {processed}/{total}")
            offset += BATCH_SIZE
            # done once paging passes the server-reported count
            if offset >= count:
                break
            time.sleep(0.4)  # throttle between pages
        grand_total += processed
    conn.close()
    safe_print(f"\n✅ HOTOVO celkem zpracováno {grand_total} požadavků\n")
# ================================
if __name__ == "__main__":
    main()
+217
View File
@@ -0,0 +1,217 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Download and store Medevio questionnaires (userNote + eCRF) for all patient requests.
Uses the verified working query "GetPatientRequest2".
"""
import json
import requests
import pymysql
from datetime import datetime
from pathlib import Path
import time
import sys
# Force UTF-8 output even under Windows Task Scheduler
# (scheduled tasks inherit a non-UTF console encoding by default)
try:
    sys.stdout.reconfigure(encoding='utf-8')
    sys.stderr.reconfigure(encoding='utf-8')
except AttributeError:
    # streams lacking reconfigure(): re-wrap the underlying byte buffers
    import io
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
# ==============================
# 🛡 SAFE PRINT FOR CP1250 / EMOJI
# ==============================
def safe_print(text: str):
    """Print that survives cp1250 consoles: drop astral-plane characters up
    front on non-UTF stdout, and retry in pure ASCII on encode errors."""
    stdout_enc = (sys.stdout.encoding or "").lower()
    if not stdout_enc.startswith("utf"):
        text = ''.join(ch for ch in text if ord(ch) < 65536)
    try:
        print(text)
    except UnicodeEncodeError:
        ascii_only = ''.join(ch for ch in text if ord(ch) < 128)
        print(ascii_only)
# ==============================
# 🔧 CONFIGURATION (UPDATED TO 192.168.1.50)
# ==============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"  # shared bearer-token file
CLINIC_SLUG = "mudr-buzalkova"
GRAPHQL_URL = "https://api.medevio.cz/graphql"
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3306,
    "user": "root",
    "password": "Vlado9674+",  # NOTE(review): plaintext credentials committed to source — move to env/secret store
    "database": "medevio",
    "charset": "utf8mb4",
    "cursorclass": pymysql.cursors.DictCursor,  # rows as dicts keyed by column name
}
# ==============================
# 🕒 DATETIME FIXER
# ==============================
def fix_datetime(dt_str):
    """Normalize an API timestamp into a naive datetime (tz suffix stripped).

    Returns None for empty or unparseable input.
    """
    if not dt_str:
        return None
    try:
        cleaned = dt_str.replace("Z", "").replace("+00:00", "")
        return datetime.fromisoformat(cleaned)
    except Exception:
        return None
# Optional filter
# Only requests created on/after this date are re-checked (appended to the
# SQL as `createdAt >= %s`); set to a falsy value to disable.
CREATED_AFTER = "2025-01-01"
# ==============================
# 🧮 HELPERS
# ==============================
def read_token(p: Path) -> str:
    """Return the API token stored in *p*, minus any 'Bearer ' prefix."""
    stored = p.read_text(encoding="utf-8").strip()
    return stored.split(" ", 1)[1] if stored.startswith("Bearer ") else stored
GRAPHQL_QUERY = r"""
query GetPatientRequest2($requestId: UUID!, $clinicSlug: String!, $locale: Locale!) {
request: getPatientRequest2(patientRequestId: $requestId, clinicSlug: $clinicSlug) {
id
displayTitle(locale: $locale)
createdAt
updatedAt
userNote
eventType
extendedPatient(clinicSlug: $clinicSlug) {
name
surname
identificationNumber
}
ecrfFilledData(locale: $locale) {
name
groups {
label
fields {
name
label
type
value
}
}
}
}
}
"""
def fetch_questionnaire(headers, request_id, clinic_slug):
    """POST the GetPatientRequest2 query; return the request node or None."""
    body = {
        "operationName": "GetPatientRequest2",
        "query": GRAPHQL_QUERY,
        "variables": {
            "requestId": request_id,
            "clinicSlug": clinic_slug,
            "locale": "cs",
        },
    }
    r = requests.post(GRAPHQL_URL, json=body, headers=headers, timeout=40)
    if r.status_code != 200:
        safe_print(f"❌ HTTP {r.status_code} for {request_id}: {r.text}")
        return None
    return r.json().get("data", {}).get("request")
def insert_questionnaire(cur, req):
    """Upsert one questionnaire (userNote + eCRF JSON) into `medevio_questionnaires`."""
    if not req:
        return
    patient = req.get("extendedPatient") or {}
    ecrf_data = req.get("ecrfFilledData")
    row = (
        req.get("id"),
        fix_datetime(req.get("createdAt")),
        fix_datetime(req.get("updatedAt")),
        req.get("userNote"),
        json.dumps(ecrf_data, ensure_ascii=False),
    )
    cur.execute("""
        INSERT INTO medevio_questionnaires (
            request_id, created_at, updated_at, user_note, ecrf_json
        )
        VALUES (%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            updated_at = VALUES(updated_at),
            user_note = VALUES(user_note),
            ecrf_json = VALUES(ecrf_json),
            updated_local = NOW()
    """, row)
    safe_print(f"   💾 Stored questionnaire for {patient.get('surname','')} {patient.get('name','')}")
# ==============================
# 🧠 MAIN
# ==============================
def main():
    """Fetch questionnaires for stale pozadavky rows and store them as JSON."""
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    # load list of requests from the table we just filled
    # (only rows whose questionnaire watermark is missing or stale)
    with conn.cursor() as cur:
        sql = """
            SELECT id, pacient_jmeno, pacient_prijmeni, createdAt, updatedAt, questionnaireprocessed
            FROM pozadavky
            WHERE (questionnaireprocessed IS NULL OR questionnaireprocessed < updatedAt)
        """
        if CREATED_AFTER:
            sql += " AND createdAt >= %s"
            cur.execute(sql, (CREATED_AFTER,))
        else:
            cur.execute(sql)
        rows = cur.fetchall()
    safe_print(f"📋 Found {len(rows)} requests needing questionnaire check.")
    for i, row in enumerate(rows, 1):
        req_id = row["id"]
        safe_print(f"\n[{i}/{len(rows)}] 🔍 Fetching questionnaire for {req_id} ...")
        req = fetch_questionnaire(headers, req_id, CLINIC_SLUG)
        if not req:
            safe_print("   ⚠️ No questionnaire data found.")
            continue
        with conn.cursor() as cur:
            insert_questionnaire(cur, req)
            # watermark so only rows updated later get re-fetched next run
            cur.execute(
                "UPDATE pozadavky SET questionnaireprocessed = NOW() WHERE id = %s",
                (req_id,)
            )
        conn.commit()
        time.sleep(0.6)  # throttle API calls
    conn.close()
    safe_print("\n✅ Done! All questionnaires stored in MySQL table `medevio_questionnaires`.")
if __name__ == "__main__":
    main()
+148
View File
@@ -0,0 +1,148 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import requests
import pymysql
from pathlib import Path
from datetime import datetime
import time
import sys
# UTF-8 SAFE OUTPUT
# Make stdout/stderr UTF-8 so emoji logging works on Windows consoles.
try:
    sys.stdout.reconfigure(encoding='utf-8')
    sys.stderr.reconfigure(encoding='utf-8')
except AttributeError:
    # pre-3.7 streams lack reconfigure(); keep the default encoding
    pass
# ==============================
# CONFIG (.50)
# ==============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3306,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
GRAPHQL_QUERY_MESSAGES = r"""
query UseMessages_ListMessages($requestId: String!, $updatedSince: DateTime) {
messages: listMessages(patientRequestId: $requestId, updatedSince: $updatedSince) {
id createdAt updatedAt readAt text type
sender { id name surname clinicId }
medicalRecord { id description contentType url downloadUrl createdAt updatedAt }
}
}
"""
def parse_dt(s):
    """Parse an ISO-8601 timestamp (trailing 'Z' accepted) or return None."""
    if not s:
        return None
    try:
        return datetime.fromisoformat(s.replace("Z", "+00:00"))
    except (ValueError, TypeError, AttributeError):
        # invalid format or non-string input; the previous bare `except:`
        # also swallowed SystemExit/KeyboardInterrupt
        return None
def read_token(path: Path) -> str:
    """Read the token file, stripping only a *leading* 'Bearer ' prefix.

    The previous `str.replace("Bearer ", "")` removed the substring
    anywhere inside the token; this matches the sibling scripts' behavior.
    """
    tok = path.read_text(encoding="utf-8").strip()
    if tok.startswith("Bearer "):
        return tok.split(" ", 1)[1]
    return tok
def main():
    """Delta-sync messages and their attachments for stale pozadavky rows.

    Only requests whose messagesProcessed watermark is missing or older
    than updatedAt are fetched; per request, messages are upserted into
    `medevio_conversation` and new attachments into `medevio_downloads`.
    """
    token = read_token(TOKEN_PATH)
    headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
    conn = pymysql.connect(**DB_CONFIG)
    # 1. Already-downloaded attachment ids (duplicate prevention)
    with conn.cursor() as cur:
        cur.execute("SELECT attachment_id FROM medevio_downloads")
        existing_ids = {r["attachment_id"] for r in cur.fetchall()}
    # 2. Requests whose messages need syncing
    with conn.cursor() as cur:
        cur.execute("""
            SELECT id, messagesProcessed FROM pozadavky
            WHERE messagesProcessed IS NULL OR messagesProcessed < updatedAt
        """)
        rows = cur.fetchall()
    print(f"📋 Počet požadavků k synchronizaci zpráv: {len(rows)}")
    for i, row in enumerate(rows, 1):
        req_id = row["id"]
        updated_since = row["messagesProcessed"]
        if updated_since:
            # API wants an ISO timestamp; a 'Z' suffix marks it as UTC.
            # NOTE(review): messagesProcessed is written via NOW() (server
            # local time), so labelling it 'Z' assumes the DB runs in UTC —
            # confirm, otherwise deltas may be shifted.
            updated_since = updated_since.replace(microsecond=0).isoformat() + "Z"
        print(f"[{i}/{len(rows)}] Synchronizuji: {req_id}")
        payload = {
            "operationName": "UseMessages_ListMessages",
            "query": GRAPHQL_QUERY_MESSAGES,
            "variables": {"requestId": req_id, "updatedSince": updated_since}
        }
        try:
            r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
            messages = r.json().get("data", {}).get("messages", []) or []
            if messages:
                with conn.cursor() as cur:
                    for msg in messages:
                        # Store the message row
                        sender = msg.get("sender") or {}
                        sender_name = " ".join(filter(None, [sender.get("name"), sender.get("surname")]))
                        mr = msg.get("medicalRecord") or {}
                        cur.execute("""
                            INSERT INTO medevio_conversation (
                                id, request_id, sender_name, sender_id, sender_clinic_id,
                                text, created_at, read_at, updated_at,
                                attachment_url, attachment_description, attachment_content_type
                            ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
                            ON DUPLICATE KEY UPDATE
                                text = VALUES(text), updated_at = VALUES(updated_at), read_at = VALUES(read_at)
                        """, (
                            msg.get("id"), req_id, sender_name, sender.get("id"), sender.get("clinicId"),
                            msg.get("text"), parse_dt(msg.get("createdAt")), parse_dt(msg.get("readAt")),
                            parse_dt(msg.get("updatedAt")), mr.get("downloadUrl") or mr.get("url"),
                            mr.get("description"), mr.get("contentType")
                        ))
                        # Store the attachment (if present and not downloaded yet)
                        attachment_id = mr.get("id")
                        if attachment_id and attachment_id not in existing_ids:
                            url = mr.get("downloadUrl") or mr.get("url")
                            if url:
                                att_r = requests.get(url, timeout=30)
                                if att_r.status_code == 200:
                                    cur.execute("""
                                        INSERT INTO medevio_downloads (
                                            request_id, attachment_id, attachment_type,
                                            filename, content_type, file_size, created_at, file_content
                                        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
                                    """, (
                                        req_id, attachment_id, "MESSAGE_ATTACHMENT",
                                        url.split("/")[-1].split("?")[0], mr.get("contentType"),
                                        len(att_r.content), parse_dt(msg.get("createdAt")), att_r.content
                                    ))
                                    existing_ids.add(attachment_id)
                    # advance the watermark only after all messages stored
                    cur.execute("UPDATE pozadavky SET messagesProcessed = NOW() WHERE id = %s", (req_id,))
                conn.commit()
            else:
                # no new messages — still bump the watermark
                with conn.cursor() as cur:
                    cur.execute("UPDATE pozadavky SET messagesProcessed = NOW() WHERE id = %s", (req_id,))
                conn.commit()
            time.sleep(0.3)  # throttle API calls
        except Exception as e:
            # best-effort: log and continue with the next request
            print(f"   ❌ Chyba u {req_id}: {e}")
    conn.close()
    print("\n🎉 Delta sync zpráv a příloh DOKONČEN")
if __name__ == "__main__":
    main()
+177
View File
@@ -0,0 +1,177 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Download all attachments for pozadavky where attachmentsProcessed IS NULL
Store them in MySQL table `medevio_downloads` on 192.168.1.50.
"""
import zlib
import json
import requests
import pymysql
from pathlib import Path
from datetime import datetime
import time
import sys
# Force UTF-8 output
try:
sys.stdout.reconfigure(encoding='utf-8')
sys.stderr.reconfigure(encoding='utf-8')
except AttributeError:
pass
# ==============================
# 🛡 SAFE PRINT
# ==============================
def safe_print(text: str):
    """Print *text*, degrading gracefully on consoles that are not UTF-8."""
    encoding = sys.stdout.encoding or ""
    if not encoding or not encoding.lower().startswith("utf"):
        # Console cannot render astral-plane characters (emoji) -> drop them.
        text = ''.join(ch for ch in text if ord(ch) < 65536)
    try:
        print(text)
    except UnicodeEncodeError:
        # Last resort: keep ASCII only.
        print(''.join(ch for ch in text if ord(ch) < 128))
# ==============================
# 🔧 CONFIGURATION (.50)
# ==============================
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
CLINIC_SLUG = "mudr-buzalkova"
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3306,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
CREATED_AFTER = "2024-12-01"
GRAPHQL_QUERY = r"""
query ClinicRequestDetail_GetPatientRequest2($requestId: UUID!) {
patientRequestMedicalRecords: listMedicalRecordsForPatientRequest(
attachmentTypes: [ECRF_FILL_ATTACHMENT, MESSAGE_ATTACHMENT, PATIENT_REQUEST_ATTACHMENT]
patientRequestId: $requestId
pageInfo: {first: 100, offset: 0}
) {
attachmentType
id
medicalRecord {
contentType
description
downloadUrl
id
url
visibleToPatient
}
}
}
"""
def extract_filename_from_url(url: str) -> str:
    """Return the last path segment of *url*, with any query string removed.

    Falls back to ``"unknown_filename"`` when *url* is not a usable string
    (e.g. ``None``).
    """
    try:
        return url.split("/")[-1].split("?")[0]
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit /
        # KeyboardInterrupt; narrowed to Exception.
        return "unknown_filename"
def read_token(p: Path) -> str:
    """Load the API token from *p*, stripping an optional 'Bearer ' prefix."""
    raw = p.read_text(encoding="utf-8").strip()
    prefix = "Bearer "
    if raw.startswith(prefix):
        return raw.split(" ", 1)[1]
    return raw
def main():
    """Download missing Medevio attachments into MySQL.

    Flow: load the set of already-stored attachment IDs, pick `pozadavky`
    rows not yet marked `attachmentsProcessed`, query the GraphQL API for
    each request's medical records, download any new file, insert it as a
    BLOB into `medevio_downloads`, and finally stamp the request as
    processed.
    """
    token = read_token(TOKEN_PATH)
    headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
    conn = pymysql.connect(**DB_CONFIG)
    # 1. Load the IDs of attachments we already have, to skip re-downloads.
    with conn.cursor() as cur:
        cur.execute("SELECT attachment_id FROM medevio_downloads")
        existing_ids = {row["attachment_id"] for row in cur.fetchall()}
    safe_print(f"✅ V databázi již máme {len(existing_ids)} příloh.")
    # 2. Select unprocessed requests (optionally limited by CREATED_AFTER).
    sql = "SELECT id, pacient_prijmeni, pacient_jmeno, createdAt FROM pozadavky WHERE attachmentsProcessed IS NULL"
    params = []
    if CREATED_AFTER:
        sql += " AND createdAt >= %s"
        params.append(CREATED_AFTER)
    with conn.cursor() as cur:
        cur.execute(sql, params)
        req_rows = cur.fetchall()
    safe_print(f"📋 Počet požadavků ke stažení příloh: {len(req_rows)}")
    for i, row in enumerate(req_rows, 1):
        req_id = row["id"]
        prijmeni = row.get("pacient_prijmeni") or "Neznamy"
        created_date = row.get("createdAt") or datetime.now()
        safe_print(f"\n[{i}/{len(req_rows)}] 🧾 {prijmeni} ({req_id})")
        payload = {
            "operationName": "ClinicRequestDetail_GetPatientRequest2",
            "query": GRAPHQL_QUERY,
            "variables": {"requestId": req_id},
        }
        try:
            r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
            attachments = r.json().get("data", {}).get("patientRequestMedicalRecords", [])
            if attachments:
                with conn.cursor() as cur:
                    for a in attachments:
                        m = a.get("medicalRecord") or {}
                        att_id = a.get("id")
                        if att_id in existing_ids:
                            continue
                        url = m.get("downloadUrl")
                        if url:
                            att_r = requests.get(url, timeout=30)
                            if att_r.status_code == 200:
                                content = att_r.content
                                filename = extract_filename_from_url(url)
                                cur.execute("""
                                    INSERT INTO medevio_downloads (
                                        request_id, attachment_id, attachment_type,
                                        filename, content_type, file_size,
                                        created_at, file_content
                                    ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
                                """, (req_id, att_id, a.get("attachmentType"), filename,
                                      m.get("contentType"), len(content), created_date, content))
                                existing_ids.add(att_id)
                                # NOTE(review): "(unknown)" looks like a lost
                                # placeholder — presumably the filename was
                                # meant to be interpolated here; confirm.
                                safe_print(f" 💾 Uloženo: (unknown) ({len(content) / 1024:.1f} kB)")
                conn.commit()
            # Mark the request processed even when no attachments were found,
            # so it is not re-queried on the next run.
            with conn.cursor() as cur:
                cur.execute("UPDATE pozadavky SET attachmentsProcessed = NOW() WHERE id = %s", (req_id,))
            conn.commit()
            time.sleep(0.3)
        except Exception as e:
            # Best-effort loop: one failing request must not stop the batch.
            print(f" ❌ Chyba u {req_id}: {e}")
    conn.close()
    safe_print("\n🎯 Všechny přílohy byly zpracovány.")
if __name__ == "__main__":
main()
@@ -0,0 +1,232 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import shutil
import pymysql
import re
from pathlib import Path
from datetime import datetime
from collections import defaultdict
import time
import sys
# Force UTF-8 output even under Windows Task Scheduler
import sys
try:
sys.stdout.reconfigure(encoding='utf-8')
sys.stderr.reconfigure(encoding='utf-8')
except AttributeError:
# Python < 3.7 fallback (not needed for you, but safe)
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
# ==============================
# 🛡 SAFE PRINT FOR CP1250 / EMOJI
# ==============================
def safe_print(text: str = ""):
    """Print *text*; strip characters the current console encoding cannot show."""
    out_enc = (sys.stdout.encoding or "").lower()
    if not out_enc.startswith("utf"):
        # Non-UTF console (e.g. cp1250 under Task Scheduler): drop emoji and
        # anything outside the Basic Multilingual Plane.
        text = ''.join(ch for ch in text if ord(ch) < 65536)
    try:
        print(text)
    except UnicodeEncodeError:
        # Final fallback: plain ASCII.
        print(''.join(ch for ch in text if ord(ch) < 128))
# ==============================
# ⚙️ CONFIGURATION
# ==============================
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3306,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
}
BASE_DIR = Path(r"u:\Dropbox\Ordinace\Dokumentace_ke_zpracování\MP")
BASE_DIR.mkdir(parents=True, exist_ok=True)
def sanitize_name(name: str) -> str:
    """Replace characters that are illegal in Windows filenames with '_'."""
    cleaned = re.sub(r'[<>:"/\\|?*\x00-\x1F]', "_", name)
    return cleaned.strip()
def make_abbrev(title: str) -> str:
    """Abbreviate *title*: first letter of each word, digit-runs kept whole,
    uppercased. Empty/None input yields ''."""
    if not title:
        return ""
    tokens = re.findall(r"[A-Za-zÁ-Žá-ž0-9]+", title)
    pieces = [tok if tok.isdigit() else tok[0] for tok in tokens]
    return "".join(pieces).upper()
# ==============================
# 🧹 DELETE UNEXPECTED FILES
# ==============================
def clean_folder(folder: Path, valid_files: set):
    """Delete files in *folder* whose sanitized names are not in *valid_files*.

    Silently returns when the folder does not exist; deletion failures are
    logged but do not raise.
    """
    if not folder.exists():
        return
    for f in folder.iterdir():
        if f.is_file():
            # NOTE(review): str.startswith("") is always True, so every file
            # hits this `continue` and nothing is ever deleted — the function
            # is currently a no-op. This looks like a marker character (e.g.
            # a "▲" flag prefix) lost in an encoding round-trip; confirm the
            # intended prefix before relying on cleanup.
            if f.name.startswith(""):
                continue
            sanitized = sanitize_name(f.name)
            if sanitized not in valid_files:
                safe_print(f"🗑️ Removing unexpected file: {f.name}")
                try:
                    f.unlink()
                except Exception as e:
                    safe_print(f"⚠️ Could not delete {f}: {e}")
# ==============================
# 📦 DB CONNECTION
# ==============================
# Open one connection with two cursors: a dict cursor for metadata and a
# plain cursor for the (potentially large) BLOB fetches.
conn = pymysql.connect(**DB_CONFIG)
cur_meta = conn.cursor(pymysql.cursors.DictCursor)
cur_blob = conn.cursor()
safe_print("🔍 Loading metadata from DB (FAST)…")
# Metadata only (no BLOBs) for attachments of requests touched in the last
# 14 days — keeps the initial query small.
cur_meta.execute("""
    SELECT d.id AS download_id,
           d.request_id,
           d.filename,
           d.created_at,
           p.updatedAt AS req_updated_at,
           p.pacient_jmeno AS jmeno,
           p.pacient_prijmeni AS prijmeni,
           p.displayTitle
    FROM medevio_downloads d
    JOIN pozadavky p ON d.request_id = p.id
    WHERE p.updatedAt >= DATE_SUB(NOW(), INTERVAL 14 DAY)
    ORDER BY p.updatedAt DESC
""")
rows = cur_meta.fetchall()
safe_print(f"📋 Found {len(rows)} attachment records.\n")
# ==============================
# 🧠 MAIN LOOP WITH PROGRESS
# ==============================
# Group rows by request_id in Python — avoids N extra SELECT filename queries
rows_by_request = defaultdict(list)
for r in rows:
    rows_by_request[r["request_id"]].append(r)
total_requests = len(rows_by_request)
safe_print(f"🔄 Processing {total_requests} unique requests...\n")
# Pre-index BASE_DIR once — avoids iterdir() called twice per request
folder_list = [(f, f.name) for f in BASE_DIR.iterdir() if f.is_dir()]
for current_index, (req_id, req_rows) in enumerate(rows_by_request.items(), 1):
    percent = (current_index / total_requests) * 100
    # NOTE(review): no separator between total and req_id — presumably a
    # dash/arrow character was lost in encoding; cosmetic only.
    safe_print(f"\n[ {percent:5.1f}% ] Processing request {current_index} / {total_requests}{req_id}")
    # ========== VALID FILENAMES from already-loaded rows ==========
    # sanitized name → original filename (original needed for the DB query)
    file_map = {sanitize_name(r["filename"]): r["filename"] for r in req_rows}
    valid_files = set(file_map.keys())
    # ========== BUILD FOLDER NAME: "<date> <surname>, <name> [<abbrev>] <id>" ==========
    r = req_rows[0]
    updated_at = r["req_updated_at"] or datetime.now()
    date_str = updated_at.strftime("%Y-%m-%d")
    prijmeni = sanitize_name(r["prijmeni"] or "Unknown")
    jmeno = sanitize_name(r["jmeno"] or "")
    title = r.get("displayTitle") or ""
    abbr = make_abbrev(title)
    clean_folder_name = sanitize_name(
        f"{date_str} {prijmeni}, {jmeno} [{abbr}] {req_id}"
    )
    # ========== DETECT EXISTING FOLDER from pre-built index ==========
    req_id_str = str(req_id)
    matching = [f for f, name in folder_list if req_id_str in name]
    existing_folder = matching[0] if matching else None
    main_folder = existing_folder if existing_folder else BASE_DIR / clean_folder_name
    # ========== MERGE DUPLICATES into the chosen main folder ==========
    possible_dups = [f for f, name in folder_list if req_id_str in name and f != main_folder]
    for dup in possible_dups:
        safe_print(f"♻️ Merging duplicate folder: {dup.name}")
        clean_folder(dup, valid_files)
        main_folder.mkdir(parents=True, exist_ok=True)
        for f in dup.iterdir():
            if f.is_file():
                target = main_folder / f.name
                if not target.exists():
                    f.rename(target)
        shutil.rmtree(dup, ignore_errors=True)
    # ========== CLEAN MAIN FOLDER ==========
    clean_folder(main_folder, valid_files)
    # ========== DOWNLOAD MISSING FILES (batch blob fetch per request) ==========
    main_folder.mkdir(parents=True, exist_ok=True)
    added_new_file = False
    # NOTE(review): ("" + fn) is just fn, so both existence checks are the
    # same path — the second check was presumably for a "▲"-prefixed variant
    # whose marker character was lost; confirm.
    missing_san = [
        fn for fn in valid_files
        if not (main_folder / fn).exists() and not (main_folder / ("" + fn)).exists()
    ]
    if missing_san:
        # Fetch all missing blobs in a single query instead of one per file
        missing_orig = [file_map[fn] for fn in missing_san]
        placeholders = ",".join(["%s"] * len(missing_orig))
        cur_blob.execute(
            f"SELECT filename, file_content FROM medevio_downloads "
            f"WHERE request_id=%s AND filename IN ({placeholders})",
            [req_id] + missing_orig,
        )
        for blob_filename, content in cur_blob.fetchall():
            if not content:
                continue
            dest_plain = main_folder / sanitize_name(blob_filename)
            with open(dest_plain, "wb") as fh:
                fh.write(content)
            safe_print(f"💾 Wrote: {dest_plain.relative_to(BASE_DIR)}")
            added_new_file = True
    # ========== REMOVE ▲ FLAG IF NEW FILES ADDED ==========
    # NOTE(review): '"" in s' is always True and replace("", "") is a no-op —
    # the "▲" marker character appears to have been lost here too, so this
    # whole rename branch is effectively dead except for .strip(); confirm.
    if added_new_file and "" in main_folder.name:
        new_name = main_folder.name.replace("", "").strip()
        new_path = main_folder.parent / new_name
        if new_path != main_folder:
            try:
                main_folder.rename(new_path)
                safe_print(f"🔄 Folder flag ▲ removed → {new_name}")
                main_folder = new_path
            except Exception as e:
                safe_print(f"⚠️ Could not rename folder: {e}")
safe_print("\n🎯 Export complete.\n")
cur_blob.close()
cur_meta.close()
conn.close()
@@ -0,0 +1,146 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import shutil
import pymysql
import re
from pathlib import Path
from datetime import datetime
# ==============================
# ⚙️ CONFIGURATION
# ==============================
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3306,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
}
BASE_DIR = Path(r"u:\Dropbox\Ordinace\Dokumentace_ke_zpracování\MP")
BASE_DIR.mkdir(parents=True, exist_ok=True)
def sanitize_name(name: str) -> str:
    """Make *name* safe for use as a Windows file/folder name."""
    # Characters forbidden on NTFS plus ASCII control codes -> "_"
    safe = re.sub(r'[<>:"/\\|?*\x00-\x1F]', "_", name)
    return safe.strip()
def clean_folder(folder: Path, valid_files: set):
    """Remove files that do NOT exist in MySQL for this request."""
    if not folder.exists():
        return
    for entry in folder.iterdir():
        if not entry.is_file():
            continue
        if sanitize_name(entry.name) in valid_files:
            continue
        print(f"🗑️ Removing unexpected file: {entry.name}")
        try:
            entry.unlink()
        except Exception as e:
            print(f"⚠️ Cannot delete {entry}: {e}")
# ==============================
# 📥 LOAD EVERYTHING IN ONE QUERY
# ==============================
# Single connection / single query strategy: metadata AND BLOBs are loaded
# together, so the whole dataset is held in memory at once.
conn = pymysql.connect(**DB_CONFIG)
cur = conn.cursor(pymysql.cursors.DictCursor)
print("📥 Loading ALL metadata + BLOBs with ONE MySQL query…")
cur.execute("""
    SELECT
        d.id AS download_id,
        d.request_id,
        d.filename,
        d.file_content,
        p.updatedAt AS req_updated_at,
        p.pacient_jmeno AS jmeno,
        p.pacient_prijmeni AS prijmeni
    FROM medevio_downloads d
    JOIN pozadavky p ON d.request_id = p.id
    ORDER BY p.updatedAt DESC, d.created_at ASC
""")
rows = cur.fetchall()
print(f"📦 Loaded {len(rows)} total file rows.\n")
conn.close()
# ==============================
# 🔄 ORGANIZE ROWS PER REQUEST
# ==============================
# NOTE: the name `requests` is only safe here because this script does not
# import the `requests` HTTP library.
requests = {}  # req_id → list of file dicts
for r in rows:
    req_id = r["request_id"]
    if req_id not in requests:
        requests[req_id] = []
    requests[req_id].append(r)
print(f"📌 Unique requests: {len(requests)}\n")
# ==============================
# 🧠 MAIN LOOP SAME LOGIC AS BEFORE
# ==============================
for req_id, filelist in requests.items():
    # ========== GET UPDATEDAT (folder date comes from request update time) ==========
    any_row = filelist[0]
    updated_at = any_row["req_updated_at"] or datetime.now()
    date_str = updated_at.strftime("%Y-%m-%d")
    prijmeni = sanitize_name(any_row["prijmeni"] or "Unknown")
    jmeno = sanitize_name(any_row["jmeno"] or "")
    folder_name = sanitize_name(f"{date_str} {prijmeni}, {jmeno} {req_id}")
    main_folder = BASE_DIR / folder_name
    # ========== VALID FILES (sanitized names expected on disk) ==========
    valid_files = {sanitize_name(r["filename"]) for r in filelist}
    # ========== FIND OLD FOLDERS carrying the same request id ==========
    possible_dups = [
        f for f in BASE_DIR.iterdir()
        if f.is_dir() and req_id in f.name and f != main_folder
    ]
    # ========== MERGE OLD FOLDERS into the canonical one ==========
    for dup in possible_dups:
        print(f"♻️ Merging folder: {dup.name}")
        clean_folder(dup, valid_files)
        main_folder.mkdir(parents=True, exist_ok=True)
        for f in dup.iterdir():
            if f.is_file():
                target = main_folder / f.name
                if not target.exists():
                    f.rename(target)
        shutil.rmtree(dup, ignore_errors=True)
    # ========== CLEAN MAIN FOLDER of files no longer in the DB ==========
    main_folder.mkdir(parents=True, exist_ok=True)
    clean_folder(main_folder, valid_files)
    # ========== SAVE FILES (BLOBs already in memory, so this is fast) ==========
    for r in filelist:
        filename = sanitize_name(r["filename"])
        dest = main_folder / filename
        if dest.exists():
            continue
        content = r["file_content"]
        if not content:
            continue
        with open(dest, "wb") as f:
            f.write(content)
        print(f"💾 Saved: {dest.relative_to(BASE_DIR)}")
print("\n🎯 Export complete.\n")
@@ -0,0 +1,101 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import zlib
import pymysql
import re
from pathlib import Path
from datetime import datetime
# ==============================
# ⚙️ CONFIGURATION
# ==============================
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
}
BASE_DIR = Path(r"u:\Dropbox\Ordinace\Dokumentace_ke_zpracování\MP")
BASE_DIR.mkdir(parents=True, exist_ok=True)
_INVALID_CHARS = re.compile(r'[<>:"/\\|?*\x00-\x1F]')


def sanitize_name(name: str) -> str:
    """Replace characters that Windows forbids in filenames with '_'."""
    return _INVALID_CHARS.sub("_", name).strip()
# ==============================
# 📦 STREAMING EXPORT WITH TRIANGLE CHECK
# ==============================
# Two cursors: dict cursor for metadata, plain cursor for lazy BLOB fetches
# (one extra query per file, only when the file is actually missing on disk).
conn = pymysql.connect(**DB_CONFIG)
cur_meta = conn.cursor(pymysql.cursors.DictCursor)
cur_blob = conn.cursor()
cur_meta.execute("""
    SELECT id, request_id, attachment_id, filename, pacient_jmeno,
           pacient_prijmeni, created_at, downloaded_at
    FROM medevio_downloads
    WHERE file_content IS NOT NULL;
""")
rows = cur_meta.fetchall()
print(f"📋 Found {len(rows)} records to check/export")
skipped, exported = 0, 0
for r in rows:
    try:
        created = r["created_at"] or r["downloaded_at"] or datetime.now()
        date_str = created.strftime("%Y-%m-%d")
        prijmeni = sanitize_name(r["pacient_prijmeni"] or "Unknown")
        jmeno = sanitize_name(r["pacient_jmeno"] or "")
        # Short stable folder key derived from the request UUID (CRC32 hex).
        crc = f"{zlib.crc32(r['request_id'].encode('utf-8')) & 0xFFFFFFFF:08X}"
        # Base (non-triangle) and processed (triangle) folder variants.
        # NOTE(review): the "triangle" marker character appears to have been
        # lost in encoding, so tri_folder differs from base_folder only by a
        # missing space; confirm the intended prefix.
        base_folder = sanitize_name(f"{date_str} {prijmeni}, {jmeno} {crc}")
        tri_folder = sanitize_name(f"{date_str}{prijmeni}, {jmeno} {crc}")
        base_path = BASE_DIR / base_folder
        tri_path = BASE_DIR / tri_folder
        filename = sanitize_name(r["filename"] or f"unknown_{r['id']}.bin")
        file_path_base = base_path / filename
        file_path_tri = tri_path / filename
        # 🟡 Skip if exists in either variant (already exported/processed).
        if file_path_base.exists() or file_path_tri.exists():
            skipped += 1
            found_in = "" if file_path_tri.exists() else ""
            print(f"⏭️ Skipping existing{found_in}: (unknown)")
            continue
        # Make sure base folder exists before saving
        base_path.mkdir(parents=True, exist_ok=True)
        # 2️⃣ Fetch blob only now, to avoid loading all BLOBs up front.
        cur_blob.execute("SELECT file_content FROM medevio_downloads WHERE id = %s", (r["id"],))
        blob = cur_blob.fetchone()[0]
        if blob:
            with open(file_path_base, "wb") as f:
                f.write(blob)
            exported += 1
            print(f"✅ Saved: {file_path_base.relative_to(BASE_DIR)}")
        else:
            print(f"⚠️ No content for id={r['id']}")
    except Exception as e:
        # Best-effort: one bad record must not abort the whole export.
        print(f"❌ Error for id={r['id']}: {e}")
cur_blob.close()
cur_meta.close()
conn.close()
print(f"\n🎯 Export complete — {exported} new files saved, {skipped} skipped.\n")
@@ -0,0 +1,113 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import zlib
import pymysql
import re
from pathlib import Path
from datetime import datetime
# ==============================
# ⚙️ CONFIGURATION
# ==============================
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
}
BASE_DIR = Path(r"u:\Dropbox\Ordinace\Dokumentace_ke_zpracování\MP")
BASE_DIR.mkdir(parents=True, exist_ok=True)
def sanitize_name(name: str) -> str:
    """Replace invalid filename characters with underscore and trim whitespace."""
    cleaned = re.sub(r'[<>:"/\\|?*\x00-\x1F]', "_", name)
    return cleaned.strip()
# ==============================
# 📦 EXPORT WITH JOIN TO POZADAVKY
# ==============================
# Metadata cursor (dict rows) + separate plain cursor for lazy BLOB fetches.
conn = pymysql.connect(**DB_CONFIG)
cur_meta = conn.cursor(pymysql.cursors.DictCursor)
cur_blob = conn.cursor()
# 🎯 JOIN medevio_downloads → pozadavky so patient names always come from
# the authoritative `pozadavky` table.
cur_meta.execute("""
    SELECT d.id, d.request_id, d.attachment_id, d.filename,
           d.created_at, d.downloaded_at,
           p.pacient_jmeno AS jmeno,
           p.pacient_prijmeni AS prijmeni
    FROM medevio_downloads d
    JOIN pozadavky p ON d.request_id = p.id
    WHERE d.file_content IS NOT NULL;
""")
rows = cur_meta.fetchall()
print(f"📋 Found {len(rows)} records to check/export")
skipped, exported = 0, 0
for r in rows:
    try:
        created = r["created_at"] or r["downloaded_at"] or datetime.now()
        date_str = created.strftime("%Y-%m-%d")
        # 👍 Names now always correct from pozadavky
        prijmeni = sanitize_name(r["prijmeni"] or "Unknown")
        jmeno = sanitize_name(r["jmeno"] or "")
        # 🔥 Full request_id used for folder identification (not a CRC).
        full_req_id = sanitize_name(r["request_id"])
        # Folder names: normal and "triangle"-flagged variant.
        # NOTE(review): the triangle marker character appears lost in
        # encoding — tri_folder differs only by a missing space; confirm.
        base_folder = f"{date_str} {prijmeni}, {jmeno} {full_req_id}"
        tri_folder = f"{date_str}{prijmeni}, {jmeno} {full_req_id}"
        base_folder = sanitize_name(base_folder)
        tri_folder = sanitize_name(tri_folder)
        base_path = BASE_DIR / base_folder
        tri_path = BASE_DIR / tri_folder
        filename = sanitize_name(r["filename"] or f"unknown_{r['id']}.bin")
        file_path_base = base_path / filename
        file_path_tri = tri_path / filename
        # 🟡 Skip if the file already exists in either folder variant.
        if file_path_base.exists() or file_path_tri.exists():
            skipped += 1
            found_in = "" if file_path_tri.exists() else ""
            print(f"⏭️ Skipping existing{found_in}: (unknown)")
            continue
        # Ensure directory exists
        base_path.mkdir(parents=True, exist_ok=True)
        # 2️⃣ Fetch blob content only when the file is actually missing.
        cur_blob.execute(
            "SELECT file_content FROM medevio_downloads WHERE id = %s",
            (r["id"],)
        )
        blob = cur_blob.fetchone()[0]
        if blob:
            with open(file_path_base, "wb") as f:
                f.write(blob)
            exported += 1
            print(f"✅ Saved: {file_path_base.relative_to(BASE_DIR)}")
        else:
            print(f"⚠️ No content for id={r['id']}")
    except Exception as e:
        # Best-effort: keep exporting the remaining records.
        print(f"❌ Error for id={r['id']}: {e}")
cur_blob.close()
cur_meta.close()
conn.close()
print(f"\n🎯 Export complete — {exported} new files saved, {skipped} skipped.\n")
@@ -0,0 +1,92 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from pathlib import Path
import sys
# UTF-8 safety
try:
sys.stdout.reconfigure(encoding='utf-8')
except:
pass
# === CONFIG ===
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
GRAPHQL_URL = "https://api.medevio.cz/graphql"
REQUEST_ID = "e17536c4-ed22-4242-ada5-d03713e0b7ac" # požadavek který sledujeme
def read_token(path: Path) -> str:
    """Return the token stored in *path*, dropping a leading 'Bearer ' if present."""
    raw = path.read_text().strip()
    if not raw.startswith("Bearer "):
        return raw
    return raw.split(" ", 1)[1]
# === QUERY ===
QUERY = r"""
query ClinicRequestNotes_Get($patientRequestId: String!) {
notes: getClinicPatientRequestNotes(requestId: $patientRequestId) {
id
content
createdAt
updatedAt
createdBy {
id
name
surname
}
}
}
"""
def run_query(request_id, token):
    """POST the clinic-notes GraphQL query for *request_id* and return parsed JSON.

    Raises requests.HTTPError on non-2xx responses.
    """
    payload = {
        "operationName": "ClinicRequestNotes_Get",
        "query": QUERY,
        "variables": {"patientRequestId": request_id},
    }
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    # timeout added: without it an unresponsive server hangs the script
    # forever; 30 s matches the sibling scripts in this repo.
    r = requests.post(GRAPHQL_URL, json=payload, headers=headers, timeout=30)
    r.raise_for_status()
    return r.json()
def main():
    """Fetch and pretty-print all internal clinic notes for REQUEST_ID."""
    token = read_token(TOKEN_PATH)
    print(f"🔍 Čtu interní klinické poznámky k požadavku {REQUEST_ID} ...\n")
    data = run_query(REQUEST_ID, token)
    notes = data.get("data", {}).get("notes", [])
    if not notes:
        print("📭 Žádné klinické poznámky nejsou uložené.")
        return
    print(f"📌 Nalezeno {len(notes)} poznámek:\n")
    # One separator-delimited block per note: id, author, timestamps, body.
    for n in notes:
        print("──────────────────────────────")
        print(f"🆔 ID: {n['id']}")
        print(f"👤 Vytvořil: {n['createdBy']['surname']} {n['createdBy']['name']}")
        print(f"📅 createdAt: {n['createdAt']}")
        print(f"🕒 updatedAt: {n['updatedAt']}")
        print("📝 Obsah:")
        print(n['content'])
        print("")
if __name__ == "__main__":
main()
@@ -0,0 +1,121 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from pathlib import Path
import sys
# UTF-8 handling
try:
sys.stdout.reconfigure(encoding='utf-8')
except:
pass
# === CONFIG ===
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
GRAPHQL_URL = "https://api.medevio.cz/graphql"
REQUEST_ID = "e17536c4-ed22-4242-ada5-d03713e0b7ac" # požadavek
NOTE_PREPEND_TEXT = "🔥 NOVÝ TESTOVACÍ ŘÁDEK\n" # text, který se přidá NA ZAČÁTEK
# === Helpers ===
def read_token(p: Path) -> str:
    """Read the API token from *p*; a 'Bearer ' prefix, if any, is removed."""
    content = p.read_text().strip()
    if content.startswith("Bearer "):
        _, _, bare = content.partition(" ")
        return bare
    return content
# === Queries ===
QUERY_GET_NOTES = r"""
query ClinicRequestNotes_Get($patientRequestId: String!) {
notes: getClinicPatientRequestNotes(requestId: $patientRequestId) {
id
content
createdAt
updatedAt
createdBy {
id
name
surname
}
}
}
"""
MUTATION_UPDATE_NOTE = r"""
mutation ClinicRequestNotes_Update($noteInput: UpdateClinicPatientRequestNoteInput!) {
updateClinicPatientRequestNote(noteInput: $noteInput) {
id
}
}
"""
# === Core functions ===
def gql(query, variables, token):
    """POST a GraphQL *query* with *variables* to Medevio; return decoded JSON.

    Raises requests.HTTPError on non-2xx responses.
    """
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    payload = {"query": query, "variables": variables}
    # timeout added: a hung connection would otherwise block forever;
    # 30 s matches the sibling scripts in this repo.
    r = requests.post(GRAPHQL_URL, json=payload, headers=headers, timeout=30)
    r.raise_for_status()
    return r.json()
def get_internal_note(request_id, token):
    """Return the first internal clinic note for *request_id*, or None."""
    response = gql(QUERY_GET_NOTES, {"patientRequestId": request_id}, token)
    notes_list = response.get("data", {}).get("notes", [])
    if not notes_list:
        return None
    return notes_list[0]
def update_internal_note(note_id, new_content, token):
    """Push updated note *new_content* for note *note_id* to Medevio."""
    note_input = {"id": note_id, "content": new_content}
    return gql(MUTATION_UPDATE_NOTE, {"noteInput": note_input}, token)
# === MAIN ===
def main():
    """Prepend NOTE_PREPEND_TEXT to the internal clinic note of REQUEST_ID.

    Reads the existing note, prints before/after content for inspection,
    then sends the update mutation. Aborts when no note exists.
    """
    token = read_token(TOKEN_PATH)
    print(f"🔍 Načítám interní poznámku pro požadavek {REQUEST_ID}...\n")
    note = get_internal_note(REQUEST_ID, token)
    if not note:
        print("❌ Nebyla nalezena žádná interní klinická poznámka!")
        return
    note_id = note["id"]
    old_content = note["content"] or ""
    print("📄 Původní obsah:")
    print(old_content)
    print("────────────────────────────\n")
    # ===============================
    # PREPEND new text (added at the very beginning of the note)
    # ===============================
    new_content = NOTE_PREPEND_TEXT + old_content
    print("📝 Nový obsah který odešlu:")
    print(new_content)
    print("────────────────────────────\n")
    # Send the update mutation and echo the raw API response.
    result = update_internal_note(note_id, new_content, token)
    print(f"✅ Hotovo! Poznámka {note_id} aktualizována.")
    print(result)
if __name__ == "__main__":
main()
@@ -0,0 +1,261 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import mysql.connector
from pathlib import Path
import sys
from datetime import datetime
# UTF-8 handling
try:
sys.stdout.reconfigure(encoding='utf-8')
except:
pass
# === KONFIGURACE ===
# --- Medevio API ---
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
GRAPHQL_URL = "https://api.medevio.cz/graphql"
# --- ZPRACOVÁNÍ ---
# Zadejte počet požadavků ke zpracování.
# 0 znamená zpracovat VŠECHNY nesynchronizované požadavky.
PROCESS_LIMIT = 10 # <-- Používáme PROCESS_LIMIT
# --- MySQL DB ---
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
}
# === Helpers ===
def read_token(p: Path) -> str:
    """Read the bearer token from a text file, without the 'Bearer ' prefix."""
    raw = p.read_text().strip()
    if not raw.startswith("Bearer "):
        return raw
    return raw.split(" ", 1)[1]
# === DB Funkce ===
def get_requests_to_process_from_db(limit):
    """Fetch requests (id, title, first/last name) awaiting note sync from MySQL.

    Selects open requests (doneAt IS NULL) that have not been synced yet
    (noteSyncedAt IS NULL), newest first. When *limit* > 0 a LIMIT clause
    is applied; limit == 0 means "all". Returns a list of dicts; returns
    an empty list on a MySQL error.
    """
    if limit == 0:
        print("🔍 Připojuji se k MySQL a hledám **VŠECHNY** nesynchronizované požadavky...")
    else:
        print(f"🔍 Připojuji se k MySQL a hledám **{limit}** nesynchronizovaných požadavků...")
    requests_list = []
    conn = None
    try:
        conn = mysql.connector.connect(**DB_CONFIG)
        cursor = conn.cursor()
        # Base SQL query (no LIMIT yet)
        query = """
            SELECT id, displayTitle, pacient_jmeno, pacient_prijmeni
            FROM pozadavky
            WHERE doneAt IS NULL
              AND noteSyncedAt IS NULL
            ORDER BY updatedAt DESC
        """
        # Conditionally append the LIMIT clause. `limit` comes from the
        # PROCESS_LIMIT config constant (an int), so the f-string here is
        # not exposed to external input.
        if limit > 0:
            query += f"LIMIT {limit};"
        else:
            query += ";"
        cursor.execute(query)
        results = cursor.fetchall()
        for result in results:
            request_id, display_title, jmeno, prijmeni = result
            requests_list.append({
                "id": request_id,
                "displayTitle": display_title,
                "jmeno": jmeno,
                "prijmeni": prijmeni
            })
        cursor.close()
        if requests_list:
            print(f"✅ Nalezeno {len(requests_list)} požadavků ke zpracování.")
        else:
            print("❌ Nebyl nalezen žádný nesynchronizovaný otevřený požadavek v DB.")
        return requests_list
    except mysql.connector.Error as err:
        # DB problems are reported but not fatal: caller gets an empty list.
        print(f"❌ Chyba při připojení/dotazu MySQL: {err}")
        return []
    finally:
        # Always release the connection, whether or not the query succeeded.
        if conn and conn.is_connected():
            conn.close()
def update_db_sync_time(request_id, conn):
    """Stamp pozadavky.noteSyncedAt = now() for *request_id*.

    Uses the caller-provided open connection *conn* (shared across the main
    loop) and commits immediately, so each processed request is persisted
    even if a later one fails.
    """
    cursor = conn.cursor()
    current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    update_query = """
    UPDATE pozadavky
    SET noteSyncedAt = %s
    WHERE id = %s;
    """
    cursor.execute(update_query, (current_time, request_id))
    conn.commit()
    cursor.close()
    print(f"  (DB: Čas synchronizace pro {request_id} uložen)")
# === GraphQL Operace (Beze Změny) ===
QUERY_GET_NOTE = r"""
query ClinicRequestNotes_Get($patientRequestId: String!) {
notes: getClinicPatientRequestNotes(requestId: $patientRequestId) {
id
content
}
}
"""
MUTATION_UPDATE_NOTE = r"""
mutation ClinicRequestNotes_Update($noteInput: UpdateClinicPatientRequestNoteInput!) {
updateClinicPatientRequestNote(noteInput: $noteInput) {
id
}
}
"""
MUTATION_CREATE_NOTE = r"""
mutation ClinicRequestNotes_Create($noteInput: CreateClinicPatientRequestNoteInput!) {
createClinicPatientRequestNote(noteInput: $noteInput) {
id
}
}
"""
def gql(query, variables, token):
    """Generic helper: POST a GraphQL request to Medevio, return decoded JSON.

    Raises requests.HTTPError on non-2xx responses.
    """
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    payload = {"query": query, "variables": variables}
    # timeout added: without it a stalled connection blocks the whole sync
    # run; 30 s matches the sibling scripts in this repo.
    r = requests.post(GRAPHQL_URL, json=payload, headers=headers, timeout=30)
    r.raise_for_status()
    return r.json()
def get_internal_note(request_id, token):
    """Return the single internal note (content and id) for a request, or None."""
    payload = gql(QUERY_GET_NOTE, {"patientRequestId": request_id}, token)
    found = payload.get("data", {}).get("notes", [])
    if not found:
        return None
    return found[0]
def update_internal_note(note_id, new_content, token):
    """Update the content of an existing Medevio note."""
    note_input = {"id": note_id, "content": new_content}
    return gql(MUTATION_UPDATE_NOTE, {"noteInput": note_input}, token)
def create_internal_note(request_id, content, token):
    """Create a new internal note attached to a Medevio request."""
    note_input = {"requestId": request_id, "content": content}
    return gql(MUTATION_CREATE_NOTE, {"noteInput": note_input}, token)
# === MAIN ===
def main():
    """Sync a batch of open requests: prepend "ID: <uuid>" to each request's
    internal note in Medevio (creating the note if missing), then stamp
    noteSyncedAt in the local DB for every successful update."""
    token = read_token(TOKEN_PATH)
    # 1. Get the list of request IDs to process (bounded by PROCESS_LIMIT)
    requests_to_process = get_requests_to_process_from_db(PROCESS_LIMIT)
    if not requests_to_process:
        return
    # Open one DB connection up front and reuse it for every timestamp update.
    conn = mysql.connector.connect(**DB_CONFIG)
    print("\n=============================================")
    print(f"START ZPRACOVÁNÍ {len(requests_to_process)} POŽADAVKŮ")
    print("=============================================\n")
    for idx, request in enumerate(requests_to_process, 1):
        request_id = request["id"]
        print(
            f"[{idx}/{len(requests_to_process)}] Zpracovávám požadavek: {request['prijmeni']} {request['jmeno']} (ID: {request_id})")
        # 2. Text to prepend / use as initial note content
        prepend_text = f"ID: {request_id}\n"
        # 3. Try to fetch an existing internal note from Medevio
        note = get_internal_note(request_id, token)
        medevio_update_success = False
        if note:
            # A) Note exists -> prepend and UPDATE
            note_id = note["id"]
            old_content = note["content"] or ""
            new_content = prepend_text + old_content
            try:
                # Send the update
                update_internal_note(note_id, new_content, token)
                print(f"   (Medevio: Poznámka {note_id} **aktualizována**.)")
                medevio_update_success = True
            except requests.exceptions.HTTPError as e:
                print(f"   ❌ Chyba při aktualizaci Medevio API: {e}")
        else:
            # B) Note does not exist -> CREATE it
            new_content = prepend_text.strip()
            try:
                # Send the create mutation
                result = create_internal_note(request_id, new_content, token)
                new_note_id = result.get("data", {}).get("createClinicPatientRequestNote", {}).get("id", "N/A")
                print(f"   (Medevio: Nová poznámka {new_note_id} **vytvořena**.)")
                medevio_update_success = True
            except requests.exceptions.HTTPError as e:
                print(f"   ❌ Chyba při vytváření Medevio API: {e}")
        # 4. Persist the sync timestamp only when Medevio accepted the change,
        #    so failed requests are retried on the next run.
        if medevio_update_success:
            update_db_sync_time(request_id, conn)
        print("---------------------------------------------")
    # Close the shared DB connection after the loop finishes.
    if conn and conn.is_connected():
        conn.close()
    print("\n✅ Všechny požadavky zpracovány. Připojení k DB uzavřeno.")
if __name__ == "__main__":
main()
@@ -0,0 +1,313 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Full Medevio Report:
- Agenda (API, next 30 days)
- Otevřené požadavky (MySQL)
- Merged (Agenda + Open, deduplicated)
- Vaccine sheets (from merged data)
"""
import re
import json
import pymysql
import requests
import pandas as pd
from pathlib import Path
from datetime import datetime
from dateutil import parser, tz
from dateutil.relativedelta import relativedelta
from openpyxl import load_workbook
from openpyxl.styles import Font, Alignment, PatternFill, Border, Side
from openpyxl.utils import get_column_letter
from openpyxl.utils.dataframe import dataframe_to_rows
# ==================== CONFIG ====================
GRAPHQL_URL = "https://api.medevio.cz/graphql"
CALENDAR_ID = "144c4e12-347c-49ca-9ec0-8ca965a4470d"
CLINIC_SLUG = "mudr-buzalkova"
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3306,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
"cursorclass": pymysql.cursors.DictCursor,
}
# Output directory for the Excel report; created on first run.
EXPORT_DIR = Path(r"u:\Dropbox\Ordinace\Reporty")
EXPORT_DIR.mkdir(exist_ok=True, parents=True)
# Delete previous reports so only the newest timestamped file remains.
for old in EXPORT_DIR.glob("* Agenda + Požadavky.xlsx"):
    old.unlink()
    print(f"🗑️ Deleted old report: {old.name}")
timestamp = datetime.now().strftime("%Y-%m-%d %H-%M-%S")
xlsx_path = EXPORT_DIR / f"{timestamp} Agenda + Požadavky.xlsx"
# ==================== LOAD TOKEN ====================
# The token file lives one directory above this script.
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
gateway_token = TOKEN_PATH.read_text(encoding="utf-8").strip()
# Browser-like headers; origin/referer mimic the Medevio web app.
headers = {
    "content-type": "application/json",
    "authorization": f"Bearer {gateway_token}",
    "origin": "https://my.medevio.cz",
    "referer": "https://my.medevio.cz/",
}
# ==================== STYLING ====================
widths = {1: 11, 2: 13, 3: 45, 4: 30, 5: 15, 6: 15, 7: 30, 8: 15, 9: 37, 10: 37}
header_fill = PatternFill("solid", fgColor="FFFF00")
alt_fill = PatternFill("solid", fgColor="F2F2F2")
thin_border = Border(
left=Side(style="thin", color="000000"),
right=Side(style="thin", color="000000"),
top=Side(style="thin", color="000000"),
bottom=Side(style="thin", color="000000"),
)
REQUEST_URL_TEMPLATE = "https://my.medevio.cz/mudr-buzalkova/klinika/pozadavky?pozadavek={}"
link_font = Font(color="0563C1", underline="single")
def format_ws(ws, df):
    """Apply the report's shared look to *ws*.

    Styles the header row (bold, centered, yellow, upper-cased, fixed
    widths), borders every cell, zebra-stripes even rows, turns the
    Request_ID column into hyperlinks, and sets freeze pane + auto-filter.
    """
    cols = list(df.columns)
    # 1-based worksheet index of the Request_ID column, if present.
    link_col = cols.index("Request_ID") + 1 if "Request_ID" in cols else None
    # Header row.
    for idx, _ in enumerate(cols, start=1):
        head = ws.cell(row=1, column=idx)
        head.font = Font(bold=True)
        head.alignment = Alignment(horizontal="center", vertical="center")
        head.fill = header_fill
        head.value = str(head.value).upper()
        head.border = thin_border
        ws.column_dimensions[get_column_letter(idx)].width = widths.get(idx, 20)
    # Data rows.
    row_no = 2
    for data_row in ws.iter_rows(min_row=2, max_row=ws.max_row):
        for c in data_row:
            c.border = thin_border
            if row_no % 2 == 0:
                c.fill = alt_fill
            # Clickable deep link into the Medevio UI for non-empty IDs.
            if link_col and c.column == link_col and c.value:
                c.hyperlink = REQUEST_URL_TEMPLATE.format(c.value)
                c.font = link_font
        row_no += 1
    ws.freeze_panes = "A2"
    ws.auto_filter.ref = ws.dimensions
# ==================== 1️⃣ LOAD AGENDA (API) ====================
# Fetch reservations for a one-month window starting today (UTC midnight).
print("📡 Querying Medevio API for agenda...")
dnes = datetime.utcnow().date()  # NOTE(review): utcnow() is deprecated since 3.12 — consider datetime.now(timezone.utc)
since = datetime.combine(dnes, datetime.min.time())
until = since + relativedelta(months=1)
payload = {
    "operationName": "ClinicAgenda_ListClinicReservations",
    "variables": {
        "calendarIds": [CALENDAR_ID],
        "clinicSlug": CLINIC_SLUG,
        # API expects ISO timestamps with a trailing 'Z' (UTC).
        "since": since.isoformat() + "Z",
        "until": until.isoformat() + "Z",
        "locale": "cs",
        "emptyCalendarIds": False,
    },
    "query": """query ClinicAgenda_ListClinicReservations(
  $calendarIds: [UUID!], $clinicSlug: String!,
  $locale: Locale!, $since: DateTime!, $until: DateTime!,
  $emptyCalendarIds: Boolean!
) {
  reservations: listClinicReservations(
    clinicSlug: $clinicSlug, calendarIds: $calendarIds,
    since: $since, until: $until
  ) @skip(if: $emptyCalendarIds) {
    id start end note done color
    request {
      id displayTitle(locale: $locale)
      extendedPatient {
        name surname dob insuranceCompanyObject { shortName }
      }
    }
  }
}""",
}
r = requests.post(GRAPHQL_URL, headers=headers, data=json.dumps(payload))
r.raise_for_status()
resp = r.json()
# GraphQL can return HTTP 200 with an "errors" payload — treat that as failure too.
if "errors" in resp or "data" not in resp:
    print("❌ API response:")
    print(json.dumps(resp, indent=2, ensure_ascii=False))
    raise SystemExit("API call failed - check token or query.")
reservations = resp["data"]["reservations"]
# Flatten each reservation into one report row.
rows = []
for r in reservations:  # NOTE(review): loop variable shadows the Response `r` above
    req = r.get("request") or {}
    patient = req.get("extendedPatient") or {}
    insurance = patient.get("insuranceCompanyObject") or {}
    try:
        # Convert API timestamps to local clinic time.
        start_dt = parser.isoparse(r.get("start")).astimezone(tz.gettz("Europe/Prague"))
        end_dt = parser.isoparse(r.get("end")).astimezone(tz.gettz("Europe/Prague"))
    except Exception:
        start_dt = end_dt = None  # missing/unparseable times -> blank Date/Time cells
    date_str = start_dt.strftime("%Y-%m-%d") if start_dt else ""
    time_interval = (
        f"{start_dt.strftime('%H:%M')}-{end_dt.strftime('%H:%M')}"
        if start_dt and end_dt
        else ""
    )
    rows.append(
        {
            "Date": date_str,
            "Time": time_interval,
            "Title": req.get("displayTitle") or "",
            "Patient": f"{patient.get('surname','')} {patient.get('name','')}".strip(),
            "DOB": patient.get("dob") or "",
            "Insurance": insurance.get("shortName") or "",
            "Note": r.get("note") or "",
            "Color": r.get("color") or "",
            "Request_ID": req.get("id") or "",
            "Reservation_ID": r.get("id"),
        }
    )
df_agenda = pd.DataFrame(rows).sort_values(["Date", "Time"])
print(f"✅ Loaded {len(df_agenda)} agenda rows.")
# ==================== 2️⃣ LOAD OPEN REQUESTS (MySQL) ====================
# Open = not finished (doneAt IS NULL) and not removed (removedAt IS NULL).
print("📡 Loading open requests from MySQL...")
conn = pymysql.connect(**DB_CONFIG)
with conn.cursor() as cur:
    cur.execute(
        """
        SELECT
            id AS Request_ID,
            displayTitle AS Title,
            pacient_prijmeni AS Pacient_Prijmeni,
            pacient_jmeno AS Pacient_Jmeno,
            pacient_rodnecislo AS DOB,
            createdAt AS Created
        FROM pozadavky
        WHERE doneAt IS NULL AND removedAt IS NULL
        ORDER BY createdAt DESC
        """
    )
    rows = cur.fetchall()
conn.close()
df_open = pd.DataFrame(rows)
if not df_open.empty:
    # Reshape to the same column layout as df_agenda so the frames concatenate cleanly.
    df_open["Patient"] = (
        df_open["Pacient_Prijmeni"].fillna("")
        + " "
        + df_open["Pacient_Jmeno"].fillna("")
    ).str.strip()
    df_open["Date"] = df_open["Created"].astype(str).str[:10]  # keep the YYYY-MM-DD prefix
    df_open["Time"] = ""
    df_open["Insurance"] = ""
    df_open["Note"] = ""
    df_open["Color"] = ""
    df_open["Reservation_ID"] = ""
    # NOTE(review): when df_open IS empty it skips this branch and keeps zero
    # columns; the later concat still works, just with no contribution.
    df_open = df_open[
        [
            "Date",
            "Time",
            "Title",
            "Patient",
            "DOB",
            "Insurance",
            "Note",
            "Color",
            "Request_ID",
            "Reservation_ID",
        ]
    ]
print(f"✅ Loaded {len(df_open)} open requests.")
# ==================== 3️⃣ MERGE + DEDUPLICATE ====================
print("🟢 Merging and deduplicating (Agenda preferred)...")
df_agenda["Source"] = "Agenda"
df_open["Source"] = "Open"
df_merged = pd.concat([df_agenda, df_open], ignore_index=True).fillna("")
# "Agenda" sorts before "Open", so keep="first" below prefers agenda rows.
df_merged = df_merged.sort_values(["Source"], ascending=[True])
# Drop duplicates, preferring Agenda rows.
# NOTE(review): this removes a row only when the WHOLE (Request_ID, Patient,
# Title) triple matches — not "same Request_ID OR same Patient+Title".
df_merged = df_merged.drop_duplicates(
    subset=["Request_ID", "Patient", "Title"], keep="first"
)
df_merged = df_merged.drop(columns=["Source"], errors="ignore")
df_merged = df_merged.sort_values(["Date", "Time"], na_position="last").reset_index(
    drop=True
)
print(f"✅ Total merged rows after deduplication: {len(df_merged)}")
# ==================== 4️⃣ WRITE BASE SHEETS ====================
# Write the three frames first, then re-open the workbook with openpyxl
# to apply styling (formatting is applied post-write).
with pd.ExcelWriter(xlsx_path, engine="openpyxl") as writer:
    df_agenda.to_excel(writer, sheet_name="Agenda", index=False)
    df_open.to_excel(writer, sheet_name="Otevřené požadavky", index=False)
    df_merged.to_excel(writer, sheet_name="Merged", index=False)
wb = load_workbook(xlsx_path)
for name, df_ref in [
    ("Agenda", df_agenda),
    ("Otevřené požadavky", df_open),
    ("Merged", df_merged),
]:
    ws = wb[name]
    format_ws(ws, df_ref)
# ==================== 5️⃣ VACCINE SHEETS (from MERGED) ====================
# Sheet name -> keywords that must ALL appear in the request title.
# Stems (e.g. "klíšť") deliberately match inflected Czech word forms.
VACCINE_SHEETS = {
    "Chřipka": ["očkování", "chřipka"],
    "COVID": ["očkování", "covid"],
    "Pneumokok": ["očkování", "pneumo"],
    "Hep A": ["očkování", "žloutenka a"],
    "Hep B": ["očkování", "žloutenka b"],
    "Hep A+B": ["očkování", "žloutenka a+b"],
    "Klíšťovka": ["očkování", "klíšť"],
}
def kw_pattern(kw):
    """Build the title-matching regex for keyword *kw*.

    Rejects a word character immediately before the keyword and rejects a
    following "+<word>" so e.g. "žloutenka a" does not match inside
    "žloutenka a+b".  There is intentionally no trailing word boundary:
    stems such as "klíšť" must still match inflected forms.
    """
    return "".join(["(?<!\\w)", re.escape(kw), "(?!\\s*\\+\\s*\\w)"])
# Add one worksheet per vaccine type, filtered from the merged frame.
for sheet_name, keywords in VACCINE_SHEETS.items():
    # AND the keywords together: a row survives only if its title matches each one.
    mask = pd.Series(True, index=df_merged.index)
    title_series = df_merged["Title"].fillna("")
    for kw in keywords:
        pattern = kw_pattern(kw)
        mask &= title_series.str.contains(pattern, flags=re.IGNORECASE, regex=True)
    filtered_df = df_merged[mask].copy()
    if filtered_df.empty:
        print(f"️ No matches for '{sheet_name}'")
        continue
    ws_new = wb.create_sheet(title=sheet_name)
    # dataframe_to_rows yields the header row first, then the data rows.
    for r in dataframe_to_rows(filtered_df, index=False, header=True):
        ws_new.append(r)
    format_ws(ws_new, filtered_df)
    print(f"🟡 Created sheet '{sheet_name}' ({len(filtered_df)} rows).")
# ==================== SAVE ====================
wb.save(xlsx_path)
print(f"📘 Exported full merged report:\n{xlsx_path}")
@@ -0,0 +1,33 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Check one request in MySQL."""
import pymysql
import json
# NOTE(review): root credentials hard-coded in source — move to .env / secret store.
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3306,
    "user": "root",
    "password": "Vlado9674+",
    "database": "medevio",
    "charset": "utf8mb4",
    "cursorclass": pymysql.cursors.DictCursor,
}
REQUEST_ID = "6b46b5a8-b080-4821-86b0-39adabeec86b"  # request under inspection
conn = pymysql.connect(**DB_CONFIG)
with conn.cursor() as cur:
    cur.execute("SELECT * FROM pozadavky WHERE id = %s", (REQUEST_ID,))
    row = cur.fetchone()
conn.close()
if row:
    # Convert datetime objects to strings for JSON serialization
    # (redundant with default=str below, but explicit about the format).
    for k, v in row.items():
        if hasattr(v, 'isoformat'):
            row[k] = v.isoformat()
    print(json.dumps(row, indent=2, ensure_ascii=False, default=str))
else:
    print(f"Not found: {REQUEST_ID}")
@@ -0,0 +1,63 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Quick check: fetch one request from Medevio API and print all fields."""
import json
import requests
from pathlib import Path
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"
GRAPHQL_URL = "https://api.medevio.cz/graphql"
CLINIC_SLUG = "mudr-buzalkova"
REQUEST_ID = "6b46b5a8-b080-4821-86b0-39adabeec86b"
token = TOKEN_PATH.read_text(encoding="utf-8").strip()
headers = {
"content-type": "application/json",
"authorization": f"Bearer {token}",
"origin": "https://my.medevio.cz",
"referer": "https://my.medevio.cz/",
}
# Query with as many fields as possible
QUERY = """
query GetPatientRequest2($requestId: UUID!, $clinicSlug: String!, $locale: Locale!) {
request: getPatientRequest2(patientRequestId: $requestId, clinicSlug: $clinicSlug) {
id
displayTitle(locale: $locale)
createdAt
updatedAt
doneAt
removedAt
userNote
eventType
extendedPatient(clinicSlug: $clinicSlug) {
name
surname
dob
identificationNumber
insuranceCompanyObject { shortName }
}
ecrfFilledData(locale: $locale) {
name
groups {
label
fields { name label type value }
}
}
}
}
"""
payload = {
"operationName": "GetPatientRequest2",
"query": QUERY,
"variables": {
"requestId": REQUEST_ID,
"clinicSlug": CLINIC_SLUG,
"locale": "cs",
},
}
r = requests.post(GRAPHQL_URL, json=payload, headers=headers, timeout=30)
print(json.dumps(r.json(), indent=2, ensure_ascii=False))
@@ -0,0 +1 @@
{"cookies": [{"name": "gateway-access-token", "value": "YwBgkf8McREDKs7vCZj0EZD2fJsuV8RyDPtYx7WiDoz0nFJ9kxId8kcNEPBLFSwM+Tiz80+SOdFwo+oj", "domain": "my.medevio.cz", "path": "/", "expires": 1763372319, "httpOnly": false, "secure": false, "sameSite": "Lax"}, {"name": "aws-waf-token", "value": "b6a1d4eb-4350-40e5-8e52-1f5f9600fbb8:CgoAr9pC8c6zAAAA:OYwXLY5OyitSQPl5v2oIlS+hIxsrb5LxV4VjCyE2gJCFFE5PQu+0Zbxse2ZIofrNv5QKs0TYUDTmxPhZyTr9Qtjnq2gsVQxWHXzrbebv3Z7RbzB63u6Ymn3Fo8IbDev3CfCNcNuxCKltFEXLqSCjI2vqNY+7HZkgQBIqy2wMgzli3aSLq0w8lWYtZzyyot7q8RPXWMGTfaBUo2reY0SOSffm9rAivE9PszNfPid71CvNrGAAoxRbwb25eVujlyIcDVWe5vZ9Iw==", "domain": ".my.medevio.cz", "path": "/", "expires": 1761125920, "httpOnly": false, "secure": true, "sameSite": "Lax"}], "origins": [{"origin": "https://my.medevio.cz", "localStorage": [{"name": "awswaf_token_refresh_timestamp", "value": "1760780309860"}, {"name": "awswaf_session_storage", "value": "b6a1d4eb-4350-40e5-8e52-1f5f9600fbb8:CgoAr9pC8c+zAAAA:+vw//1NzmePjPpbGCJzUB+orCRivtJd098DbDX4AnABiGRw/+ql6ShqvFY4YdCY7w2tegb5mEPBdAmc4sNi22kNR9BuEoAgCUiMhkU1AZWfzM51zPfTh7SveCrREZ7xdvxcqKPMmfVLRYX5E4+UWh22z/LKQ7+d9VERp3J+wWCUW3dFFirkezy3N7b2FVjTlY/RxsZwhejQziTG/L3CkIFFP3mOReNgBvDpj7aKoM1knY4IL4TZ8E7zNv3nTsvzACLYvnUutVOUcofN1TfOzwZshSKsEXsMzrQn8PzLccX1jM5VSzce7gfEzl0zSPsT8NB3Sna+rhMIttDNYgvbW1HsfG2LIeKMR27Zf8hkslDRVVkcU/Kp2jLOEdhhrBKGjKY2o9/uX3NExdzh5MEKQSSRtmue01BpWYILPH23rMsz4YSmF+Ough5OeQoC95rkcYwVXMhwvUN9Zfp9UZ4xCNfFUex5dOrg9aJntYRnaceeocGUttNI5AdT0i3+osV6XHXzKxeqO8zLCS9BIsCzxaHfdqqem5DorMceuGKz+QqksatIQAA=="}, {"name": "Application.Intl.locale", "value": "cs"}, {"name": "Password.prefill", "value": "{\"username\":\"vladimir.buzalka@buzalka.cz\",\"type\":\"email\"}"}]}]}
@@ -0,0 +1,176 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Sync open requests: checks each request marked as open in MySQL (doneAt IS NULL
AND removedAt IS NULL) against the Medevio API. If the API shows the request is
closed (doneAt) or removed (removedAt), updates MySQL accordingly.
"""
import json
import sys
import time
import requests
import pymysql
from pathlib import Path
from datetime import datetime
# ==============================
# UTF-8 output (Windows friendly)
# ==============================
try:
    sys.stdout.reconfigure(encoding="utf-8")
    sys.stderr.reconfigure(encoding="utf-8")
except AttributeError:
    # reconfigure() may be missing on wrapped streams — re-wrap the raw buffers.
    import io
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")
# ==============================
# DRY RUN - set to True to only print what would be updated, False to actually update
# ==============================
DRY_RUN = False
# ==============================
# CONFIG
# ==============================
GRAPHQL_URL = "https://api.medevio.cz/graphql"
CLINIC_SLUG = "mudr-buzalkova"
TOKEN_PATH = Path(__file__).resolve().parent.parent / "token.txt"  # Bearer token, single line
gateway_token = TOKEN_PATH.read_text(encoding="utf-8").strip()
headers = {
    "content-type": "application/json",
    "authorization": f"Bearer {gateway_token}",
    "origin": "https://my.medevio.cz",
    "referer": "https://my.medevio.cz/",
}
# NOTE(review): root credentials hard-coded in source — move to .env / secret store.
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3306,
    "user": "root",
    "password": "Vlado9674+",
    "database": "medevio",
    "charset": "utf8mb4",
    "cursorclass": pymysql.cursors.DictCursor,
}
# Minimal per-request query: just the lifecycle timestamps mirrored to MySQL.
GRAPHQL_QUERY = """
query GetPatientRequest2($requestId: UUID!, $clinicSlug: String!) {
  request: getPatientRequest2(patientRequestId: $requestId, clinicSlug: $clinicSlug) {
    id
    doneAt
    removedAt
    updatedAt
  }
}
"""
def fix_datetime(dt_str):
    """Parse an ISO-8601 timestamp (optionally 'Z'-suffixed) into a datetime.

    Returns None for empty/None input or an unparseable string.
    """
    if dt_str:
        normalized = dt_str.replace("Z", "+00:00")
        try:
            parsed = datetime.fromisoformat(normalized)
        except Exception:
            return None
        return parsed
    return None
def fetch_request(request_id):
    """Fetch lifecycle timestamps (doneAt/removedAt/updatedAt) for one request.

    POSTs GRAPHQL_QUERY to the Medevio API, retrying up to 3 times on network
    errors.  Returns the request dict from the GraphQL response, or None on
    any failure (connection errors after 3 attempts, non-200 HTTP status, or
    a GraphQL "errors" payload).
    """
    payload = {
        "operationName": "GetPatientRequest2",
        "query": GRAPHQL_QUERY,
        "variables": {
            "requestId": request_id,
            "clinicSlug": CLINIC_SLUG,
        },
    }
    for attempt in range(3):
        try:
            r = requests.post(GRAPHQL_URL, json=payload, headers=headers, timeout=30)
            break
        except requests.RequestException as e:
            # RequestException is the base class — it already covers
            # ConnectionError and Timeout, so one clause is enough.
            print(f" ⚠️ Attempt {attempt+1}/3 failed: {e}")
            if attempt < 2:
                time.sleep(2)  # back off, but not after the final attempt
    else:
        # Loop finished without break: every attempt raised.
        print(f" ❌ Connection failed after 3 attempts for {request_id}")
        return None
    if r.status_code != 200:
        print(f" ❌ HTTP {r.status_code} for {request_id}")
        return None
    data = r.json()
    if "errors" in data:
        print(f" ❌ API error for {request_id}: {data['errors']}")
        return None
    return data.get("data", {}).get("request")
# ==============================
# MAIN
# ==============================
conn = pymysql.connect(**DB_CONFIG)
# 1) Read all open requests from MySQL
with conn.cursor() as cur:
    cur.execute(
        "SELECT id, displayTitle, pacient_prijmeni, pacient_jmeno "
        "FROM pozadavky WHERE doneAt IS NULL AND removedAt IS NULL"
    )
    open_requests = cur.fetchall()
mode = "DRY RUN" if DRY_RUN else "LIVE"
print(f"🔧 Mode: {mode}")
print(f"📋 Found {len(open_requests)} open requests in MySQL.\n")
updated = 0
errors = 0
# 2) Re-check each request against the API and mirror done/removed state back.
for i, req in enumerate(open_requests, 1):
    rid = req["id"]
    name = f"{req.get('pacient_prijmeni', '')} {req.get('pacient_jmeno', '')}".strip()
    title = req.get("displayTitle", "")
    print(f"[{i}/{len(open_requests)}] {name} {title} ({rid})")
    api_data = fetch_request(rid)
    if api_data is None:
        errors += 1
        continue
    api_done = api_data.get("doneAt")
    api_removed = api_data.get("removedAt")
    api_updated = api_data.get("updatedAt")
    if api_done or api_removed:
        # Request was closed or removed upstream — mirror the timestamps.
        done_dt = fix_datetime(api_done)
        removed_dt = fix_datetime(api_removed)
        updated_dt = fix_datetime(api_updated)
        status = "DONE" if api_done else "REMOVED"
        if DRY_RUN:
            print(f" 🔍 Would update → {status} (doneAt={api_done}, removedAt={api_removed})")
        else:
            with conn.cursor() as cur:
                cur.execute(
                    "UPDATE pozadavky SET doneAt = %s, removedAt = %s, updatedAt = %s WHERE id = %s",
                    (done_dt, removed_dt, updated_dt, rid),
                )
            conn.commit()
            print(f" ✅ Updated → {status}")
        updated += 1
    else:
        print(f" ⏳ Still open")
    # Be gentle with the API
    time.sleep(1)
conn.close()
print(f"\n{'='*50}")
print(f"📊 Total open in MySQL: {len(open_requests)}")
print(f"✅ Updated (closed/removed): {updated}")
print(f"⏳ Still open: {len(open_requests) - updated - errors}")
print(f"❌ Errors: {errors}")
@@ -0,0 +1,46 @@
import os
from pathlib import Path
# Directory whose sub-folders should be renamed.
target_path = Path(r"U:\Dropbox\Ordinace\Dokumentace_ke_zpracování\MP")
def rename_folders():
    """Move a leading marker character of each folder name to position 11.

    NOTE(review): both marker literals below render as EMPTY strings here —
    the original marker (apparently a non-ASCII symbol such as a triangle)
    was likely lost in an encoding round-trip.  As written,
    startswith("") is always True and "" is inserted, so the net effect is
    only ljust-padding + rstrip.  Restore the intended character before
    relying on this script.
    """
    # Ensure the path exists
    if not target_path.exists():
        print(f"Error: The path {target_path} does not exist.")
        return
    # Iterate through items in the directory
    for folder in target_path.iterdir():
        # Only process directories
        if folder.is_dir():
            original_name = folder.name
            # Check if name starts with the triangle
            if original_name.startswith(""):
                # 1. Remove the triangle from the start
                name_without_tri = original_name[1:]
                # 2. Prepare the name to be at least 10 chars long
                #    (so the triangle can sit at index 10 / position 11)
                clean_name = name_without_tri.ljust(10)
                # 3. Construct new name: first 10 chars + triangle + the rest
                new_name = clean_name[:10] + "" + clean_name[10:]
                # Remove trailing spaces if the original name was short
                # but you don't want extra spaces at the very end
                new_name = new_name.rstrip()
                new_folder_path = folder.parent / new_name
                try:
                    print(f"Renaming: '{original_name}' -> '{new_name}'")
                    folder.rename(new_folder_path)
                except Exception as e:
                    print(f"Could not rename {original_name}: {e}")
if __name__ == "__main__":
    rename_folders()
@@ -0,0 +1,111 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Compress PDF — output DPI and JPEG quality are chosen automatically
based on the detected resolution of the source PDF.
Usage: python compress_pdf.py <input.pdf> [output.pdf]
       python compress_pdf.py            (processes all PDFs in current folder)
Output filename: original_name (139 kB).pdf
"""
import sys
import fitz
from pathlib import Path
# ==============================
# COMPRESSION TABLE
# Detected source DPI -> (output DPI, JPEG quality)
# Rows are evaluated top-to-bottom; first match wins.
# ==============================
#
#   src_dpi_min  src_dpi_max  out_dpi  jpeg_quality
COMPRESSION_TABLE = [
    (   0,   99,  72, 60),  # very low res — already small, compress hard
    ( 100,  149, 100, 70),  # low res
    ( 150,  249, 150, 80),  # standard scan (our tested sweet spot)
    ( 250,  399, 150, 80),  # good scan — downsample to 150 is fine
    ( 400,  599, 200, 85),  # high res scan
    ( 600, 9999, 150, 80),  # very high res / professional scan
]
def detect_source_dpi(src: fitz.Document) -> int:
    """Estimate the scan resolution of *src* from its first page.

    Uses the largest embedded raster image on page 1; pages with no raster
    images fall back to a 150-DPI default.
    """
    first = src[0]
    embedded = first.get_images(full=True)
    if not embedded:
        # Vector-only / empty page — assume a standard scan.
        return 150
    # Image entries carry pixel width/height at indices 2 and 3; pick the
    # biggest image by pixel area.
    widest = max(embedded, key=lambda entry: entry[2] * entry[3])
    px_w, px_h = widest[2], widest[3]
    # PDF points are 1/72 inch — convert the page size to inches.
    inches_w = first.rect.width / 72.0
    inches_h = first.rect.height / 72.0
    horiz = px_w / inches_w if inches_w else 0
    vert = px_h / inches_h if inches_h else 0
    return round((horiz + vert) / 2)
def pick_settings(source_dpi: int) -> tuple[int, int]:
    """Map a detected source DPI to (output DPI, JPEG quality).

    Scans COMPRESSION_TABLE top-to-bottom and returns the first row whose
    [min, max] range contains *source_dpi*; the last row doubles as the
    fallback for values outside every range.
    """
    row = next(
        (entry for entry in COMPRESSION_TABLE if entry[0] <= source_dpi <= entry[1]),
        COMPRESSION_TABLE[-1],
    )
    return row[2], row[3]
def compress(input_path: Path, output_path: Path = None):
    """Re-encode *input_path* page-by-page as JPEG at auto-chosen DPI/quality.

    When *output_path* is None the result is named "<stem> (<size> kB).pdf"
    next to the input.  An existing file at the target path is overwritten.
    """
    src = fitz.open(input_path)
    source_dpi = detect_source_dpi(src)
    out_dpi, jpeg_quality = pick_settings(source_dpi)
    print(f" zdroj ~{source_dpi} DPI -> komprese {out_dpi} DPI / JPEG q{jpeg_quality}")
    zoom = out_dpi / 72.0  # render scale relative to the PDF's 72-pt inch
    mat = fitz.Matrix(zoom, zoom)
    out_doc = fitz.open()
    for page in src:
        # Rasterize the page, JPEG-encode it, and re-wrap it as a 1-page PDF.
        pix = page.get_pixmap(matrix=mat, colorspace=fitz.csRGB)
        img_bytes = pix.tobytes("jpeg", jpg_quality=jpeg_quality)
        img_doc = fitz.open("pdf", fitz.open("jpeg", img_bytes).convert_to_pdf())
        rect = page.rect
        # Keep the original physical page size regardless of the render DPI.
        new_page = out_doc.new_page(width=rect.width, height=rect.height)
        new_page.show_pdf_page(new_page.rect, img_doc, 0)
    src.close()
    # Save to a temp name first so the final name can embed the output size.
    tmp = input_path.with_suffix(".tmp.pdf")
    out_doc.save(tmp, deflate=True, garbage=4)
    out_doc.close()
    size_kb = round(tmp.stat().st_size / 1024)
    if output_path is None:
        output_path = input_path.parent / f"{input_path.stem} ({size_kb} kB).pdf"
    if output_path.exists():
        output_path.unlink()
    tmp.rename(output_path)
    orig_kb = round(input_path.stat().st_size / 1024)
    saving = (1 - size_kb / orig_kb) * 100
    print(f" {input_path.name} -> {output_path.name} (bylo {orig_kb} kB, uspora {saving:.0f}%)")
if __name__ == "__main__":
    if len(sys.argv) >= 2:
        # Explicit input path (and optional output path) on the command line.
        inp = Path(sys.argv[1])
        out = Path(sys.argv[2]) if len(sys.argv) >= 3 else None
        compress(inp, out)
    else:
        # Batch mode: every PDF in the script's folder, skipping already
        # compressed outputs (names ending in ").pdf") and this script's stem.
        folder = Path(__file__).parent
        pdfs = [p for p in folder.glob("*.pdf") if not p.name.endswith(").pdf") and p.stem != Path(__file__).stem]
        if not pdfs:
            print("Zadne PDF k zpracovani.")
        for pdf in pdfs:
            compress(pdf)
@@ -0,0 +1,60 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Compress a PDF into multiple variants at different DPI / JPEG quality settings.
Uses PyMuPDF (fitz) — renders each page as JPEG image, saves back as PDF.
"""
import sys
import fitz  # PyMuPDF
from pathlib import Path
# Hard-coded test input; each variant is written next to it.
INPUT = Path(r"u:\Medevio\50 Různé testy\MinimizeOptimizePDF\afd1823b-8277-44a2-84e1-db89a0ccd134.pdf")
OUT_DIR = INPUT.parent
VARIANTS = [
    # (label, dpi, jpeg_quality)
    ("300dpi_q90", 300, 90),
    ("200dpi_q85", 200, 85),
    ("150dpi_q80", 150, 80),
    ("120dpi_q75", 120, 75),
    ("96dpi_q70", 96, 70),
    ("72dpi_q60", 72, 60),
]
src = fitz.open(INPUT)
original_size = INPUT.stat().st_size
print(f"Originál: {INPUT.name} ({original_size / 1024:.0f} KB)\n")
print(f"{'Varianta':<20} {'DPI':>5} {'Kvalita':>8} {'Velikost':>12} {'Úspora':>8}")
print("-" * 58)
for label, dpi, quality in VARIANTS:
    out_path = OUT_DIR / f"{INPUT.stem}_{label}.pdf"
    zoom = dpi / 72.0  # render scale relative to the PDF's 72-pt inch
    mat = fitz.Matrix(zoom, zoom)
    out_doc = fitz.open()
    for page in src:
        # Rasterize the page and JPEG-encode it at this variant's settings.
        pix = page.get_pixmap(matrix=mat, colorspace=fitz.csRGB)
        img_bytes = pix.tobytes("jpeg", jpg_quality=quality)
        # Create a new PDF page with the same physical dimensions
        img_doc = fitz.open("pdf", fitz.open("jpeg", img_bytes).convert_to_pdf())
        # Scale page back to original size
        rect = page.rect
        new_page = out_doc.new_page(width=rect.width, height=rect.height)
        new_page.show_pdf_page(new_page.rect, img_doc, 0)
    out_doc.save(out_path, deflate=True, garbage=4)
    out_doc.close()
    size = out_path.stat().st_size
    size_kb = round(size / 1024)
    # Rename to embed the resulting size in the filename.
    final_path = OUT_DIR / f"{INPUT.stem}_{label} ({size_kb} kB).pdf"
    out_path.rename(final_path)
    saving = (1 - size / original_size) * 100
    print(f"{label:<20} {dpi:>5} {quality:>8} {size_kb:>9} kB {saving:>7.0f}%")
src.close()
print("\nHotovo.")
+46
View File
@@ -0,0 +1,46 @@
# 60 ScansProcessing
Agent pro zpracování naskenovaných lékařských zpráv (PDF i JPG/PNG).
## Skripty
### `extract_patient_info.py` — hlavní agent
Spuštění: `python extract_patient_info.py` (bez argumentů = celá složka ToProcess)
**Workflow:**
1. Načte soubory z `ToProcess/`
2. Claude Vision API (sonnet-4-6) extrahuje: jméno, RČ, datum, typ dokumentu, poznámku, navržený název, rotaci
3. Ověří pacienta v Medicus Firebird (tabulka KAR, pole RODCIS/PRIJMENI/JMENO)
4. Fuzzy matching RČ při nenalezení: vynechání cifry + záměna podobných (0↔8, 1↔7, 5↔6, 3↔8) + checksum /11
5. Upozorní na duplicitu v `U:\Dropbox\Ordinace\Dokumentace_zpracovaná\`
6. Interaktivní schválení / oprava názvu
7. JPG/PNG → skutečné PDF (správná orientace, DPI=150, quality=80)
8. Přesun do `Processed/`, smazání z `ToProcess/`
9. Opravy názvů se ukládají do `corrections.json` jako few-shot příklady
**Formát názvu souboru:**
`{RČ} {YYYY-MM-DD} {Příjmení}, {Jméno} [{typ dokumentu}] [{poznámka}].pdf`
Příklady typů: `LZ chirurgie`, `LZ kardiologie`, `Laboratoř`, `CT břicha`, `kolonoskopie`, `poukaz FT`
### `jpg_to_pdf.py` — konverze obrázku na PDF
```
python jpg_to_pdf.py soubor.jpg [vystup.pdf] [rotace_ccw]
```
- Opravuje EXIF orientaci
- Rotace: 0 / 90 / 180 / 270 (CCW)
- A4, DPI=150, quality=80, bez okrajů
- Používá se i interně z `extract_patient_info.py`
## Složky
| Složka | Účel |
|---|---|
| `ToProcess/` | Sem se házejí nové skeny (PDF, JPG, PNG) |
| `Processed/` | Správně pojmenované PDF po schválení |
| `U:\Dropbox\Ordinace\Dokumentace_zpracovaná\` | Finální archiv |
## Konfigurace
- API klíč: `U:\Medevio\.env` → `ANTHROPIC_API_KEY`
- Medicus: `localhost:c:\medicus 3\data\medicus.fdb` (Firebird, SYSDBA)
- Few-shot korekce: `corrections.json`
@@ -0,0 +1,577 @@
"""
Agent pro extrakci a pojmenování naskenovaných PDF lékařských zpráv.
- Claude Vision API — bez OCR, správná čeština s diakritikou
- Ověření pacienta proti Medicus (KAR), fuzzy matching RČ
- Interaktivní schválení / oprava názvu
- Few-shot learning z uložených korekcí
"""
import base64
import gc
import io
import json
import os
import re
import shutil
import subprocess
import sys
import time
from pathlib import Path
# Windows: force stdout/stderr to UTF-8 so diacritics and emoji print correctly
if sys.platform == "win32":
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8", errors="replace")
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8", errors="replace")
import anthropic
from pdf2image import convert_from_path
# Make the sibling "Knihovny" package importable when run as a plain script.
sys.path.insert(0, str(Path(__file__).parent.parent))
from Knihovny.najdi_dropbox import get_dropbox_root
from Knihovny.najdi_medicus import get_medicus_config
POPPLER_PATH = r"C:/Poppler/Library/bin"  # Poppler binaries used by pdf2image
CORRECTIONS_FILE = Path(__file__).parent / "corrections.json"  # few-shot corrections store
_DROPBOX = Path(get_dropbox_root())
TO_PROCESS = _DROPBOX / r"Ordinace\Dokumentace_ke_zpracování\Ricoh Fi-8040\KeZpracování"  # inbox of new scans
PROCESSED = _DROPBOX / r"Ordinace\Dokumentace_ke_zpracování\Ricoh Fi-8040\Zpracováno"  # approved, renamed output
DOKUMENTACE = _DROPBOX / r"Ordinace\Dokumentace_zpracovaná"  # final archive (duplicate check)
# ─── Konfigurace ──────────────────────────────────────────────────────────────
def _load_env():
env_path = Path(__file__).parent.parent / ".env"
if env_path.exists():
for line in env_path.read_text(encoding="utf-8").splitlines():
line = line.strip()
if "=" in line and not line.startswith("#"):
k, v = line.split("=", 1)
os.environ[k.strip()] = v.strip()
_load_env()
# ─── Korekce (few-shot příklady) ──────────────────────────────────────────────
def load_corrections() -> list[dict]:
    """Return the saved filename corrections (few-shot examples), or []."""
    if not CORRECTIONS_FILE.exists():
        return []
    return json.loads(CORRECTIONS_FILE.read_text(encoding="utf-8"))
def save_correction(original: str, corrected: str):
    """Persist one original→corrected filename pair, skipping exact repeats."""
    corrections = load_corrections()
    already_known = any(
        c["original"] == original and c["corrected"] == corrected
        for c in corrections
    )
    if already_known:
        return
    corrections.append({"original": original, "corrected": corrected})
    CORRECTIONS_FILE.write_text(
        json.dumps(corrections, ensure_ascii=False, indent=2), encoding="utf-8"
    )
    print(f" ✓ Korekce uložena ({len(corrections)} celkem)")
def build_corrections_prompt() -> str:
    """Render up to the last 10 saved corrections as a few-shot prompt block.

    Returns "" when no corrections exist; otherwise the block ends with a
    blank line so it can be prepended directly to the main prompt.
    """
    corrections = load_corrections()
    if not corrections:
        return ""
    parts = ["Příklady korekcí z minulých běhů (uč se z nich):"]
    for item in corrections[-10:]:
        parts += [
            f' - špatně: "{item["original"]}"',
            f' správně: "{item["corrected"]}"',
        ]
    return "\n".join(parts) + "\n\n"
# ─── Kontrola duplicit ───────────────────────────────────────────────────────
def check_duplicates(rc: str, datum: str) -> list[str]:
    """List archive files that look like duplicates of the current scan.

    Searches Dokumentace_zpracovaná for names beginning with
    "<rc> <datum>" and returns the matching file names.
    """
    if not DOKUMENTACE.exists():
        return []
    wanted = f"{rc} {datum}"
    hits = []
    for entry in DOKUMENTACE.iterdir():
        if entry.name.startswith(wanted):
            hits.append(entry.name)
    return hits
# ─── Medicus ověření ──────────────────────────────────────────────────────────
def _medicus_connect():
    """Open a Firebird connection to the Medicus DB, or return None on failure.

    Uses the win1250 charset to match Medicus' legacy encoding.
    NOTE(review): default SYSDBA/masterkey credentials are hard-coded.
    """
    try:
        import fdb  # imported lazily so the agent still runs without the driver
        cfg = get_medicus_config()
        return fdb.connect(
            dsn=cfg.dsn,
            user="SYSDBA", password="masterkey", charset="win1250"
        )
    except Exception as e:
        print(f" [Medicus] Nepřipojeno: {e}")
        return None
def _lookup_by_rc(cur, rc_digits: str) -> dict | None:
"""Přesné vyhledání podle RČ (bez lomítka)."""
cur.execute(
"SELECT IDPAC, PRIJMENI, JMENO, RODCIS FROM KAR "
"WHERE REPLACE(RODCIS, '/', '') = ?",
(rc_digits,)
)
row = cur.fetchone()
if row:
return {"idpac": row[0], "prijmeni": row[1].strip(), "jmeno": row[2].strip(), "rodcis": row[3].strip()}
return None
def _rc_candidates(rc: str) -> list[str]:
"""
Generuje kandidáty RČ pro fuzzy matching:
- vynechání každé cifry (OCR přečetlo znak navíc)
- vložení nuly na každou pozici (OCR přehlédlo nulu v sekvenci 00)
- záměna podobně vypadajících číslic na každé pozici
Vrátí unikátní seznam kandidátů bez původního RČ.
"""
similar = {"0": "8", "8": "0", "1": "7", "7": "1", "5": "6", "6": "5", "3": "8"}
candidates = set()
# Vynechání jedné cifry (OCR přečetlo znak navíc)
for i in range(len(rc)):
candidates.add(rc[:i] + rc[i+1:])
# Vložení nuly na každou pozici (nejčastější chyba: sekvence 00 přečtena jako 0)
for i in range(len(rc) + 1):
candidates.add(rc[:i] + "0" + rc[i:])
# Záměna podobné cifry na každé pozici
for i, ch in enumerate(rc):
if ch in similar:
candidates.add(rc[:i] + similar[ch] + rc[i+1:])
candidates.discard(rc)
candidates = {c for c in candidates if len(c) in (9, 10)}
return sorted(candidates)
def _rc_checksum_ok(rc: str) -> bool:
"""Ověří dělitelnost 11 pro 10místná RČ (platí pro narozené po 1.1.1954)."""
digits = re.sub(r"\D", "", rc)
if len(digits) == 10:
return int(digits) % 11 == 0
return True # 9místná RČ nemají checksum
def verify_patient(rc_raw: str) -> dict:
    """Verify a patient in Medicus by birth number.

    Returns a dict with:
        status:       "ok" | "fuzzy" | "not_found" | "offline"
        patient:      matched KAR record dict, or None
        rc_corrected: corrected birth number when status == "fuzzy", else None
        all_matches:  (fuzzy only) every candidate hit, best first
    """
    rc = re.sub(r"\D", "", rc_raw or "")  # keep digits only; tolerate None
    if not rc:
        return {"status": "not_found", "patient": None, "rc_corrected": None}
    con = _medicus_connect()
    if con is None:
        return {"status": "offline", "patient": None, "rc_corrected": None}
    try:
        cur = con.cursor()
        # 1. Exact match
        patient = _lookup_by_rc(cur, rc)
        if patient:
            return {"status": "ok", "patient": patient, "rc_corrected": None}
        # 2. Fuzzy matching — try OCR-error candidates, prefer valid checksums
        candidates = _rc_candidates(rc)
        matches = []
        for cand in candidates:
            p = _lookup_by_rc(cur, cand)
            if p:
                matches.append((cand, p))
        if not matches:
            return {"status": "not_found", "patient": None, "rc_corrected": None}
        # Sort so candidates with a valid mod-11 checksum come first
        matches.sort(key=lambda x: (0 if _rc_checksum_ok(x[0]) else 1))
        best_rc, best_patient = matches[0]
        return {"status": "fuzzy", "patient": best_patient, "rc_corrected": best_rc, "all_matches": matches}
    finally:
        con.close()
# ─── PDF → obrázek ────────────────────────────────────────────────────────────
def pdf_to_images(pdf_path: str) -> list:
    """Rasterize all pages of *pdf_path* at 300 DPI via Poppler; returns PIL images."""
    return convert_from_path(pdf_path, poppler_path=POPPLER_PATH, dpi=300)
def image_to_base64(image) -> str:
    """Encode a PIL image as a base64 string of its JPEG (quality 95) bytes."""
    jpeg_buffer = io.BytesIO()
    image.save(jpeg_buffer, format="JPEG", quality=95)
    raw = jpeg_buffer.getvalue()
    return base64.standard_b64encode(raw).decode("utf-8")
# ─── Extrakce Claude Vision ───────────────────────────────────────────────────
def extract_patient_info(pdf_path: str) -> dict:
    """Extract patient metadata from a scanned report via Claude Vision.

    Sends the first page (for PDFs) or the image itself (JPG/PNG) to the
    model and returns the parsed JSON dict with keys: jmeno, rodne_cislo,
    datum_zpravy, typ_dokumentu, poznamka, nazev_souboru, rotace.  When the
    model's output cannot be parsed as JSON, returns
    {"nazev_souboru": None, "raw": <model text>}.

    Raises FileNotFoundError when *pdf_path* does not exist.
    """
    pdf_path = Path(pdf_path)
    if not pdf_path.exists():
        raise FileNotFoundError(f"Soubor nenalezen: {pdf_path}")
    print(f"\nNačítám: {pdf_path.name}")
    suffix = pdf_path.suffix.lower()
    if suffix in (".jpg", ".jpeg", ".png"):
        from PIL import Image
        img = Image.open(pdf_path)
        image_b64 = image_to_base64(img)
        img.close()
    else:
        # Only the first rendered page is sent to the model.
        images = pdf_to_images(str(pdf_path))
        image_b64 = image_to_base64(images[0])
        del images
        gc.collect()  # drop the 300-DPI page bitmaps promptly
    # Past corrections are prepended as few-shot examples.
    prompt = (
        build_corrections_prompt() +
        "Toto je naskenovaná lékařská zpráva v češtině. "
        "Vrať JSON s těmito poli:\n"
        "- \"jmeno\": celé jméno pacienta (příjmení + jméno + případný titul)\n"
        "- \"rodne_cislo\": rodné číslo pacienta BEZ lomítka (pouze číslice)\n"
        "- \"datum_zpravy\": datum zprávy ve formátu YYYY-MM-DD\n"
        "- \"typ_dokumentu\": typ dokumentu — "
        "\"LZ {oddělení}\" = ambulantní/lékařská zpráva (např. \"LZ chirurgie\", \"LZ kardiologie\", \"LZ plicní\", \"LZ ORL\"); "
        "\"PZ {oddělení}\" = propouštěcí zpráva z hospitalizace (např. \"PZ interna\", \"PZ neurologie\"). "
        "Jiné typy: \"Laboratoř\", \"CT břicha\", \"MRI páteře\", \"kolonoskopie\", "
        "\"operační protokol oční\", \"poukaz FT\", \"diagnostická mamografie\" atd.\n"
        "- \"poznamka\": krátká klinická poznámka česky, max 80 znaků. "
        "DŮLEŽITÉ: pokud zpráva obsahuje sekci \"Závěr:\" nebo \"Závěr vyšetření:\", "
        "použij VÝHRADNĚ obsah této sekce — je nejdůležitější. "
        "Teprve pokud závěr chybí, shrň obsah z celé zprávy.\n"
        "- \"nazev_souboru\": název souboru ve formátu "
        "\"{rodne_cislo} {datum_zpravy} {Příjmení}, {Jméno} [{typ_dokumentu}] [{poznamka}].pdf\" "
        "(jméno bez titulu, RČ bez lomítka)\n"
        "- \"rotace\": o kolik stupňů CCW je třeba otočit obrázek aby byl text čitelně na výšku nebo šířku "
        "(hodnoty: 0, 90, 180, 270). Pokud je text již správně orientovaný, vrať 0.\n\n"
        "Pokud pole nenajdeš, použij null. Nepiš nic jiného než JSON."
    )
    print(" Volám Claude Vision API...")
    client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
    response = client.messages.create(
        model="claude-sonnet-4-6",
        max_tokens=400,
        messages=[{
            "role": "user",
            "content": [
                {"type": "image", "source": {"type": "base64", "media_type": "image/jpeg", "data": image_b64}},
                {"type": "text", "text": prompt},
            ],
        }],
    )
    usage = response.usage
    # Cost estimate at $3 per MTok input, $15 per MTok output.
    cost_input = usage.input_tokens * 3 / 1_000_000
    cost_output = usage.output_tokens * 15 / 1_000_000
    print(f" Tokeny: {usage.input_tokens} in + {usage.output_tokens} out = ${cost_input + cost_output:.4f}")
    raw = response.content[0].text.strip()
    # Strip an optional ```json fence around the model's JSON answer.
    if raw.startswith("```"):
        raw = raw.split("```")[1]
    if raw.startswith("json"):
        raw = raw[4:]
    try:
        return json.loads(raw.strip())
    except json.JSONDecodeError:
        print(f" VAROVÁNÍ: nelze parsovat JSON: {raw!r}")
        return {"nazev_souboru": None, "raw": raw}
# ─── Interaktivní schválení ───────────────────────────────────────────────────
def sanitize_filename(name: str) -> str:
    """Strip characters that Windows forbids in file names."""
    # Same character class the old regex removed: < > : " / \ | ? *
    forbidden = '<>:"/\\|?*'
    return name.translate(str.maketrans("", "", forbidden))
def _open_preview(root, pdf_path: Path):
    """Open a PDF/image preview window as a Toplevel.

    Works on a temp copy — the viewer never locks the original file.

    Args:
        root: Tk root window the Toplevel is attached to.
        pdf_path: path to a .pdf, .jpg, .jpeg or .png file to display.
    """
    import tkinter as tk
    import tempfile
    import shutil as _shutil
    # PIL and PyMuPDF are optional; without them the preview is silently skipped.
    try:
        from PIL import Image, ImageTk
        import fitz
    except ImportError:
        return
    # Temp copy — the viewer never touches the original.
    # NOTE(review): tempfile.mktemp is race-prone; consider tempfile.mkstemp.
    tmp = Path(tempfile.mktemp(suffix=pdf_path.suffix))
    _shutil.copy2(pdf_path, tmp)
    suffix = pdf_path.suffix.lower()
    if suffix in (".jpg", ".jpeg", ".png"):
        # Plain image: a single "page", no PyMuPDF document.
        pil_pages = [Image.open(tmp)]
        doc = None
    else:
        try:
            doc = fitz.open(str(tmp))
        except Exception:
            # Unreadable PDF: clean up the copy and give up on the preview.
            tmp.unlink(missing_ok=True)
            return
        pil_pages = []
    def render(n) -> Image.Image:
        # Render page *n* as a PIL image scaled to fit ~700 px width and the
        # screen height minus window chrome. `sh` is assigned below, before
        # show() triggers the first render.
        if doc is not None:
            page = doc[n]
            zoom = min(700 / page.rect.width, (sh - 150) / page.rect.height)
            pix = page.get_pixmap(matrix=fitz.Matrix(zoom, zoom))
            return Image.frombytes("RGB", (pix.width, pix.height), pix.samples)
        else:
            img = pil_pages[0].copy()
            img.thumbnail((700, sh - 150), Image.LANCZOS)
            return img
    def on_close():
        # Close the document, remove the temp copy, destroy the window.
        try:
            if doc:
                doc.close()
        except Exception:
            pass
        tmp.unlink(missing_ok=True)
        win.destroy()
    page_count = len(doc) if doc else 1
    sh = root.winfo_screenheight()
    current = [0]  # mutable cell holding the index of the displayed page
    photo_ref = [None]  # keeps the PhotoImage referenced so Tk keeps drawing it
    win = tk.Toplevel(root)
    win.title(pdf_path.name)
    win.attributes("-topmost", True)
    win.resizable(False, False)
    win.protocol("WM_DELETE_WINDOW", on_close)
    lbl_img = tk.Label(win)
    lbl_img.pack()
    frame_nav = tk.Frame(win)
    frame_nav.pack(pady=4)
    lbl_page = tk.Label(frame_nav, font=("Segoe UI", 9))
    lbl_page.pack(side="left", padx=10)
    def show(n):
        # Display page n and refresh the nav buttons' enabled/disabled state.
        current[0] = n
        img = render(n)
        photo_ref[0] = ImageTk.PhotoImage(img)
        lbl_img.config(image=photo_ref[0])
        lbl_page.config(text=f"Strana {n + 1} / {page_count}")
        btn_prev.config(state="normal" if n > 0 else "disabled")
        btn_next.config(state="normal" if n < page_count - 1 else "disabled")
    btn_prev = tk.Button(frame_nav, text="◄ Předchozí",
                         command=lambda: show(current[0] - 1))
    btn_prev.pack(side="left")
    btn_next = tk.Button(frame_nav, text="Další ►",
                         command=lambda: show(current[0] + 1))
    btn_next.pack(side="left")
    show(0)
    win.update_idletasks()
    # Pin the window to the top-left corner of the screen.
    win.geometry(f"+0+0")
def _rename_dialog(nazev: str, info_lines: list[str]) -> str | None:
"""
Spustí rename_dialog.py jako subprocess — vyhneme se Tkinter konfliktům s PyCharm.
Vrátí finální název (s .pdf) nebo None = přeskočit.
"""
import tempfile
data = {"nazev": nazev, "info_lines": info_lines}
tmp = Path(tempfile.mktemp(suffix=".json"))
tmp.write_text(json.dumps(data, ensure_ascii=False), encoding="utf-8")
dialog_script = Path(__file__).parent / "rename_dialog.py"
try:
proc = subprocess.run(
[sys.executable, str(dialog_script), str(tmp)],
capture_output=True, text=True, encoding="utf-8",
)
output = proc.stdout.strip()
if output:
return json.loads(output).get("value")
return None
finally:
tmp.unlink(missing_ok=True)
def print_verification(verif: dict, rc_from_scan: str):
    """Print the outcome of matching the scanned RČ against Medicus.

    Handles the four verification statuses: "ok", "fuzzy", "not_found"
    and "offline".
    """
    patient = verif.get("patient")
    status = verif["status"]
    out: list[str] = []
    if status == "ok":
        out.append(f"  ✓ Medicus: {patient['prijmeni']} {patient['jmeno']} | RČ {patient['rodcis']}")
    elif status == "fuzzy":
        corrected = verif["rc_corrected"]
        out.append(f"  ⚠ Medicus: RČ ze skenu '{rc_from_scan}' nenalezeno")
        out.append(f"    → Nalezen podobný pacient: {patient['prijmeni']} {patient['jmeno']} | RČ {patient['rodcis']}")
        out.append(f"    → Pravděpodobná oprava RČ: {rc_from_scan} → {corrected} (OCR chyba)")
        if len(verif.get("all_matches", [])) > 1:
            out.append(f"    → Další shody: {[m[0] for m in verif['all_matches'][1:]]}")
    elif status == "not_found":
        out.append(f"  ✗ Medicus: RČ '{rc_from_scan}' nenalezeno ani při fuzzy hledání")
    elif status == "offline":
        out.append("  — Medicus: nedostupný (offline), ověření přeskočeno")
    for line in out:
        print(line)
def interactive_rename(pdf_path: Path, info: dict, verif: dict) -> bool:
    """Open a tkinter dialog to approve / correct the proposed file name.

    An approved file is saved into Processed/ and removed from ToProcess/.

    Args:
        pdf_path: source scan (.pdf or .jpg/.jpeg/.png).
        info: extraction result (keys like "nazev_souboru", "rodne_cislo",
              "datum_zpravy", "rotace").
        verif: Medicus verification result from verify_patient().

    Returns:
        True when the file was saved, False when it was skipped.
    """
    # Prefer the RČ verified in Medicus; fall back to the one from the scan.
    rc = re.sub(r"\D", "", verif["patient"]["rodcis"] if verif.get("patient") else info.get("rodne_cislo") or "")
    datum = info.get("datum_zpravy") or ""
    duplicity = check_duplicates(rc, datum)
    # Patch the proposed name when the RČ was fuzzy-corrected against Medicus.
    nazev = info.get("nazev_souboru")
    if verif["status"] == "fuzzy" and verif.get("rc_corrected") and nazev:
        rc_scan = re.sub(r"\D", "", info.get("rodne_cislo") or "")
        nazev = nazev.replace(rc_scan, verif["rc_corrected"], 1)
        print(f"  → Název aktualizován s opraveným RČ")
    # Build the informational lines shown in the dialog.
    rc_from_scan = re.sub(r"\D", "", info.get("rodne_cislo") or "")
    status = verif["status"]
    patient = verif.get("patient")
    info_lines = []
    if status == "ok":
        info_lines.append(f"✓ Medicus: {patient['prijmeni']} {patient['jmeno']} | RČ {patient['rodcis']}")
    elif status == "fuzzy":
        info_lines.append(f"⚠ RČ ze skenu '{rc_from_scan}' → opraveno na {verif['rc_corrected']}")
        info_lines.append(f"  Pacient: {patient['prijmeni']} {patient['jmeno']} | RČ {patient['rodcis']}")
    elif status == "not_found":
        info_lines.append(f"✗ RČ '{rc_from_scan}' nenalezeno v Medicus")
    else:
        info_lines.append("— Medicus nedostupný (offline)")
    if duplicity:
        info_lines.append(f"⚠ DUPLICITA: {', '.join(duplicity)}")
    print()
    # BUG FIX: this used to be `print("" * 70)`, which multiplies an *empty*
    # string and just prints a blank line — clearly a lost separator character.
    print("─" * 70)
    if nazev:
        print(f"  Navržený název: {nazev}")
    print("  Otevírám dialog...")
    odpoved = _rename_dialog(nazev or "", info_lines)
    if odpoved is None:
        print("  Přeskočeno.")
        return False
    if not odpoved.endswith(".pdf"):
        odpoved += ".pdf"
    final_name = sanitize_filename(odpoved)
    # Record the human correction so future proposals can learn from it.
    if nazev and nazev != final_name:
        save_correction(nazev, final_name)
    if not final_name or final_name == ".pdf":
        print("  Název je prázdný, přeskakuji.")
        return False
    dest = PROCESSED / final_name
    if dest.exists():
        print(f"  VAROVÁNÍ: '{final_name}' již existuje v Processed, přeskakuji.")
        return False
    if pdf_path.suffix.lower() in (".jpg", ".jpeg", ".png"):
        # Images are converted to PDF (with the rotation Claude suggested).
        from jpg_to_pdf import image_to_pdf
        image_to_pdf(pdf_path, dest, rotate_ccw=info.get("rotace") or 0)
    else:
        shutil.copy2(pdf_path, dest)
    pdf_path.unlink()
    print(f"  ✓ Uloženo: Processed/{final_name}")
    return True
# ─── Hlavní logika ────────────────────────────────────────────────────────────
def _start_preview_process(pdf_path: Path):
"""
Otevře náhled PDF jako samostatný subprocess (žádné tkinter threading problémy).
Pracuje s temp kopií — originál zůstane volný.
Vrátí funkci close() pro ukončení procesu.
"""
import tempfile
import shutil as _shutil
tmp = Path(tempfile.mktemp(suffix=pdf_path.suffix))
_shutil.copy2(pdf_path, tmp)
viewer = Path(__file__).parent / "preview_viewer.py"
proc = subprocess.Popen(
[sys.executable, str(viewer), str(tmp), "--delete-on-close"],
)
def close():
try:
proc.terminate()
proc.wait(timeout=3)
except Exception:
pass
try:
tmp.unlink(missing_ok=True)
except Exception:
pass
return close
def process_file(pdf_path: Path):
    """Run the full pipeline for one scan: preview, extraction, Medicus
    verification, interactive rename."""
    stop_preview = _start_preview_process(pdf_path)
    try:
        extracted = extract_patient_info(str(pdf_path))
        rc_digits = re.sub(r"\D", "", extracted.get("rodne_cislo") or "")
        print(f"  Ověřuji v Medicus (RČ: {rc_digits})...")
        verification = verify_patient(rc_digits)
        print_verification(verification, rc_digits)
        interactive_rename(pdf_path, extracted, verification)
    finally:
        # The preview process must die even when extraction/rename fails.
        stop_preview()
def process_folder(folder: Path):
    """Process every supported scan (.pdf/.jpg/.jpeg/.png) found in *folder*."""
    supported = (".pdf", ".jpg", ".jpeg", ".png")
    candidates = [entry for entry in sorted(folder.iterdir())
                  if entry.suffix.lower() in supported]
    if not candidates:
        print(f"Žádná PDF nenalezena v: {folder}")
        return
    print(f"Nalezeno {len(candidates)} PDF soubor(ů).\n")
    for entry in candidates:
        try:
            process_file(entry)
        except Exception as exc:
            # One bad scan must not abort the whole batch.
            print(f"  CHYBA: {exc}")
    print("\nHotovo.")
if __name__ == "__main__":
    # CLI entry point: optional single argument — a file or a folder;
    # defaults to the ToProcess directory.
    target = Path(sys.argv[1]) if len(sys.argv) > 1 else TO_PROCESS
    PROCESSED.mkdir(exist_ok=True)
    TO_PROCESS.mkdir(exist_ok=True)
    supported = (".pdf", ".jpg", ".jpeg", ".png")
    if target.is_file() and target.suffix.lower() in supported:
        process_file(target)
    elif target.is_dir():
        process_folder(target)
    else:
        print("Použití: python extract_patient_info.py [soubor.pdf nebo složka]")
        sys.exit(1)
Binary file not shown.

After

Width:  |  Height:  |  Size: 1.3 MiB

+538
View File
@@ -0,0 +1,538 @@
[
{
"original": "505228025 2026-05-14 Titlbachová, Božena [Žádanka předoperační vyšetření GYNA] [Předop. vyšetření, dg. N890, malý výkon A, anestezie CA].pdf",
"corrected": "505228025 2026-05-14 Titlbachová, Božena [žádanka předoperační vyšetření] [gynekologie, dg. N890, malý výkon A, anestezie CA].pdf"
},
{
"original": "6860241553 2026-02-12 Šímová, Helena [LZ neurologie] [VAS L páteře, iritačně zánikový radik sy L5/S1 vpravo, dg. M511].pdf",
"corrected": "6860241553 2026-02-12 Šímová, Helena [LZ neurologie] [VAS L páteře, po PRT pod CT, krásné zlepšení, iritačně zánikový radik sy L5/S1 vpravo, dg. M511].pdf"
},
{
"original": "6860241553 2026-02-10 Šímová, Helena [denzitometrie] [osteopenie, L1-4 T-score -1,4, krček fem. l T-1,8, r T-2,3].pdf",
"corrected": "6860241553 2026-02-10 Šímová, Helena [DXA] [osteopenie, L1-4 T-score -1.4, krček fem. l T-1.8, r T-2.3].pdf"
},
{
"original": "470629074 2026-03-31 Šebesta, Jaroslav [LZ kardiologie] [ECHO: EF 50%, hypokineza IVS a sp. stěny, dilatace LS, MR 1-2/4].pdf",
"corrected": "470629074 2026-03-31 Šebesta, Jaroslav [LZ kardiologie] [ECHO: EF 50%, hypokineza IVS a sp. stěny, dilatace LS, MR 1-2/4, indikace lázně II_3].pdf"
},
{
"original": "505809020 2026-01-14 Šebestová, Zdenka [LZ ortopedie] [TEP kyčle l.sin., kontrola 6 týdnů, chůze 2FH, doporučení lázně].pdf",
"corrected": "505809020 2026-01-14 Šebestová, Zdenka [LZ ortopedie] [TEP kyčle l.sin., kontrola 6 týdnů, chůze 2FH, indikace lázně VII_10].pdf"
},
{
"original": "505809020 2025-12-10 Šebestová, Zdenka [LZ ortopedie] [Fct. colli femor. l.sin., TEP kyčle l.sin., propuštění na RHB].pdf",
"corrected": "505809020 2025-12-10 Šebestová, Zdenka [PZ ortopedie] [29NOV-10DEC2025 Fct. colli femor. l.sin., TEP kyčle l.sin., propuštění na RHB].pdf"
},
{
"original": "7308100426 2026-04-15 Janda, Martin [Laboratoř] [hypercholesterolemie, S_Urea 9,18, glukóza 5,9, moč erytrocyty 6/ul].pdf",
"corrected": "7308100426 2026-04-15 Janda, Martin [Laboratoř] [Z000 hypercholesterolemie, S_Urea 9,18, glukóza 5,9, moč erytrocyty 6ul].pdf"
},
{
"original": "7454230454 2026-04-15 Zíková, Jana [Laboratoř] [moč kultivace negativní, dg. N309].pdf",
"corrected": "7454230454 2026-04-15 Zíková, Jana [Laboratoř] [N309 moč kultivace negativní].pdf"
},
{
"original": "0460142969 2026-04-15 Strnadová, Natálie [Laboratoř] [E660 koagulace: PT ratio 1,09, INR 1,10, aPTT 1,11, Fibrinogen 3,01].pdf",
"corrected": "0460142969 2026-04-15 Strnadová, Natálie [Laboratoř] [E660 koagulace PT ratio 1.09, INR 1.10, aPTT 1.11, Fibrinogen 3.01, parametry koagulace normální].pdf"
},
{
"original": "5855280013 2026-04-15 Holubová, Daniela [Laboratoř] [Z000 CKD G2, DM, P_Glukóza 6.8, HbA1c 44, TG 1.77, leukocyty 11.9, moč bakterie].pdf",
"corrected": "5855280013 2026-04-15 Holubová, Daniela [Laboratoř] [Z000 CKD G2, DM (prediabetes), P_Glukóza 6.8, HbA1c 44, TG 1.77, leukocyty 11.9, moč bakterie záplava].pdf"
},
{
"original": "5954110184 2026-04-15 Holečková, Hana [Laboratoř] [E119 CKD G2, P_Glukóza 6.7, HbA1c 44, bili 28.13, moč hlen].pdf",
"corrected": "5954110184 2026-04-15 Holečková, Hana [Laboratoř] [E119 CKD G2, P_Glukóza 6.7 (prediabetes), HbA1c 44, bili 28.13, moč hlen].pdf"
},
{
"original": "7556220452 2026-04-09 Štěpánová, Lenka [Laboratoř] [K20 Anti HAV IgM+total pozit, CKD G2, bili konj 5.58].pdf",
    "corrected": "7556220452 2026-04-09 Štěpánová, Lenka [Laboratoř] [K20 Anti HAV IgM+, total pozit, postočkovací protilátky, CKD G2, bili konj 5.58].pdf"
},
{
"original": "5862236435 2026-04-14 Kopřivová, Erika [Laboratoř] [Z000 CKD G2, hypercholesterol, TG 2.16, HbA1c 50, ALT 0.80, moč leukocyty 75ul].pdf",
"corrected": "5862236435 2026-04-14 Kopřivová, Erika [Laboratoř] [Z000 CKD G2, smíšená hypercholesterolémie, TG 2.16, HbA1c 50 (prediabetes), ALT 0.80, moč leukocyty 75ul].pdf"
},
{
"original": "6258130637 2026-04-14 Hofmannová, Oldřiška [Laboratoř] [D500 CKD G2, cholesterol 6.38, LDL 4.37, glukóza 7.1, moč bakterie záplava, nitrity poz].pdf",
    "corrected": "6258130637 2026-04-14 Hofmannová, Oldřiška [Laboratoř] [D500 CKD G2, čistá hypercholesterolémie 6.38, LDL 4.37, glukóza 7.1 (DM), moč bakterie záplava, nitrity+].pdf"
},
{
"original": "8256060021 2026-04-10 Karešová, Barbora [Laboratoř] [Z000 hypercholesterol 6.54, LDL 3.73, ALT 1.00, CKD G2, moč erytrocyty 11ul].pdf",
"corrected": "8256060021 2026-04-10 Karešová, Barbora [Laboratoř] [Z000 čistá hypercholesterolémie 6.54, LDL 3.73, ALT 1.00, CKD G2, moč erytrocyty 11ul].pdf"
},
{
"original": "400424003 2026-04-07 Faměra, Jiří [Laboratoř] [I10 CKD G3b, anemie, S_Urea 16.45, kreatinin 155, glukóza 5.7].pdf",
"corrected": "400424003 2026-04-07 Faměra, Jiří [Laboratoř] [I10 CKD G3b, anemie 116, S_Urea 16.45, kreatinin 155, glukóza 5.7].pdf"
},
{
"original": "0460142969 2026-04-08 Strnadová, Natálie [Laboratoř] [E660 S_Urea 2.35, AST 0.18, CKD G1, krevní obraz v normě].pdf",
"corrected": "0460142969 2026-04-08 Strnadová, Natálie [Laboratoř] [E660 krevní obraz v normě].pdf"
},
{
"original": "6055052157 2026-04-08 Frýdlová, Jana [Laboratoř] [Z000 CKD G2, prediabetes HbA1c 43, GGT 6.48, cholesterol 5.69, moč leukocyty 42ul].pdf",
"corrected": "6055052157 2026-04-08 Frýdlová, Jana [Laboratoř] [Z000 CKD G2, prediabetes HbA1c 43, GGT 6.48 (susp. alkohol), čistá hypercholesterolémie 5.69, moč leukocyty 42ul].pdf"
},
{
"original": "7160239911 2026-04-01 Čenanovičová Krkičov, Sanja [Laboratoř] [E789 čistá hypercholesterolémie 6.68, LDL 4.76, Non-HDL 5.0].pdf",
"corrected": "7160239911 2026-04-01 Čenanovičová Krkičov, Sanja [Laboratoř] [E789 čistá hypercholesterolémie 6.68, LDL 4.76, Non-HDL 5.0, CK v pořádku].pdf"
},
{
"original": "6212231861 2026-04-01 Novotný, Vladimír [Laboratoř] [D50 sideropenní anemie 105, Fe 5.9, TIBC 88.2, trombocytóza 609, mikrocytóza].pdf",
"corrected": "6212231861 2026-04-01 Novotný, Vladimír [Laboratoř] [D50 sideropenická anémie 105, Fe 5.9, TIBC 88.2, trombocytóza 609 (známý stav), mikrocytóza].pdf"
},
{
"original": "466103013 2026-04-01 Sixtová, Blanka [Laboratoř] [C188 CKD G3a, urea 12.32, kyselina močová 425, GGT 1.41, trombocytopenie 138].pdf",
"corrected": "466103013 2026-04-01 Sixtová, Blanka [Laboratoř] [C188 CKD G3a, urea 12.32, kyselina močová 425, GGT 1.41, trombocytopenie 138, železa je stále nedostatek].pdf"
},
{
"original": "5601090550 2026-02-25 Psohlavec, Miroslav [Laboratoř] [D53 B12 134 (nízký), folát 7.90, albumin 41.3, celk. bílk. 78].pdf",
"corrected": "5601090550 2026-02-25 Psohlavec, Miroslav [Laboratoř] [D53 B12 134 (nízký), folát 7.90 (nízký), albumin 41.3 OK, celk. bílk. 78 OK].pdf"
},
{
"original": "0061010422 2026-04-10 Brabcová, Barbora [Laboratoř] [Z000 foláty 9.4 (nízké), CKD G2, TSH 2.030, krevní obraz v normě].pdf",
"corrected": "0061010422 2026-04-10 Brabcová, Barbora [Laboratoř] [Z000 foláty 9.4 (nízké), B12 nízké, CKD G2, TSH 2.030, krevní obraz v normě].pdf"
},
{
"original": "486020212 2025-03-27 Krausová, Anna [EGD] [K30 biliární duodenogastrický reflux, jinak přiměřený nález].pdf",
"corrected": "486020212 2025-03-27 Krausová, Anna [gastroskopie] [K30 biliární duodenogastrický reflux, jinak přiměřený nález].pdf"
},
{
"original": "466225409 2026-03-09 Teršová, Eva [Laboratoř] [E118 CKD G3b, glukóza 10.5, HbA1c 34, C-peptid nízký, LDL 3.32, TG 1.52].pdf",
"corrected": "466225409 2026-03-09 Teršová, Eva [Laboratoř] [E118 CKD G3b, glukóza 10.5 (DM), HbA1c 34, C-peptid nízký, LDL 3.32, TG 1.52].pdf"
},
{
"original": "476027162 2026-02-25 Buňková, Zuzana [LZ endokrinologie] [E063 kompenz. imunogenní hypothyreoza, uzlová přestavba, TSH 0.818].pdf",
"corrected": "476027162 2026-02-25 Buňková, Zuzana [LZ endokrinologie] [E063 kompenz. imunogenní hypothyreoza, uzlová přestavba, TSH 0.818, na substituci].pdf"
},
{
"original": "385312025 2026-03-30 Aubrechtová, Iva [medikace] [Furon, Eliquis, Cordarone, Digoxin, Tezeo, Dilatrend, Betaserc, Xalacom].pdf",
"corrected": "385312025 2026-03-30 Aubrechtová, Iva [přehled užívané medikace] [od pacientky].pdf"
},
{
"original": "480529193 2026-01-22 Klikorka, Václav [LZ kardiologie] [I482, EF LK 60%, konc. hypertrofie LK, diastol. dysfunkce I, Mi regurg. 2+].pdf",
"corrected": "480529193 2026-01-22 Klikorka, Václav [LZ kardiologie] [I482, EF LK 60%, konc. hypertrofie LK, diastol. dysfunkce I, Mi regurg. 2+, kontrola +6m].pdf"
},
{
"original": "480529193 2025-09-04 Klikorka, Václav [LZ oční] [Z961 pseudofakie, ERM ok. dx., kontrola OCTA za 4M].pdf",
"corrected": "480529193 2025-09-04 Klikorka, Václav [LZ oční] [Z961 pseudofakie, ERM ok. dx., kontrola OCTA za 4m].pdf"
},
{
"original": "6258130637 2026-02-23 Hofmannová, Oldřiška [LZ interní] [Z039 atypická bolest na hrudi, epigastralgie, vertigo, BNH].pdf",
"corrected": "6258130637 2026-02-23 Hofmannová, Oldřiška [LZ interní] [Z039 atypická bolest na hrudi, přivezla RZS, epigastralgie, vertigo, bez známek ICHS].pdf"
},
{
"original": "436212054 2026-04-01 Těšitelová, Jana [LZ rehabilitační] [M159 st.p. TEP gen l.dx., TEP coxae l.sin., amputace PDK, inkontinence II.st.].pdf",
"corrected": "436212054 2026-04-01 Těšitelová, Jana [PZ Lázně Velichovky] [11MAR-01APR2026, indikace VII_8, M159 st.p. TEP gen l.dx., TEP coxae l.sin., amputace PDK, inkontinence II.st., všechno v lázních OK].pdf"
},
{
"original": "6104260668 2026-04-09 Neuwirth, Richard [LZ neurologie] [R42 vertigo, etanol 1.19 g/l, CT mozku bez ak. změn, kongenit. nystagmus].pdf",
"corrected": "6104260668 2026-04-09 Neuwirth, Richard [LZ neurologie] [RZS, R42 vertigo, etanol 1.19 gl, CT mozku bez ak. změn, kongenit. nystagmus].pdf"
},
{
"original": "5954110184 2015-02-10 Holečková, Hana [LZ radiační onkologie] [C50 l.dx., st.p. ablaci 2004, kompletní remise, MMG norm.].pdf",
"corrected": "5954110184 2015-02-10 Holečková, Hana [LZ radiační onkologie] [vyšetření pro posudkovou komisi, C50 l.dx., st.p. ablaci 2004, kompletní remise, MMG norm.].pdf"
},
{
"original": "5954110184 2011-11-03 Holečková, Hana [LZ chirurgie] [K800 cholecystolithiasis, cholecystektomie laparoskopicky 31.10.2011].pdf",
"corrected": "5954110184 2011-11-03 Holečková, Hana [PZ chirurgie] [30OCT-03NOV2025, K800 cholecystolithiasis, cholecystektomie laparoskopicky 31.10.2011].pdf"
},
{
"original": "5954110184 2023-09-12 Holečková, Hana [LZ endokrinologie] [eutyreoza, malá štítnice, stacion. uzel v PL benigní, BMI 26.7].pdf",
"corrected": "5954110184 2023-09-12 Holečková, Hana [LZ endokrinologie] [eutyreoza, malá štítnice, stacion. uzel v PL benigní, BMI 26.7, kontrola +1r].pdf"
},
{
"original": "5954110184 2023-08-28 Holečková, Hana [LZ rehabilitační] [M7737 ostruha patní kosti, pes planus, ultrazvuk 10x].pdf",
"corrected": "5954110184 2023-08-28 Holečková, Hana [LZ rehabilitace] [M7737 ostruha patní kosti, pes planus, ultrazvuk 10x].pdf"
},
{
"original": "285703963 2026-04-08 Bartáková, Hilde [Laboratoř] [biochemie, KO, Fe: mírná anémie, kreatinin↑, CKD G3b, B12↑].pdf",
"corrected": "285703963 2026-04-08 Bartáková, Hilde [Laboratoř] [biochemie, KO, Fe mírná anémie, kreatinin↑, CKD G3b, B12↑].pdf"
},
{
"original": "495831175 2026-04-07 Kazdová, Daniela [Laboratoř] [biochemie, hepatitidy: ALT↑, GGT↑, ALP↑, anti HAV total pozitivní].pdf",
"corrected": "495831175 2026-04-07 Kazdová, Daniela [Laboratoř] [biochemie, hepatitidy ALT↑, GGT↑, ALP↑, anti HAV total pozitivní, antiHAV IgM negativní, anamnestické protilátky].pdf"
},
{
"original": "5954110184 2024-04-23 Holečková, Hana [LZ radiační onkologie] [C50 l.dx., st.p. ablaci 2004, CR 20 let, MMG benigní, předání PL].pdf",
"corrected": "5954110184 2024-04-23 Holečková, Hana [LZ radiační onkologie] [C50 l.dx., st.p. ablaci 2004, CR 20 let, MMG benigní, předání PL, konec jejich dispenzarizace].pdf"
},
{
"original": "6104260668 2026-04-08 Neuwirth, Richard [Laboratoř] [biochemie, moč: cholesterol↑, TG↑, HDL↓, glukóza↑, CKD G1 A1].pdf",
"corrected": "6104260668 2026-04-08 Neuwirth, Richard [Laboratoř] [biochemie, moč cholesterol↑, TG↑, HDL↓, glukóza↑, CKD G1 A1].pdf"
},
{
"original": "475915002 2026-04-13 Protivová, Lidmila [Laboratoř] [biochemie, moč, KO cholesterol↑, TG↑, glukóza↑, CKD G2, leukocyty↓].pdf",
"corrected": "475915002 2026-04-13 Protivová, Lidmila [Laboratoř] [biochemie, moč, KO smíšená hyperlipidémie, cholesterol↑, TG↑, glukóza↑ (prediabetes), CKD G2, leukocyty↓].pdf"
},
{
"original": "5954110184 2024-03-25 Holečková, Hana [EKG vyšetření] [sinusový rytmus 76/min, intermed poloha, fyziologický záznam].pdf",
"corrected": "5954110184 2024-03-25 Holečková, Hana [EKG] [sinusový rytmus 76min, intermed poloha, fyziologický záznam].pdf"
},
{
"original": "5954110184 2020-03-10 Holečková, Hana [EKG] [sinusový rytmus 64/min, intermed poloha, fyziologický záznam].pdf",
"corrected": "5954110184 2020-03-10 Holečková, Hana [EKG] [sinusový rytmus 64min, intermed poloha, fyziologický záznam].pdf"
},
{
"original": "null 2026-04-20 null [Laboratoř] [moč LEU+2 125 WBC/uL, ERY+3 200 RBC/uL, pH 6, SG 1.005].pdf",
    "corrected": "475915054 Žabová, Věra 2026-04-20 [uritex] [moč LEU+2 125 WBCuL, ERY+3 200 RBCuL, pH 6, SG 1.005].pdf"
},
{
"original": "7409240399 2026-04-17 Bukvář, Martin [LZ ortopedie] [M7126 cystis politelais gensu, st.p. achilodyniam, punkce 14ml, Diop+Kort].pdf",
"corrected": "7409240399 2026-04-17 Bukvář, Martin [LZ ortopedie] [M7126 cystis politelais genu, st.p. achilodyniam, punkce 14ml, aplikace kortikoidu].pdf"
},
{
"original": "515705039 2026-04-01 Cahová, Daniela [LZ neurologie] [G20 Parkinson, klidový třes LHK+LDK, wearing off, bolest pravého ramene, m.deltoideus].pdf",
"corrected": "515705039 2026-04-01 Cahová, Daniela [LZ neurologie] [G20 Parkinson, klidový třes LHK+LDK, wearing off, bolest pravého ramene, m.deltoideus, ad MRI].pdf"
},
{
"original": "7862150351 2018-10-30 Braunspergerová, Eva [RTG LS páteře] [dextrokonvexní skolióza, lordóza, osteochondróza L5-S1, spina bifida oculta S1].pdf",
    "corrected": "7862150351 2018-10-30 Braunspergerová, Eva [RTG LS páteře] [dextrokonvexní skolióza, lordóza, osteochondróza L5-S1, spina bifida oculta S1, pooperační svorky].pdf"
},
{
"original": "7862150351 2008-09-23 Braunspergerová, Eva [PZ infekční] [hepatitida A, ikterus, BLR↑, HAV IgM poz., anti HCV neg., zlepšení].pdf",
"corrected": "7862150351 2008-09-23 Braunspergerová, Eva [PZ infekce] [19-23SEP2008, hepatitida A, ikterus, BLR↑, HAV IgM poz., anti HCV neg., zlepšení].pdf"
},
{
"original": "5855280013 2023-06-12 Holubová, Daniela [LZ endokrinologie] [adenom l.nadledviny 12x17x12mm, DM2 PAD+inzulin, dyslipidémie].pdf",
"corrected": "5855280013 2023-06-12 Holubová, Daniela [LZ endokrinologie] [adenom l.nadledviny 12x17x12mm, DM2 PAD+inzulin, dyslipidémie, konzultace s výsledky].pdf"
},
{
"original": "5855280013 2026-03-03 Holubová, Daniela [CT břicha] [kalykolitiáza l.ledviny, stacionární adenom l.nadledviny 10mm].pdf",
    "corrected": "5855280013 2026-02-27 Holubová, Daniela [CT břicha] [kalykolitiáza l.ledviny, stacionární adenom l.nadledviny 10mm, vyšetřeno pro hematurii].pdf"
},
{
"original": "7862150351 2024-06-12 Braunspergerová, Eva [EKG] [sinusový rytmus 60min, hraniční i.v. vedení, bez čerstvých změn].pdf",
"corrected": "7862150351 2024-06-12 Braunspergerová, Eva [EKG] [sinusový rytmus 60min, hraniční i.v. vedení, bez čerstvých změn, stacionární].pdf"
},
{
"original": "7755260271 2026-04-16 Straková, Barbara [žádost o předání zdravotních informací] [převzetí do péče, žádost o zaslání zdravotní dokumentace].pdf",
    "corrected": "7755260271 2026-04-16 Straková, Barbara [žádost o předání zdravotních informací] [převzetí do péče, žádost o zaslání zdravotní dokumentace, VeleňMedic s.r.o.].pdf"
},
{
"original": "7602044780 2026-04-18 Suchý, Vladimír [PZ nefrologie] [SLE+APS, renální biopsie, 2.puls Endoxan, VRE/ESBL, defekt PDK].pdf",
"corrected": "7602044780 2026-04-18 Suchý, Vladimír [PZ nefrologie] [14-18APR2026, SLE+APS, renální biopsie, 2.puls Endoxan, VREESBL, defekt PDK].pdf"
},
{
"original": "6709150613 2026-04-15 Rutrle, Petr [LZ ORL] [laryngitis chr., vestibul.sy vpravo, percepční porucha sluchu bil.].pdf",
"corrected": "6709150613 2026-04-15 Rutrle, Petr [LZ ORL] [laryngitis chr., vestibul.sy vpravo, percepční porucha sluchu bil., ad sono karotid, Rp Helicid, zakoupí Tanakan].pdf"
},
{
"original": "460614110 null Galus, Karel [LZ nefrologie] [CHOPN III.st, DM2, ICHS, CKD-EPI 35ml/s/kor, incipientní dia nefropatie].pdf",
"corrected": "460614110 2026-04-09 Galus, Karel [LZ nefrologie] [CHOPN III.st, DM2, ICHS, CKD-EPI 35mlskor, incipientní dia nefropatie].pdf"
},
{
"original": "435624102 2026-03-31 Hovorková, Eva [PZ ortopedie] [26-31MAR2026, gonartróza l.dx., TEP kolene, Zimmer Nexgen CR F/6/10].pdf",
"corrected": "435624102 2026-03-31 Hovorková, Eva [PZ ortopedie] [26-31MAR2026, gonartróza l.dx., TEP kolene].pdf"
},
{
"original": "470629074 2026-04-21 Šebesta, Jaroslav [oznámení ZP správní řízení] [zahájení správního řízení, LRPéče indikace II/3 hypertenzní choroba II-III.st].pdf",
"corrected": "470629074 2026-04-21 Šebesta, Jaroslav [oznámení ZP správní řízení] [zahájení správního řízení, návrh lázně, indikace II3 hypertenzní choroba II-III.st].pdf"
},
{
"original": "5503040026 2026-02-17 Koubek, Jiří [LZ kardiologie] [ECHO: EF 65%, konc.hypertrofie, diastol.dysfunkce I.st, Bevimlar 20mg].pdf",
"corrected": "5503040026 2026-02-17 Koubek, Jiří [LZ kardiologie] [ECHO EF 65%, konc.hypertrofie, diastol.dysfunkce I.st, Bevimlar 20mg].pdf"
},
{
"original": "480529219 2026-04-17 Nytra, Vlastimil [Laboratoř] [osteomarkery, Ca, P, ALP, vit.D 67,1 snížen, PTH, Beta-CrossLaps].pdf",
"corrected": "480529219 2026-04-17 Nytra, Vlastimil [Laboratoř] [osteomarkery, Ca, P, ALP, vit.D 67.1 snížen, PTH, Beta-CrossLaps].pdf"
},
{
"original": "435520110 2026-04-20 Nechodomová, Marie [sonografie břicha] [hypersekr.žaludku, lipomatoza pankreatu, steatoza jat., cholecystolithiaza, splenomegalie].pdf",
"corrected": "435520110 2026-04-20 Nechodomová, Marie [sonografie břicha] [zesílení stěny žaludku - dovyšetřit, hypersekr.žaludku, lipomatoza pankreatu, steatoza jat., cholecystolithiaza, splenomegalie].pdf"
},
{
"original": "6903020080 2026-04-20 Novotný, Martin [Laboratoř] [cholesterol 5.54, LDL 3.25, TG 2.06, glukoza 6.1, HbA1c 38].pdf",
"corrected": "6903020080 2026-04-20 Novotný, Martin [Laboratoř] [smíšená hyperlipidémie, prediabetes, cholesterol 5.54, LDL 3.25, TG 2.06, glukoza 6.1, HbA1c 38].pdf"
},
{
"original": "480529219 2026-04-17 Nytra, Vlastimil [Laboratoř] [ELFO bílkovin, bílkovina 69.0, albumin 0.581, gama-globuliny 0.125].pdf",
"corrected": "480529219 2026-04-17 Nytra, Vlastimil [Laboratoř] [ELFO bílkovin OK, bílkovina 69.0, albumin 0.581, gama-globuliny 0.125].pdf"
},
{
"original": "5556046672 2026-04-07 Simionová, Lýdia [Laboratoř] [močový konkrement, whewellit 100%, 6x3mm, hnědý, bradavičnatý].pdf",
"corrected": "5556046672 2026-04-07 Simionová, Lýdia [Laboratoř] [močový konkrement analýza, whewellit 100%, 6x3mm, hnědý, bradavičnatý].pdf"
},
{
"original": "510802325 2026-04-20 Simion, Vladimír [LZ chirurgie] [chronický vřed kůže, TMT amputace IV.+V.prstu PDK, defekt LDK 5x3.5cm].pdf",
"corrected": "510802325 2026-04-20 Simion, Vladimír [LZ chirurgie] [chronický vřed kůže, TMT amputace IV.+V.prstu PDK, defekt LDK 5x3.5cm, DP 3xt].pdf"
},
{
"original": "436114002 2026-03-17 Petrovská, Eliška [LZ kardiologie] [fibrilace síní paroxysmální, sinus, st.p.kardioverzi, rivaroxaban].pdf",
"corrected": "436114002 2026-03-17 Petrovská, Eliška [LZ kardiologie] [fibrilace síní paroxysmální, sinus, st.p.kardioverzi, rivaroxaban, ad Holter EKG, bisoprolol vysadí].pdf"
},
{
"original": "436114002 2026-03-14 Petrovská, Eliška [LZ interna] [fibrilace síní paroxysmální, kardioverze, sinusový rytmus, rivaroxaban].pdf",
"corrected": "436114002 2026-03-14 Petrovská, Eliška [LZ interna urgent] [fibrilace síní paroxysmální, kardioverze, sinusový rytmus, rivaroxaban].pdf"
},
{
"original": "6008091738 2026-04-20 Nikitin, Petro [Laboratoř] [urea 9.47 zvýš, CKD-EPI G2, glukoza 6.6, osmolalita 296, MCV 81.5].pdf",
"corrected": "6008091738 2026-04-20 Nikitin, Petro [Laboratoř] [Z000 prediabetes, mikrocyty, urea 9.47 zvýš, CKD-EPI G2, glukoza 6.6, osmolalita 296, MCV 81.5].pdf"
},
{
"original": "440802018 2026-04-20 Havelka, Miroslav [Laboratoř] [CKD-EPI G2, NT-proBNP 6128 zvýš, CRP 6.6, MCV 81.8, MCHC 314].pdf",
"corrected": "440802018 2026-04-20 Havelka, Miroslav [Laboratoř] [srdeční selhání, mikrocyty, CKD-EPI G2, NT-proBNP 6128 zvýš, CRP 6.6, MCV 81.8, MCHC 314].pdf"
},
{
"original": "7857260422 2023-02-28 Jindrová, Kateřina [LZ ORL] [st.p. incizi inflam aterom P tváře - zhojeno, extirpace atheromu P tváře].pdf",
"corrected": "7857260422 2023-02-28 Jindrová, Kateřina [LZ ORL] [st.p. incizi inflam aterom P tváře - zhojeno, extirpace atheromu P tváře domluveno].pdf"
},
{
"original": "7857260422 2021-05-06 Jindrová, Kateřina [LZ angiologie] [CVI II. st. dle CEAP C4, ortostáza, flebitida/flebotrombóza bilat. neprokázána].pdf",
"corrected": "7857260422 2021-05-06 Jindrová, Kateřina [LZ angiologie] [CVI II. st. dle CEAP C4, ortostáza, flebitidaflebotrombóza bilat. neprokázána].pdf"
},
{
"original": "7857260422 2021-05-20 Jindrová, Kateřina [LZ neurologie] [VAS C-pá, porucha statodynamiky C úseku, tinitus auric. bilat., ad rehab].pdf",
"corrected": "7857260422 2021-05-20 Jindrová, Kateřina [LZ neurologie] [VAS Cp, porucha statodynamiky C úseku, tinitus auric. bilat., ad RHB].pdf"
},
{
"original": "7857260422 2024-02-12 Jindrová, Kateřina [EKG] [sinusový rytmus 70/min, semivertik poloha, osa 55st, fyziol záznam].pdf",
"corrected": "7857260422 2024-02-12 Jindrová, Kateřina [EKG] [sinusový rytmus 70min, semivertik poloha, osa 55st, fyziol záznam].pdf"
},
{
"original": "5958260660 2026-02-04 Masopustová, Ivana [LZ interna] [viroza 1/25, RTH, CRP norm, klacid, únava, kašel, dušnost, zlepšuje se].pdf",
"corrected": "5958260660 2026-02-04 Masopustová, Ivana [LZ interna] [viroza 125, RTH, CRP norm, klacid, únava, kašel, dušnost, zlepšuje se].pdf"
},
{
"original": "5958260660 2026-03-03 Masopustová, Ivana [LZ kardiologie] [Benig. kom. extrasystolie, Art. hypertenze komp., HLP, thyreopathie disp. + OA].pdf",
"corrected": "5958260660 2026-03-03 Masopustová, Ivana [LZ kardiologie] [Benig. kom. extrasystolie, Art. hypertenze komp., HLP, thyreopathie disp. + OA, důvodu nerozumím].pdf"
},
{
"original": "7857260422 2026-04-21 Jindrová, Kateřina [Laboratoř] [Z000, erytrocyty 5.27 zvýš, hemoglobin 161 zvýš, hematokrit 0.475 zvýš].pdf",
"corrected": "7857260422 2026-04-21 Jindrová, Kateřina [Laboratoř] [Z000, erytrocyty 5.27 zvýš, hemoglobin 161 zvýš, hematokrit 0.475 zvýš, nic zvláštního].pdf"
},
{
"original": "7555270085 2026-04-20 Křížová, Lucie [Laboratoř] [CKD-EPI G2, hemoglobin 113 sníž, MCV 78 sníž, MCH 23.9 sníž, MCHC 307 sníž].pdf",
"corrected": "7555270085 2026-04-20 Křížová, Lucie [Laboratoř] [E119, CKD-EPI G2, hemoglobin 113 sníž, MCV 78 sníž, MCH 23.9 sníž, MCHC 307 sníž, sideropenická anémie].pdf"
},
{
"original": "505218025 2026-04-14 Beznosková, Milena [LZ diabetologie] [DM 2.typu, zlepšení kompenzace, HbA1c 49 mmol/mol, léčba PAD a dieta].pdf",
"corrected": "505218025 2026-04-14 Beznosková, Milena [LZ diabetologie] [DM 2.typu, zlepšení kompenzace, HbA1c 49 mmolmol, léčba PAD a dieta].pdf"
},
{
"original": "495524246 2026-03-30 Dusilová, Jana [LZ urologie] [RCC pT1G1 st.p. NE I26, restaging: bez obtíží, nál. přiměř., CT s kontrastem plán].pdf",
"corrected": "495524246 2026-03-30 Dusilová, Jana [LZ urologie] [RCC pT1G1 st.p. NE I26, restaging bez obtíží, nál. přiměř., CT s kontrastem plán].pdf"
},
{
"original": "5452020420 2026-04-20 Uhlířová, Jana [RTG LS páteře] [anterolistéza L5 11mm gr.2, snížení disků L4-S1, spondylofyty L2-S1, spondyloartróza bilat.].pdf",
"corrected": "5452020420 2026-04-20 Uhlířová, Jana [RTG LSp] [revize nálezu, anterolistéza L5 11mm gr.2, snížení disků L4-S1, spondylofyty L2-S1, spondyloartróza bilat.].pdf"
},
{
"original": "9451210054 2026-04-21 Bódisová, Barbara [LZ interna] [bolest Thp po zvedání břemene, DDimer slabě poz., EKG ektop. síňový rytmus].pdf",
"corrected": "9451210054 2026-04-21 Bódisová, Barbara [LZ interna] [bolest Thp po zvedání břemene, DDimer slabě poz., EKG ektop. síňový rytmus, není kardiologické].pdf"
},
{
"original": "6761150341 2026-04-21 Písaříková, Helena [Laboratoř] [Z000, CKD-EPI G2, cholesterol 5.76 zvýš, Non-HDL 3.9 zvýš, glukóza 5.7 zvýš].pdf",
"corrected": "6761150341 2026-04-21 Písaříková, Helena [Laboratoř] [Z000, CKD-EPI G2, cholesterol 5.76 zvýš, Non-HDL 3.9 zvýš, glukóza 5.7 zvýš, prediabetes, hypercholesterolémie].pdf"
},
{
"original": "5558270113 2026-03-03 Knejslíková, Alena [LZ radiační onkologie] [pokračuje v CHRT, sliznice G0, urogen G0, dolní GIT G0, další CHT 12.3.26].pdf",
"corrected": "5558270113 2026-03-03 Knejslíková, Alena [LZ radiační onkologie] [kontrola při radioterapii, pokračuje v CHRT, sliznice G0, urogen G0, dolní GIT G0, další CHT 12.3.26].pdf"
},
{
"original": "5558270113 2026-02-12 Knejslíková, Alena [LZ radiační onkologie] [Ca endometria pT1b pN1 FIGO IIIC1, indik. adjuv. CHRT PORTEC3, CHT CDDP+CBDCA+PTX].pdf",
"corrected": "5558270113 2026-02-12 Knejslíková, Alena [LZ radiační onkologie] [chemopohovor, Ca endometria pT1b pN1 FIGO IIIC1, indik. adjuv. CHRT PORTEC3, CHT CDDP+CBDCA+PTX].pdf"
},
{
"original": "465201175 2026-02-05 Voříšková, Helena [PZ kardiologie] [FIS a Flutter síní, paroxysmus Flutteru, AK Clexane, art. hypertenze].pdf",
"corrected": "465201175 2026-02-05 Voříšková, Helena [PZ kardiologie] [4-5FEB2026, FIS a Flutter síní, paroxysmus Flutteru, AK Clexane, art. hypertenze].pdf"
},
{
"original": "465201175 2026-01-07 Voříšková, Helena [PZ gynekologie-porodnictví] [I480 paroxyzmální FiS, pád, bolest kyčlí a levého hemithoraxu, analgetika].pdf",
"corrected": "465201175 2026-01-07 Voříšková, Helena [PZ gynekologie] [z interny pro lůžkovou tíseň, I480 paroxyzmální FiS, pád, bolest kyčlí a levého hemithoraxu, analgetika].pdf"
},
{
"original": "470916013 2026-04-14 Dvořák, Josef [LZ plicní] [IPF dg. HRCT+klinika, MDT 2/2024, Ofev 100mg, Vigantol 3 kapky].pdf",
"corrected": "470916013 2026-04-14 Dvořák, Josef [LZ plicní] [IPF dg. HRCT+klinika, MDT 22024, Ofev 100mg, Vigantol 3 kapky, ad kožní].pdf"
},
{
"original": "470916013 2026-04-02 Dvořák, Josef [Návrh na lázeňskou léčbu] [intersticiální plicní fibróza, indikace V/6, J84.1, Luhačovice+Mariánské Lázně].pdf",
"corrected": "470916013 2026-04-02 Dvořák, Josef [Návrh na lázeňskou léčbu] [intersticiální plicní fibróza, indikace V6, J84.1, Luhačovice+Mariánské Lázně].pdf"
},
{
"original": "470916013 2026-04-02 Dvořák, Josef [Návrh na lázeňskou léčbu] [intersticiální plicní fibróza, indikace V6, J84.1, Luhačovice+Mariánské Lázně].pdf",
"corrected": "470916013 2026-04-02 Dvořák, Josef [Návrh na lázeňskou léčbu příloha] [intersticiální plicní fibróza, indikace V6, J84.1, Luhačovice+Mariánské Lázně].pdf"
},
{
"original": "470916013 2026-04-21 Dvořák, Josef [LZ interna] [revize před lázněmi, IPF, AVNRT, AH, DM2, dyslipidémie, hypotyreóza].pdf",
"corrected": "470916013 2026-04-21 Dvořák, Josef [LZ interna] [vyšetření před lázněmi, IPF, AVNRT, AH, DM2, dyslipidémie, hypotyreóza].pdf"
},
{
"original": "445318078 2026-04-23 Kusáková, Jaroslava [LZ revmatologie] [gonartróza III-IV st., susp. atypická PMR, klesající zánět, v plánu TEP 5/2026].pdf",
"corrected": "445318078 2026-04-23 Kusáková, Jaroslava [LZ revmatologie] [gonartróza III-IV st., susp. atypická PMR, klesající zánět, v plánu TEP 52026].pdf"
},
{
"original": "5521946540 2025-12-30 Peterková, Eliška [PZ chirurgie] [S02.00 Fissura calvae, pád na eskalátoru, odlomení kost. fragmentu okcipitalně].pdf",
"corrected": "5521946540 2025-12-30 Peterková, Eliška [PZ chirurgie] [29-30DEC2025, pád v metru, bezvìdomí, S02.00 Fissura calvae, pád na eskalátoru, odlomení kost. fragmentu okcipitalně].pdf"
},
{
"original": "0552194654 2025-11-07 Peterková, Eliška [PZ psychiatrie] [F432, TS intox. venlafaxinem, 5x Epi záchvat, emočně nestab. osobnost].pdf",
"corrected": "0552194654 2025-11-07 Peterková, Eliška [PZ psychiatrie] [31OCT-07NOV2025, pokus o sebevraždu, F432, TS intox. venlafaxinem, 5x Epi záchvat, emočně nestab. osobnost].pdf"
},
{
"original": "5505290252 2026-04-21 Flek, Zbyněk [LZ urologie] [Ca prostatae pT2cGS 3+4 po dvRP 6/2019, iPSA 6,26, PSA 0,081, drobné parapelv. cysty ledvin].pdf",
"corrected": "5505290252 2026-04-21 Flek, Zbyněk [LZ urologie] [kontrola, Ca prostatae pT2cGS 3+4 po dvRP 62019, iPSA 6,26, PSA 0,081, drobné parapelv. cysty ledvin, trvá complete remission].pdf"
},
{
"original": "8452 2026-04-02 Věkrbeová [Laboratoř] [moč chemicky: ERY trace, ostatní neg., pH 6, SG 1.020].pdf",
"corrected": "8755120429 2026-04-02 [uritex] [moč chemicky ERY trace, ostatní neg., pH 6, SG 1.020].pdf"
},
{
"original": "461001479 2026-04-21 Šťastný, Libor [LZ endokrinologie] [St.p. TTE dx a STE sin 5/18, strumiprivní hypotyreóza substituovaná, Letrox 150ug].pdf",
"corrected": "461001479 2026-04-21 Šťastný, Libor [LZ endokrinologie] [St.p. TTE dx a STE sin 518, strumiprivní hypotyreóza substituovaná, Letrox 150ug].pdf"
},
{
"original": "9901040000 2026-04-26 Tvrz, Matěj [export zdraví krevní tlak] [prům. 153/74 mmHg, hypertenze 5d, emergentní hypertenzní stav 1d].pdf",
"corrected": "9901040000 2026-04-26 Tvrz, Matěj [export zdraví krevní tlak] [prům. 15374 mmHg, hypertenze 5d, emergentní hypertenzní stav 1d].pdf"
},
{
"original": "395907022 2026-04-10 Herzová, Marie [LZ ortopedie] [Gonarthrosis bilat., obstr. kortik. +M i.a. vlevo, indik. lázně VII7, M179].pdf",
"corrected": "395907022 2026-04-10 Herzová, Marie [LZ ortopedie] [indikace lázně VII7, M179, gonarthrosis bilat., obstr. kortik. +M i.a. vlevo].pdf"
},
{
"original": "6008091738 2020-07-15 Nikitin, Petro [LZ gastroenterologie] [Antrumgastritida, inkompetentní kardie, gastroesophageální reflux].pdf",
"corrected": "6008091738 2020-07-15 Nikitin, Petro [LZ gastro] [gastroskopie, antrumgastritida, inkompetentní kardie, gastroesophageální reflux].pdf"
},
{
"original": "6008091738 2025-11-24 Nikitin, Petro [LZ dermatologie] [seboroická verruka].pdf",
"corrected": "6008091738 2025-11-24 Nikitin, Petro [LZ kožní [seboroická verruka L tváøe, abraze].pdf"
},
{
"original": "6008091738 2025-08-25 Nikitin, Petro [LZ kožní] [pigmentové névy tč. klidné, bez onkosuspekce].pdf",
"corrected": "6008091738 2025-08-25 Nikitin, Petro [LZ kožní] [vyšetøení dermatoskopem, pigmentové névy tč. klidné, bez onkosuspekce].pdf"
},
{
"original": "6008091738 2025-05-20 Nikitin, Petro [LZ kardiologie] [ICHS, po PCI RIA 2018, EF LK 65%, mírná dilatace aort. kořene bez progrese].pdf",
"corrected": "6008091738 2025-05-20 Nikitin, Petro [LZ kardiologie] [kontrola, ICHS, po PCI RIA 2018, EF LK 65%, mírná dilatace aort. kořene bez progrese].pdf"
},
{
"original": "6008091738 2018-08-24 Nikitin, Petro [RTG páteře] [C páteř: lordosa oploštělá, C56 zúžen, spondylóza; Th: skolióza, kyfóza, Th7-10].pdf",
"corrected": "6008091738 2018-08-24 Nikitin, Petro [RTG páteře] [C páteř lordosa oploštělá, C56 zúžen, spondylóza; Th skolióza, kyfóza, Th7-10].pdf"
},
{
"original": "7109203893 2026-04-07 Deyak, Mykhaylo [Laboratoř] [glukóza 7,1, HbA1c 36, chol. 4,49, LDL 3,07, HDL 0,99, osmolalita 301, PSA 1,438].pdf",
"corrected": "7109203893 2026-04-07 Deyak, Mykhaylo [Laboratoř] [Z000, glukóza 7,1, HbA1c 36, chol. 4,49, LDL 3,07, HDL 0,99, osmolalita 301, PSA 1,438].pdf"
},
{
"original": "415414073 2026-04-21 Pekárková, Vlasta [Laboratoř] [Z000, K 5,8, osmolalita 296, glukóza 5,7, HbA1c 41, CKD-EPI 0,92 G3a, trombocyty 140].pdf",
"corrected": "415414073 2026-04-21 Pekárková, Vlasta [Laboratoř] [Z000, prediabetes, K 5,8, osmolalita 296, glukóza 5,7, HbA1c 41, CKD-EPI 0,92 G3a, trombocyty 140].pdf"
},
{
"original": "505218025 2026-04-22 Beznosková, Milena [Laboratoř] [E789, urea 8,31, CKD-EPI 1,33 G2, osmolalita 302, glukóza 7,5, CK 5,49].pdf",
"corrected": "505218025 2026-04-22 Beznosková, Milena [Laboratoř] [E789, diabetes, urea 8,31, CKD-EPI 1,33 G2, osmolalita 302, glukóza 7,5, CK 5,49].pdf"
},
{
"original": "500206172 2026-04-22 Beznoska, Miloslav [Laboratoř] [E789, CKD-EPI 1,21 G2, glukóza 5,9, HbA1c 41, LDL 3,29].pdf",
"corrected": "500206172 2026-04-22 Beznoska, Miloslav [Laboratoř] [E789, prediabetes, CKD-EPI 1,21 G2, glukóza 5,9, HbA1c 41, LDL 3,29].pdf"
},
{
"original": "475915054 2026-04-20 Žabová, Věra [Laboratoř] [moč: E. coli 10E5 CFU/ml, citlivá na ampicilin, cefuroxim, cotrimoxazol, pivmecilinam].pdf",
"corrected": "475915054 2026-04-20 Žabová, Věra [Laboratoř] [N309, kultivace a citlivost, moč E. coli 10E5 CFUml, citlivá na ampicilin, cefuroxim, cotrimoxazol, pivmecilinam].pdf"
},
{
"original": "7059087629 2026-04-13 Tůmová, Renáta [Laboratoř] [E789, chol. 7,34, LDL 4,52, non-HDL 5,53, glukóza 5,83, CKD-EPI 1,42 G2].pdf",
"corrected": "7059087629 2026-04-13 Tůmová, Renáta [Laboratoř] [E789, smíšená hyperlipidémie, prediabetes, chol. 7,34, LDL 4,52, non-HDL 5,53, glukóza 5,83, CKD-EPI 1,42 G2].pdf"
},
{
"original": "7352200328 2026-04-10 Vališová, Gabriela [Laboratoř] [Z000, chol. 5,62, LDL 3,19, HDL 1,13, TG 4,29, non-HDL 4,5, glukóza 5,4].pdf",
"corrected": "7352200328 2026-04-10 Vališová, Gabriela [Laboratoř] [Z000, smíšená hyperlipidémie, chol. 5,62, LDL 3,19, HDL 1,13, TG 4,29, non-HDL 4,5, glukóza 5,4].pdf"
},
{
"original": "6757100592 2026-04-16 Slabá, Radka [Laboratoř] [E789, CKD-EPI 1,31 G2, TG 1,90, glukóza 5,8, HbA1c 36, chol. 4,35, LDL 2,10].pdf",
"corrected": "6757100592 2026-04-16 Slabá, Radka [Laboratoř] [E789, prediabetes, CKD-EPI 1,31 G2, TG 1,90, glukóza 5,8, HbA1c 36, chol. 4,35, LDL 2,10].pdf"
},
{
"original": "395907022 2026-04-10 Herzová, Marie [LZ ortopedie] [gonarthrosis bilat, obstřik kortik+M i.a., indik. lázeňská terapie VII/7 M179].pdf",
"corrected": "395907022 2026-04-10 Herzová, Marie [LZ ortopedie] [gonarthrosis bilat, obstřik kortik+M i.a., indik. lázeňská terapie VII7 M179].pdf"
},
{
"original": "356031017 2025-10-27 Mejstříková, Marcela [LZ gastroenterologie] [inkompetence kardie, lehce polyp. GE junkce, antrální gastropatie, biopsie].pdf",
"corrected": "356031017 2025-10-27 Mejstříková, Marcela [LZ gastroenterologie] [gastroskopie, inkompetence kardie, lehce polyp. GE junkce, antrální gastropatie, biopsie].pdf"
},
{
"original": "356031017 2026-01-27 Mejstříková, Marcela [SONO krku] [drobné koloidní uzlíky a spongiformní uzel levého laloku š.ž.].pdf",
"corrected": "356031017 2026-01-27 Mejstříková, Marcela [sono ŠŽ] [drobné koloidní uzlíky a spongiformní uzel levého laloku ŠŽ].pdf"
},
{
"original": "346204097 2025-11-14 Kopřivíková, Jarmila [PZ neurologie] [1114NOV2025 embolus M2 ACM sin, trombektomie TICl2c, iCMP].pdf",
"corrected": "346204097 2025-11-14 Kopřivíková, Jarmila [PZ neurologie] [1114NOV2025, iktus, embolus M2 ACM sin, trombektomie TICl2c, iCMP].pdf"
},
{
"original": "8351112693 2026-04-27 Zelenková, Petra [sono mamm.] [fibrozní dysplazie, vícečetné fibromy bilat., expanzivní proces ZHQ vlevo s benigními markantami].pdf",
"corrected": "8351112693 2026-04-27 Zelenková, Petra [sono prsù] [fibrozní dysplazie, vícečetné fibromy bilat., expanzivní proces ZHQ vlevo s benigními markantami].pdf"
},
{
"original": "450113005 2025-01-16 Fiala, Václav [LZ angiologie] [Ektazie AP bilat., sono žil DKK: bil. hluboký žilní syst. bez trombozy, varikozity bilat.].pdf",
"corrected": "450113005 2025-01-16 Fiala, Václav [LZ angiologie] [Ektazie AP bilat., sono žil DKK bil. hluboký žilní syst. bez trombozy, varikozity bilat.].pdf"
},
{
"original": "450113005 2025-03-28 Fiala, Václav [CT krku, hrudníku, břicha a pánve] [progrese nadbráničníí lymfadenopatie, NHL MZL KS IV A, 1. relaps].pdf",
"corrected": "450113005 2025-03-28 Fiala, Václav [CT krku, hrudníku, břicha a pánve] [progrese nadbráničníí lymfadenopatie, NHL MZL KS IV A, 1. relaps, nekompletní zpráva].pdf"
},
{
"original": "450113005 2019-02-10 Fiala, Václav [LZ denzitometrie] [snížení kostní denzity v pásmu osteopenie, nehomogenní rozložení denzity].pdf",
"corrected": "450113005 2019-02-10 Fiala, Václav [LZ denzitometrie] [T-1.6, osteopenie, snížení kostní denzity v pásmu osteopenie, nehomogenní rozložení denzity].pdf"
},
{
"original": "450113005 2026-02-18 Fiala, Václav [LZ hematologie] [MZL, lymfocytóza 42.98, B-NHL CD20+ 65.4%, lymfadenopatie, FIS po kardioverzi].pdf",
"corrected": "450113005 2026-02-18 Fiala, Václav [LZ hematologie] [NHL, lymfocytóza 42.98, B-NHL CD20+ 65.4%, lymfadenopatie, FIS po kardioverzi].pdf"
},
{
"original": "5862236435 2026-03-18 Kopřivová, Erika [sono břicha] [st.p. IK resekci, IK anastomóza, neo-TI, jaterní steatóza, korové cysty pravé ledviny].pdf",
"corrected": "5862236435 2026-03-18 Kopřivová, Erika [sono břicha] [Crohnova nemoc, st.p. IK resekci, IK anastomóza, neo-TI, jaterní steatóza, korové cysty pravé ledviny].pdf"
},
{
"original": "5862236435 2026-01-12 Kopřivová, Erika [LZ gastroenterologie] [Crohnova nemoc, st.p. LPSK IK resekci, terapie Entyvio, switch z Remsima 1/2021].pdf",
"corrected": "5862236435 2026-01-12 Kopřivová, Erika [LZ gastroenterologie] [Crohnova nemoc, st.p. LPSK IK resekci, terapie Entyvio, switch z Remsima 12021].pdf"
},
{
"original": "365123089 2026-04-22 Opršalová, Libuše [Laboratoř] [dg. I839, warfarin, PT ratio 2.32*, INR 2.48*].pdf",
"corrected": "365123089 2026-04-22 Opršalová, Libuše [Laboratoř] [dg. I839, warfarin, PT ratio 2.32, INR 2.48].pdf"
},
{
"original": "7361130040 2021-12-07 Šilhavá, Simona [LZ plicní] [FVP: FEV1 61%, FEV1/FVC 79%, PEF 48%, TLco 65%, obstrukce].pdf",
"corrected": "7361130040 2021-12-07 Šilhavá, Simona [LZ plicní] [FVP FEV1 61%, FEV1FVC 79%, PEF 48%, TLco 65%, obstrukce].pdf"
},
{
"original": "5862236435 2026-02-23 Kopřivová, Erika [LZ interna] [hypertenze TK 161/95, BMI 31.87, Crohn-Entyvio, DM2, HLP, hyperurikémie, CPAP].pdf",
"corrected": "5862236435 2026-02-23 Kopřivová, Erika [LZ interna] [hypertenze TK 16195, BMI 31.87, Crohn-Entyvio, DM2, HLP, hyperurikémie, CPAP, ad urologie].pdf"
},
{
"original": "425412434 null Hornofová, Helena [LZ algologie] [P ramenní funkční, min. bolestivý; L ramenní bolestivý, hybnost dobrá; Biofenac, RTG ramen].pdf",
"corrected": "425412434 2026-04-28 Hornofová, Helena [LZ ambulance bolesti] [nevím datum, P ramenní funkční, min. bolestivý; L ramenní bolestivý, hybnost dobrá; Biofenac, RTG ramen].pdf"
},
{
"original": "7701120955 2026-04-21 Moudrý, Michal [LZ interna] [EKG sinusový rytmus 64/min, křivka v mezích normy, arteriální hypertenze].pdf",
"corrected": "7701120955 2026-04-21 Moudrý, Michal [LZ interna] [EKG sinusový rytmus 64min, křivka v mezích normy, arteriální hypertenze].pdf"
},
{
"original": "471126130 2026-04-07 Procházka, Vladimír [LZ angiologie] [CVI bez progrese, bez TEN, varikozity přibývají, Duplex UŽ DK bez obliterace].pdf",
"corrected": "471126130 2026-04-07 Procházka, Vladimír [LZ cévní] [CVI bez progrese, bez TEN, varikozity přibývají, Duplex UŽ DK bez obliterace].pdf"
},
{
"original": "471126130 2026-02-12 Procházka, Vladimír [LZ kardiologie] [FS 114min, QRS 110, LAH, indikována reablace FS].pdf",
"corrected": "471126130 2026-02-12 Procházka, Vladimír [LZ kardiologie] [plánovaná kontrola, FS 114min, QRS 110, LAH, indikována reablace FS].pdf"
},
{
"original": "null 2026-04-27 Drakpelova [Laboratoř] [DM2, Glucophage XR 1000 0-0-1, HbA1c 4.5-4.2, Chol 3.07-3.77].pdf",
"corrected": "515820013 2026-04-27 Drakselová, Daniela [INR karta] [07APR2025-10MAR2026].pdf"
},
{
"original": "5951231044 2026-04-21 Vašinová, Jiřina [PZ ortopedie] [1321APR2026 impl. TEP coxae l.dx., koxartróza l.dx. KL IV.].pdf",
"corrected": "5951231044 2026-04-21 Vašinová, Jiřina [PZ ortopedie] [1321APR2026 impl. TEP coxae l.dx., koxartróza l.dx., indikace KLL VII_10].pdf"
},
{
"original": "480416072 2026-03-09 Štrup, Petr [žádanka OZP] [žádanka o vyšetření zdravotního stavu pro průkaz OZP, komplexní vyšetření 958 Kč].pdf",
"corrected": "480416072 2026-03-09 Štrup, Petr [žádanka IPZS] [žádanka o vyšetření zdravotního stavu pro průkaz OZP, komplexní vyšetření 958 Kč].pdf"
},
{
"original": "9301280417 2026-03-25 Vaňous, Jakub [žádost o předání zdravotních informací] [registrace u MUDr. Panáčkové, žádost o zaslání dokumentace].pdf",
"corrected": "9301280417 2026-03-25 Vaňous, Jakub [žádost o předání zdravotních informací] [ResTrial s.r.o.].pdf"
},
{
"original": "Binder1.pdf",
"corrected": "8056010149 2026-04-28 [výbìr nekompletních zpráv] [od pacientky].pdf"
},
{
"original": "6709150613 2026-04-28 Rutrle, Petr [LZ ORL] [PVS - v.s m. Menier, t.č hypaksuis perc. apicochlearis].pdf",
"corrected": "6709150613 2026-04-28 Rutrle, Petr [LZ ORL] [PVS - v.s m. Menier, t.č hypakusis perc. apicochlearis, doporučena hyperbarická komora].pdf"
}
]
@@ -0,0 +1,468 @@
"""
Zpracování naskenovaných PDF — nová verze.
1. Preview originálu + Claude Vision API
2. Rename dialog
3. 5 variant komprese → uživatel vybere
4. Uložit do Processed, smazat originál
"""
import base64
import gc
import io
import json
import os
import re
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path
if sys.platform == "win32":
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8", errors="replace")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8", errors="replace")
import anthropic
from pdf2image import convert_from_path
sys.path.insert(0, str(Path(__file__).parent.parent))
from Knihovny.najdi_dropbox import get_dropbox_root
from Knihovny.najdi_medicus import get_medicus_config
def _load_env():
env_path = Path(__file__).parent.parent / ".env"
if env_path.exists():
for line in env_path.read_text(encoding="utf-8").splitlines():
line = line.strip()
if "=" in line and not line.startswith("#"):
k, v = line.split("=", 1)
os.environ[k.strip()] = v.strip()
_load_env()
POPPLER_PATH = r"C:/Poppler/Library/bin"
CORRECTIONS = True # True = corrections.json se načítá a ukládá; False = ignorovat
_DROPBOX = Path(get_dropbox_root())
TO_PROCESS = _DROPBOX / r"Ordinace\Dokumentace_ke_zpracování\Ricoh Fi-8040\KeZpracování"
PROCESSED = _DROPBOX / r"Ordinace\Dokumentace_ke_zpracování\Ricoh Fi-8040\Zpracováno"
CORRECTIONS_FILE = Path(__file__).parent / "corrections.json"
NAMING_RULES_FILE = Path(__file__).parent / "naming_rules.md"
DOKUMENTACE = _DROPBOX / r"Ordinace\Dokumentace_zpracovaná"
import threading
_dokumentace_index: set[str] = set()
_dokumentace_ready = threading.Event()
def _load_dokumentace_index_bg():
    """Background worker: index all filenames in the processed-docs archive.

    Populates the module-level ``_dokumentace_index`` set and signals
    ``_dokumentace_ready`` when done, so ``check_duplicates`` can wait on it.
    """
    global _dokumentace_index
    if DOKUMENTACE.exists():
        collected = {entry.name for entry in DOKUMENTACE.iterdir() if entry.is_file()}
    else:
        collected = set()
    _dokumentace_index = collected
    _dokumentace_ready.set()
    print(f" Index dokumentace: {len(collected)} souborů načteno.")
def start_dokumentace_index():
    """Kick off the archive-index load on a daemon thread (fire and forget)."""
    threading.Thread(target=_load_dokumentace_index_bg, daemon=True).start()
# Helper scripts launched as subprocesses (see the run_* helpers below).
VIEWER = Path(__file__).parent / "preview_viewer.py"
RENAME_DIALOG = Path(__file__).parent / "rename_dialog.py"
VARIANT_PICKER = Path(__file__).parent / "variant_picker.py"
# The 5 compression variants offered to the user: (label, render DPI, JPEG quality).
COMPRESS_VARIANTS = [
    ("300 DPI / q90", 300, 90),
    ("200 DPI / q85", 200, 85),
    ("150 DPI / q80", 150, 80),
    ("120 DPI / q75", 120, 75),
    ( "96 DPI / q70", 96, 70),
]
# ─── Komprese jedné varianty ──────────────────────────────────────────────────
def compress_to_temp(pdf_path: Path, dpi: int, quality: int) -> Path:
    """Re-render every page of *pdf_path* as an embedded JPEG and save a temp PDF.

    Each page is rasterised at *dpi* and re-embedded as a JPEG of the given
    *quality*, which typically shrinks raw scanner output dramatically.

    Args:
        pdf_path: Source PDF.
        dpi: Render resolution for the rasterisation.
        quality: JPEG quality (0-100) for the embedded page images.

    Returns:
        Path of the newly written temporary PDF; the caller is responsible
        for deleting it.
    """
    import fitz  # PyMuPDF — imported lazily; only needed for compression

    src = fitz.open(str(pdf_path))
    out = fitz.open()
    try:
        mat = fitz.Matrix(dpi / 72.0, dpi / 72.0)
        for page in src:
            pix = page.get_pixmap(matrix=mat, colorspace=fitz.csRGB)
            img_bytes = pix.tobytes("jpeg", jpg_quality=quality)
            img_doc = fitz.open("pdf", fitz.open("jpeg", img_bytes).convert_to_pdf())
            rect = page.rect
            new_page = out.new_page(width=rect.width, height=rect.height)
            new_page.show_pdf_page(new_page.rect, img_doc, 0)
            # Fix: close the per-page intermediate document — it previously
            # leaked memory/handles on multi-page scans.
            img_doc.close()
        # NOTE: mktemp only reserves a name (insecure-but-simple, consistent
        # with the rest of this file); the caller deletes the file later.
        tmp = Path(tempfile.mktemp(suffix=".pdf"))
        out.save(tmp, deflate=True, garbage=4)
    finally:
        # Fix: guarantee cleanup of both documents even when rendering fails.
        src.close()
        out.close()
    return tmp
# ─── Medicus ověření ─────────────────────────────────────────────────────────
def _medicus_connect():
try:
import fdb
cfg = get_medicus_config()
return fdb.connect(dsn=cfg.dsn, user="SYSDBA", password="masterkey", charset="win1250")
except Exception as e:
print(f" [Medicus] Nepřipojeno: {e}")
return None
def _lookup_by_rc(cur, rc_digits: str) -> dict | None:
cur.execute(
"SELECT IDPAC, PRIJMENI, JMENO, RODCIS FROM KAR "
"WHERE REPLACE(RODCIS, '/', '') = ?", (rc_digits,)
)
row = cur.fetchone()
if row:
return {"idpac": row[0], "prijmeni": row[1].strip(), "jmeno": row[2].strip(), "rodcis": row[3].strip()}
return None
def _rc_candidates(rc: str) -> list[str]:
similar = {"0": "8", "8": "0", "1": "7", "7": "1", "5": "6", "6": "5", "3": "8"}
candidates = set()
for i in range(len(rc)):
candidates.add(rc[:i] + rc[i+1:])
for i in range(len(rc) + 1):
candidates.add(rc[:i] + "0" + rc[i:])
for i, ch in enumerate(rc):
if ch in similar:
candidates.add(rc[:i] + similar[ch] + rc[i+1:])
candidates.discard(rc)
return sorted(c for c in candidates if len(c) in (9, 10))
def _rc_checksum_ok(rc: str) -> bool:
digits = re.sub(r"\D", "", rc)
if len(digits) == 10:
return int(digits) % 11 == 0
return True
def verify_patient(rc_raw: str) -> dict:
    """Verify a scanned birth number against the Medicus patient register.

    Tries an exact match first; on failure, tries OCR-error candidates from
    ``_rc_candidates`` and prefers candidates with a valid checksum.

    Returns:
        Dict with keys ``status`` ("ok" / "fuzzy" / "not_found" / "offline"),
        ``patient`` (record dict or None) and ``rc_corrected`` (the repaired
        birth number on a fuzzy hit, else None).  Fuzzy results also carry
        ``all_matches`` with every candidate that matched.
    """
    rc = re.sub(r"\D", "", rc_raw or "")
    not_found = {"status": "not_found", "patient": None, "rc_corrected": None}
    if not rc:
        return not_found
    con = _medicus_connect()
    if con is None:
        return {"status": "offline", "patient": None, "rc_corrected": None}
    try:
        cur = con.cursor()
        exact = _lookup_by_rc(cur, rc)
        if exact is not None:
            return {"status": "ok", "patient": exact, "rc_corrected": None}
        hits = []
        for cand in _rc_candidates(rc):
            found = _lookup_by_rc(cur, cand)
            if found:
                hits.append((cand, found))
        if not hits:
            return not_found
        # Candidates passing the checksum sort first (stable sort keeps order).
        hits.sort(key=lambda pair: 0 if _rc_checksum_ok(pair[0]) else 1)
        rc_fixed, best = hits[0]
        return {"status": "fuzzy", "patient": best, "rc_corrected": rc_fixed, "all_matches": hits}
    finally:
        con.close()
def check_duplicates(rc: str, datum: str) -> list[str]:
    """Return archive filenames that start with ``"<rc> <datum>"``.

    Used to warn about a scan that was probably processed before.  Returns an
    empty list when either argument is missing.
    """
    if not (rc and datum):
        return []
    # Wait up to 15 s for the background index load — it is typically finished
    # by the time the Claude API call returns.
    _dokumentace_ready.wait(timeout=15)
    wanted = f"{rc} {datum}"
    return [fname for fname in _dokumentace_index if fname.startswith(wanted)]
# ─── Korekce (few-shot příklady) ─────────────────────────────────────────────
def load_corrections() -> list[dict]:
    """Read saved filename corrections (few-shot examples) from corrections.json.

    Returns an empty list when the file does not exist yet.
    """
    if not CORRECTIONS_FILE.exists():
        return []
    return json.loads(CORRECTIONS_FILE.read_text(encoding="utf-8"))
def save_correction(original: str, corrected: str):
    """Persist one (original, corrected) filename pair to corrections.json.

    No-op when the CORRECTIONS feature flag is off or the exact pair is
    already recorded; otherwise appends and rewrites the file.
    """
    if not CORRECTIONS:
        return
    corrections = load_corrections()
    if any(c["original"] == original and c["corrected"] == corrected for c in corrections):
        return
    corrections.append({"original": original, "corrected": corrected})
    CORRECTIONS_FILE.write_text(
        json.dumps(corrections, ensure_ascii=False, indent=2), encoding="utf-8"
    )
    print(f" ✓ Korekce uložena ({len(corrections)} celkem)")
def load_naming_rules() -> str:
    """Return the naming-rules prompt section, or "" when the rules file is absent/empty."""
    if not NAMING_RULES_FILE.exists():
        return ""
    content = NAMING_RULES_FILE.read_text(encoding="utf-8").strip()
    if not content:
        return ""
    return f"Pravidla pro pojmenování souborů (dodržuj vždy):\n{content}\n\n"
def build_corrections_prompt() -> str:
    """Build the few-shot prompt section from the last 10 saved corrections.

    Returns "" when the feature is disabled or no corrections exist yet.
    """
    if not CORRECTIONS:
        return ""
    corrections = load_corrections()
    if not corrections:
        return ""
    lines = ["Příklady korekcí z minulých běhů (uč se z nich):"]
    for entry in corrections[-10:]:
        lines.extend([
            f' - špatně: "{entry["original"]}"',
            f' správně: "{entry["corrected"]}"',
        ])
    return "\n".join(lines) + "\n\n"
# ─── Claude Vision API ────────────────────────────────────────────────────────
def extract_info(pdf_path: Path) -> dict:
    """Extract patient metadata from a scanned document via Claude Vision.

    Renders the first page (or loads an image file directly), sends it to the
    Anthropic API together with the naming rules and few-shot corrections, and
    parses the JSON reply.

    Returns:
        The parsed dict (keys such as "nazev_souboru", "rodne_cislo",
        "datum_zpravy", "typ_dokumentu", "poznamka", "rotace"), or a dict with
        ``"nazev_souboru": None`` when the API call or JSON parsing fails so
        the caller falls back to manual entry.
    """
    print(" Převádím na obrázek...")
    suffix = pdf_path.suffix.lower()
    if suffix in (".jpg", ".jpeg", ".png"):
        from PIL import Image
        img = Image.open(pdf_path)
        # Fix: JPEG cannot store alpha channels or palettes — a PNG opened in
        # RGBA/P/LA mode would crash on save(), so normalise to RGB first.
        if img.mode not in ("RGB", "L"):
            img = img.convert("RGB")
        buf = io.BytesIO()
        img.save(buf, format="JPEG", quality=95)
        img.close()
    else:
        # PDF input: render only the first page at 300 DPI via Poppler.
        images = convert_from_path(str(pdf_path), poppler_path=POPPLER_PATH, dpi=300)
        buf = io.BytesIO()
        images[0].save(buf, format="JPEG", quality=95)
        del images
        gc.collect()
    image_b64 = base64.standard_b64encode(buf.getvalue()).decode("utf-8")
    # Prompt = naming rules + few-shot corrections + extraction instructions.
    prompt = (
        load_naming_rules() +
        build_corrections_prompt() +
        "Toto je naskenovaná lékařská zpráva v češtině. "
        "Vrať JSON s těmito poli:\n"
        "- \"jmeno\": celé jméno pacienta (příjmení + jméno + případný titul)\n"
        "- \"rodne_cislo\": rodné číslo pacienta BEZ lomítka (pouze číslice)\n"
        "- \"datum_zpravy\": datum zprávy ve formátu YYYY-MM-DD\n"
        "- \"typ_dokumentu\": typ dokumentu — "
        "\"LZ {oddělení}\" = ambulantní/lékařská zpráva (např. \"LZ chirurgie\", \"LZ kardiologie\", \"LZ plicní\", \"LZ ORL\"); "
        "\"PZ {oddělení}\" = propouštěcí zpráva z hospitalizace (např. \"PZ interna\", \"PZ neurologie\"). "
        "Jiné typy: \"Laboratoř\", \"CT břicha\", \"MRI páteře\", \"kolonoskopie\", "
        "\"operační protokol oční\", \"poukaz FT\", \"diagnostická mamografie\" atd.\n"
        "- \"poznamka\": krátká klinická poznámka česky, max 80 znaků. "
        "DŮLEŽITÉ: pokud zpráva obsahuje sekci \"Závěr:\" nebo \"Závěr vyšetření:\", "
        "použij VÝHRADNĚ obsah této sekce — je nejdůležitější. "
        "Teprve pokud závěr chybí, shrň obsah z celé zprávy.\n"
        "- \"nazev_souboru\": název souboru ve formátu "
        "\"{rodne_cislo} {datum_zpravy} {Příjmení}, {Jméno} [{typ_dokumentu}] [{poznamka}].pdf\" "
        "(jméno bez titulu, RČ bez lomítka)\n"
        "- \"rotace\": o kolik stupňů CCW je třeba otočit obrázek aby byl text čitelně na výšku nebo šířku "
        "(hodnoty: 0, 90, 180, 270). Pokud je text již správně orientovaný, vrať 0.\n\n"
        "Pokud pole nenajdeš, použij null. Nepiš nic jiného než JSON."
    )
    print(" Volám Claude Vision API...")
    try:
        client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
        response = client.messages.create(
            model="claude-sonnet-4-6",
            max_tokens=400,
            messages=[{"role": "user", "content": [
                {"type": "image", "source": {"type": "base64", "media_type": "image/jpeg", "data": image_b64}},
                {"type": "text", "text": prompt},
            ]}],
        )
        usage = response.usage
        # Cost estimate assumes Sonnet pricing: $3/M input, $15/M output tokens.
        print(f" Tokeny: {usage.input_tokens} in + {usage.output_tokens} out = ${usage.input_tokens*3/1e6 + usage.output_tokens*15/1e6:.4f}")
        raw = response.content[0].text.strip()
        # Strip an optional ```json ... ``` markdown fence around the reply.
        if raw.startswith("```"):
            raw = raw.split("```")[1]
            if raw.startswith("json"):
                raw = raw[4:]
        try:
            return json.loads(raw.strip())
        except json.JSONDecodeError:
            print(f" VAROVÁNÍ: nelze parsovat JSON: {raw!r}")
            return {"nazev_souboru": None, "raw": raw}
    except Exception as e:
        print(f" VAROVÁNÍ: Claude API selhalo ({e}) — otevírám dialog pro ruční vyplnění.")
        return {"nazev_souboru": None}
# ─── Subprocess helpers ───────────────────────────────────────────────────────
def open_preview(pdf_path: Path) -> tuple[subprocess.Popen, Path]:
    """Launch the preview viewer for *pdf_path* as a subprocess.

    Returns:
        (process handle, path where the viewer will write its window geometry).

    NOTE: the geometry file must NOT exist yet — the caller polls for its
    creation — hence tempfile.mktemp despite its race-condition caveat.
    """
    geometry_path = Path(tempfile.mktemp(suffix=".json"))
    cmd = [sys.executable, str(VIEWER), str(pdf_path), f"--write-geometry={geometry_path}"]
    return subprocess.Popen(cmd), geometry_path
def read_preview_bottom(geom_file: Path, timeout: float = 5.0) -> int:
import time
deadline = time.time() + timeout
while time.time() < deadline:
if geom_file.exists():
geom = json.loads(geom_file.read_text(encoding="utf-8"))
geom_file.unlink(missing_ok=True)
return geom["y"] + geom["h"] + 30 # +30 pro title bar
time.sleep(0.1)
geom_file.unlink(missing_ok=True)
return None
def run_rename_dialog(nazev: str, info_lines: list, below_y: int = None) -> str | None:
    """Show the rename-approval dialog as a subprocess.

    Args:
        nazev: Proposed filename to pre-fill.
        info_lines: Status lines (Medicus verification, duplicates) to display.
        below_y: Optional screen Y coordinate to place the dialog under.

    Returns:
        The approved filename, or None when the user cancelled / no output.
    """
    payload_file = Path(tempfile.mktemp(suffix=".json"))
    payload_file.write_text(
        json.dumps({"nazev": nazev, "info_lines": info_lines}, ensure_ascii=False),
        encoding="utf-8",
    )
    cmd = [sys.executable, str(RENAME_DIALOG), str(payload_file)]
    if below_y is not None:
        cmd.append(f"--below-y={below_y}")
    # Force UTF-8 in the child so Czech text survives Windows consoles.
    env = dict(os.environ, PYTHONIOENCODING="utf-8", PYTHONUTF8="1")
    result = subprocess.run(cmd, capture_output=True, text=True, encoding="utf-8", env=env)
    payload_file.unlink(missing_ok=True)
    answer = result.stdout.strip()
    if not answer:
        return None
    return json.loads(answer).get("value")
def run_variant_picker(variants_data: list) -> str | None:
    """Show the compression-variant picker as a subprocess.

    Args:
        variants_data: List of dicts with "path", "label" and "size_kb".

    Returns:
        The chosen variant's file path, or None when nothing was selected.
    """
    payload_file = Path(tempfile.mktemp(suffix=".json"))
    payload_file.write_text(json.dumps(variants_data, ensure_ascii=False), encoding="utf-8")
    result = subprocess.run(
        [sys.executable, str(VARIANT_PICKER), str(payload_file)],
        capture_output=True, text=True, encoding="utf-8",
    )
    payload_file.unlink(missing_ok=True)
    output = result.stdout.strip()
    # Diagnose picker failures (non-zero exit or empty stdout) on the console.
    if result.returncode != 0 or not output:
        print(f" [variant_picker] returncode={result.returncode}")
        if result.stderr.strip():
            print(f" [variant_picker] CHYBA:\n{result.stderr.strip()}")
    if not output:
        return None
    return json.loads(output).get("chosen")
# ─── Hlavní flow ──────────────────────────────────────────────────────────────
def process_file(pdf_path: Path) -> None:
    """Run the full pipeline for one scanned document.

    Steps: preview window → Claude Vision extraction → Medicus verification
    (with fuzzy birth-number repair) → duplicate check → user approval of the
    filename → compression-variant choice → save into PROCESSED and delete the
    original.  Returns early (keeping the original file) whenever the user
    cancels a dialog.
    """
    print(f"\nSoubor: {pdf_path.name}")
    # Kick off the archive-index load in the background — it normally finishes
    # while the Claude API call is in flight.
    start_dokumentace_index()
    # 1. Open a preview of the original so the user can read the scan.
    preview, geom_file = open_preview(pdf_path)
    below_y = read_preview_bottom(geom_file)
    # 2. Claude Vision extraction; fall back to the raw filename when no name came back.
    info = extract_info(pdf_path)
    nazev = info.get("nazev_souboru") or pdf_path.name
    # 3. Verify the scanned birth number against Medicus (incl. fuzzy matching).
    rc_from_scan = re.sub(r"\D", "", info.get("rodne_cislo") or "")
    print(f" Ověřuji v Medicus (RČ: {rc_from_scan})...")
    verif = verify_patient(rc_from_scan)
    # On a fuzzy hit, patch the corrected birth number into the proposed name.
    if verif["status"] == "fuzzy" and verif.get("rc_corrected") and nazev:
        nazev = nazev.replace(rc_from_scan, verif["rc_corrected"], 1)
        print(f" → RČ opraveno: {rc_from_scan} → {verif['rc_corrected']}")
    # Status lines shown inside the rename dialog.
    status = verif["status"]
    patient = verif.get("patient")
    info_lines = []
    if status == "ok":
        info_lines.append(f"✓ Medicus: {patient['prijmeni']} {patient['jmeno']} | RČ {patient['rodcis']}")
    elif status == "fuzzy":
        info_lines.append(f"⚠ RČ ze skenu '{rc_from_scan}' → opraveno na {verif['rc_corrected']}")
        info_lines.append(f" Pacient: {patient['prijmeni']} {patient['jmeno']} | RČ {patient['rodcis']}")
    elif status == "not_found":
        info_lines.append(f"✗ RČ '{rc_from_scan}' nenalezeno v Medicus")
    else:
        info_lines.append("— Medicus nedostupný (offline)")
    # Duplicate check: is the same birth number + report date already archived?
    rc_final = re.sub(r"\D", "", verif["patient"]["rodcis"] if patient else rc_from_scan)
    duplicity = check_duplicates(rc_final, info.get("datum_zpravy") or "")
    if duplicity:
        info_lines.append(f"⚠ DUPLICITA: {', '.join(duplicity)}")
    # NOTE(review): info_lines can never be empty here — every verification
    # branch above appends a line — so this fallback looks unreachable; confirm intent.
    if not info_lines:
        info_lines = ["[Claude nevrátil název — uprav ručně]"]
    print(" Otevírám dialog pro schválení názvu...")
    final_name = run_rename_dialog(nazev, info_lines, below_y=below_y)
    preview.terminate()
    if not final_name:
        print(" Přeskočeno.")
        return
    if not final_name.endswith(".pdf"):
        final_name += ".pdf"
    # Strip characters Windows forbids in filenames.
    final_name = re.sub(r'[<>:"/\\|?*]', '', final_name)
    # Any manual edit becomes a few-shot example for future prompts.
    if nazev and final_name != nazev:
        save_correction(nazev, final_name)
    print(f" Schválený název: {final_name}")
    # 4. Generate compression variants (original + 5 recompressed versions).
    print(" Generuji kompresní varianty...")
    temp_files = []
    orig_kb = round(pdf_path.stat().st_size / 1024)
    variants_data = [{"path": str(pdf_path), "label": "Originál", "size_kb": orig_kb}]
    for label, dpi, quality in COMPRESS_VARIANTS:
        tmp = compress_to_temp(pdf_path, dpi, quality)
        size_kb = round(tmp.stat().st_size / 1024)
        temp_files.append(tmp)
        variants_data.append({"path": str(tmp), "label": label, "size_kb": size_kb})
        print(f" {label}: {size_kb} kB")
    # 5. Let the user pick a variant; abort (and clean up) when none chosen.
    print(" Vyber variantu v okně...")
    chosen = run_variant_picker(variants_data)
    if not chosen:
        print(" Žádná varianta nevybrána, přeskakuji.")
        for t in temp_files:
            t.unlink(missing_ok=True)
        return
    # 6. Copy the chosen variant into PROCESSED, then delete the original scan.
    PROCESSED.mkdir(exist_ok=True)
    dest = PROCESSED / final_name
    if dest.exists():
        print(f" Přepisuji existující: {dest.name}")
    shutil.copy2(chosen, dest)
    pdf_path.unlink()
    print(f" ✓ Uloženo: {dest.name}")
    for t in temp_files:
        t.unlink(missing_ok=True)  # the original is not among temp_files, so this is safe
def process_folder(folder: Path):
    """Process every supported scan (pdf/jpg/jpeg/png) in *folder*, one by one.

    A failure in a single file is reported and does not stop the batch.
    """
    supported = (".pdf", ".jpg", ".jpeg", ".png")
    files = sorted(entry for entry in folder.iterdir() if entry.suffix.lower() in supported)
    if not files:
        print(f"Žádné soubory v: {folder}")
        return
    print(f"Nalezeno {len(files)} soubor(ů).")
    for candidate in files:
        try:
            process_file(candidate)
        except Exception as e:
            print(f" CHYBA: {e}")
    print("\nHotovo.")
if __name__ == "__main__":
PROCESSED.mkdir(exist_ok=True)
TO_PROCESS.mkdir(exist_ok=True)
target = Path(sys.argv[1]) if len(sys.argv) > 1 else TO_PROCESS
if target.is_file():
process_file(target)
elif target.is_dir():
process_folder(target)
else:
print("Použití: python extract_patient_info_novy.py [soubor.pdf nebo složka]")
sys.exit(1)
+101
View File
@@ -0,0 +1,101 @@
"""
Konverze JPG/PNG → PDF se správnou orientací stránky (A4).
Řeší:
- EXIF orientaci (fotky z telefonu/skeneru bývají otočené)
- Správné umístění na A4 stránce (na výšku nebo na šířku dle obsahu)
- Zachování kvality
Použití:
python jpg_to_pdf.py soubor.jpg
python jpg_to_pdf.py soubor.jpg vystup.pdf
"""
import io
import sys
from pathlib import Path
from PIL import Image, ImageOps
# A4 page dimensions in millimetres
A4_W_MM = 210
A4_H_MM = 297
MARGIN_MM = 0  # no margin — print scaling is left to Acrobat (Fit to Print)
def fix_orientation(img: Image.Image) -> Image.Image:
    """Correct the image rotation according to its EXIF data (orientation tag 274)."""
    return ImageOps.exif_transpose(img)
def image_to_pdf(src: Path, dst: Path, dpi: int = 150, quality: int = 80, rotate_ccw: int = 0):
    """Convert one JPG/PNG image into a single-page A4 PDF.

    Page orientation (portrait/landscape) follows the image aspect ratio; the
    image is scaled to fit and centred on a white A4 canvas.

    Args:
        src: source image path.
        dst: output PDF path.
        dpi: resolution used for the mm→px conversion and PDF metadata.
        quality: JPEG quality hint passed to Pillow's PDF writer.
        rotate_ccw: extra counter-clockwise rotation in degrees (0/90/180/270).
    """
    img = Image.open(src)
    print(f"  Originál: {img.size[0]}×{img.size[1]} px, mode={img.mode}, format={img.format}")
    # 1. Apply the EXIF orientation fix first.
    img = fix_orientation(img)
    print(f"  Po EXIF korekci: {img.size[0]}×{img.size[1]} px")
    # 2. Optional extra rotation requested by the caller (Claude or manual).
    if rotate_ccw and rotate_ccw != 0:
        img = img.rotate(rotate_ccw, expand=True)
        print(f"  Po rotaci {rotate_ccw}° CCW: {img.size[0]}×{img.size[1]} px")
    # 3. Convert to RGB (the PDF writer does not accept RGBA/P/LA modes).
    if img.mode in ("RGBA", "P", "LA"):
        img = img.convert("RGB")
    # 4. Choose page orientation from the image aspect ratio.
    img_w, img_h = img.size
    if img_w > img_h:
        # Landscape image → landscape A4 page.
        page_w_mm, page_h_mm = A4_H_MM, A4_W_MM
        print(f"  Orientace stránky: na šířku (landscape)")
    else:
        # Portrait image → portrait A4 page.
        page_w_mm, page_h_mm = A4_W_MM, A4_H_MM
        print(f"  Orientace stránky: na výšku (portrait)")
    # 5. Compute the usable area with margins (mm → px at the given DPI).
    mm_to_px = dpi / 25.4
    max_w_px = int((page_w_mm - 2 * MARGIN_MM) * mm_to_px)
    max_h_px = int((page_h_mm - 2 * MARGIN_MM) * mm_to_px)
    # 6. Scale the image onto the page, keeping the aspect ratio.
    img.thumbnail((max_w_px, max_h_px), Image.LANCZOS)
    print(f"  Výsledná velikost obrázku: {img.size[0]}×{img.size[1]} px")
    # 7. Paste the image centred on a white A4 canvas.
    page_w_px = int(page_w_mm * mm_to_px)
    page_h_px = int(page_h_mm * mm_to_px)
    canvas = Image.new("RGB", (page_w_px, page_h_px), "white")
    offset_x = (page_w_px - img.size[0]) // 2
    offset_y = (page_h_px - img.size[1]) // 2
    canvas.paste(img, (offset_x, offset_y))
    # 8. Save as PDF.
    canvas.save(dst, "PDF", resolution=dpi, quality=quality)
    print(f"  ✓ Uloženo: {dst.name} ({dst.stat().st_size // 1024} KB)")
if __name__ == "__main__":
    # On Windows force UTF-8 std streams so Czech output prints correctly.
    if sys.platform == "win32":
        sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8", errors="replace")
        sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8", errors="replace")
    if len(sys.argv) < 2:
        print("Použití: python jpg_to_pdf.py soubor.jpg [vystup.pdf] [rotace_ccw]")
        print("         rotace_ccw: 0 / 90 / 180 / 270 (výchozí: 0)")
        sys.exit(1)
    src = Path(sys.argv[1])
    if not src.exists():
        print(f"Soubor nenalezen: {src}")
        sys.exit(1)
    # Output path defaults to the input name with a .pdf suffix.
    dst = Path(sys.argv[2]) if len(sys.argv) > 2 else src.with_suffix(".pdf")
    rotate_ccw = int(sys.argv[3]) if len(sys.argv) > 3 else 0
    print(f"Konvertuji: {src.name}{dst.name}")
    image_to_pdf(src, dst, rotate_ccw=rotate_ccw)
@@ -0,0 +1,26 @@
# Pravidla pro přejmenování souborů
Tato pravidla platí vždy při generování polí `poznamka` a `nazev_souboru`.
1. Název souboru má vždy tvar: `RODNECISLO YYYY-MM-DD Příjmení, Jméno [TYP ODBORNOST] [popis].pdf`
- TYP je vždy buď `LZ` (lékařská zpráva / ambulantní zpráva) nebo `PZ` (propouštěcí zpráva z hospitalizace).
- Jiné typy dokumentů (Laboratoř, CT, MRI, kolonoskopie, poukaz FT apod.) nemají TYP prefix — píší se celým názvem: `[Laboratoř]`, `[CT břicha]` atd.
- Příklady: `[LZ chirurgie]`, `[PZ interna]`, `[Laboratoř]`, `[CT břicha]`
2. Když je typ dokumentu PZ (propouštěcí zpráva), umísti do druhé závorky jako první věc data hospitalizace ve tvaru `DDMMMYYYY–DDMMMYYYY` (měsíc třemi písmeny anglicky, velká, bez mezer), za pomlčkou pak popis.
- Příklad: `[PZ interna] [12–15APR2026 srdeční selhání]`
- Pokud je datum přijetí a propuštění ve stejném měsíci, stačí: `[12–15APR2026 ...]`
- Pokud datum hospitalizace nelze určit, druhou závorku napiš bez data.
3. Když je dokument typ "Laboratoř", do `poznamka` uváděj POUZE hodnoty mimo normu (patologické nálezy) — hodnoty v normě vynech. Osmolalitu séra nikdy nezmiňuj, ani když je mimo normu.
4. Pokud laboratorní výsledky obsahují glomerulární filtraci — bývá označena jako eGFR, CKD-EPI nebo CK-EPI — do `poznamka` nikdy nepiš číselnou hodnotu eGFR. Místo toho uveď pouze klasifikaci: eGFR ≥ 90 → CHRIG1, 60–89 → CHRIG2, 45–59 → CHRIG3a, 30–44 → CHRIG3b, 15–29 → CHRIG4, < 15 → CHRIG5. Klasifikaci uváděj pouze pokud je CHRIG2 nebo horší (tj. eGFR < 90) — CHRIG1 je v normě, nezmiňuj ho.
5. Když je dokument typ "Laboratoř" a zpráva obsahuje diagnózu (dg., dg:, diagnóza), umísti ji do `nazev_souboru` jako první část druhé závorky, tedy: `[Laboratoř] [dg. XY00 - stručná poznamka]`.
6. Zkratky a pojmenování: slovo „sono" (sonografie/ultrazvuk) piš vždy malými písmeny — `sono břicha`, `sono ŠŽ`, nikoli `SONO`. Štítnou žlázu označuj vždy zkratkou `ŠŽ`. Sonografii prsu/prsů (sono mamm., sono mamografie, sono mamma apod.) piš vždy jako `sono prsů`. Denzitometrii (DEXA, DXA, denzitometrie) piš vždy pouze jako `[DXA]` — bez prefixu LZ. Algologii piš vždy jako `[LZ léčba bolesti]`. Dermatovenerologii (dermatologie, dermatovenerologie, kožní) piš vždy jako `[LZ kožní]`. Angiologii piš vždy jako `[LZ cévní]`.
7. V číselných hodnotách VŽDY používej desetinnou tečku, nikoli desetinnou čárku. Toto pravidlo platí absolutně pro všechna čísla v `poznamka` i `nazev_souboru` — např. `TG 4.73`, nikoli `TG 4,73`.
8. Rozpoznávání vzorců — sideropenická anémie: Pokud laboratorní výsledky splňují typický obraz sideropenické (železo-deficitní) anémie, přidej diagnózu jako první část druhé závorky ve tvaru `[sideropenická anémie, ...]`.
Typický obraz (stačí kombinace několika z těchto nálezů):
- Krevní obraz: ↓ Hb, ↓ Htk, ↓ MCV (mikrocytóza), ↓ MCH nebo ↓ MCHC (hypochromie), ↑ RDW (anisocytóza)
- Metabolismus železa: ↓ sérové Fe (železo), ↓ ferritin, ↑ transferrin (nebo TIBC), ↓ saturace transferrinu
- Diagnózu uveď pouze pokud je obraz dostatečně přesvědčivý (alespoň ↓ Hb + ↓ MCV nebo ↓ Fe/ferritin).
- Příklad výsledného názvu: `[Laboratoř] [sideropenická anémie, Hb 98, MCV 71, Fe 5.2]`
@@ -0,0 +1,111 @@
"""
Standalone PDF/obrázek náhled — spouští se jako subprocess z extract_patient_info.py.
Argumenty: preview_viewer.py <soubor> [--delete-on-close]
"""
import sys
from pathlib import Path
import tkinter as tk
def main():
    """Show a read-only preview window for a PDF or image file.

    Usage: preview_viewer.py <file> [--delete-on-close] [--write-geometry=<path>]
    Exit codes: 1 = missing argument, 2 = Pillow/PyMuPDF not installed.
    """
    if len(sys.argv) < 2:
        sys.exit(1)
    pdf_path = Path(sys.argv[1])
    delete_on_close = "--delete-on-close" in sys.argv
    try:
        from PIL import Image, ImageTk
        import fitz
    except ImportError:
        # Exit code 2 signals the parent process that viewer deps are missing.
        sys.exit(2)
    suffix = pdf_path.suffix.lower()
    if suffix in (".jpg", ".jpeg", ".png"):
        # Plain image: load with Pillow; no PDF document object.
        pil_img = Image.open(pdf_path)
        doc = None
    else:
        # PDF: open with PyMuPDF for per-page rendering.
        doc = fitz.open(str(pdf_path))
        pil_img = None
    root = tk.Tk()
    root.tk.call("encoding", "system", "utf-8")
    sh = root.winfo_screenheight()
    page_count = len(doc) if doc else 1
    # One-element lists keep the state writable from the nested closures below.
    current = [0]
    photo_ref = [None]
    def render(n) -> Image.Image:
        # Render page *n* (or the single image) scaled to fit on screen.
        if doc is not None:
            page = doc[n]
            zoom = min(700 / page.rect.width, (sh - 150) / page.rect.height)
            pix = page.get_pixmap(matrix=fitz.Matrix(zoom, zoom))
            return Image.frombytes("RGB", (pix.width, pix.height), pix.samples)
        else:
            img = pil_img.copy()
            img.thumbnail((700, sh - 150), Image.LANCZOS)
            return img
    def on_close():
        # Close the document, optionally delete the previewed file, then quit.
        if doc:
            try:
                doc.close()
            except Exception:
                pass
        if delete_on_close:
            try:
                pdf_path.unlink(missing_ok=True)
            except Exception:
                pass
        root.destroy()
    root.title(pdf_path.stem)
    root.attributes("-topmost", True)
    root.resizable(False, False)
    root.protocol("WM_DELETE_WINDOW", on_close)
    lbl_img = tk.Label(root)
    lbl_img.pack()
    frame_nav = tk.Frame(root)
    frame_nav.pack(pady=4)
    lbl_page = tk.Label(frame_nav, font=("Segoe UI", 9))
    lbl_page.pack(side="left", padx=10)
    def show(n):
        # Display page *n* and refresh the navigation-button states.
        current[0] = n
        img = render(n)
        photo_ref[0] = ImageTk.PhotoImage(img)  # keep a reference so Tk doesn't GC it
        lbl_img.config(image=photo_ref[0])
        lbl_page.config(text=f"Strana {n + 1} / {page_count}")
        btn_prev.config(state="normal" if n > 0 else "disabled")
        btn_next.config(state="normal" if n < page_count - 1 else "disabled")
    btn_prev = tk.Button(frame_nav, text="◄ Předchozí", command=lambda: show(current[0] - 1))
    btn_prev.pack(side="left")
    btn_next = tk.Button(frame_nav, text="Další ►", command=lambda: show(current[0] + 1))
    btn_next.pack(side="left")
    show(0)
    root.update_idletasks()
    sw = root.winfo_screenwidth()
    w = root.winfo_width()
    h = root.winfo_height()
    x = (sw - w) // 2
    # Centre horizontally, stick to the top edge of the screen.
    root.geometry(f"+{x}+0")
    # Write the window geometry to a file if --write-geometry=<path> was passed.
    import json as _json
    for arg in sys.argv:
        if arg.startswith("--write-geometry="):
            geom_path = Path(arg.split("=", 1)[1])
            geom_path.write_text(_json.dumps({"x": x, "y": 0, "w": w, "h": h}), encoding="utf-8")
            break
    root.mainloop()
if __name__ == "__main__":
    main()
+105
View File
@@ -0,0 +1,105 @@
"""
Standalone dialog pro schválení / opravu názvu souboru.
Spouští se jako subprocess z extract_patient_info.py.
Argumenty: rename_dialog.py <json_soubor>
JSON vstup: { "nazev": "...", "info_lines": [...] }
JSON výstup: { "value": "..." } nebo { "value": null }
"""
import json
import os
import sys
from pathlib import Path
import tkinter as tk
# On Windows: enable per-monitor DPI awareness (crisper Tk rendering) and
# force UTF-8 std streams so Czech text prints correctly.
if sys.platform == "win32":
    try:
        from ctypes import windll
        windll.shcore.SetProcessDpiAwareness(1)
    except Exception:
        pass
    import io
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8", errors="replace")
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8", errors="replace")
def main():
    """Dialog for approving / correcting a generated file name.

    Reads a JSON file ({"nazev": ..., "info_lines": [...]}) given as argv[1]
    and prints {"value": <approved name or None>} as JSON to stdout.
    """
    if len(sys.argv) < 2:
        print(json.dumps({"value": None}))
        sys.exit(0)
    data = json.loads(Path(sys.argv[1]).read_text(encoding="utf-8"))
    nazev = data.get("nazev") or ""
    info_lines = data.get("info_lines") or []
    result = {"value": None}
    root = tk.Tk()
    root.title("Schválení názvu souboru")
    root.resizable(True, False)
    root.attributes("-topmost", True)
    root.tk.call("encoding", "system", "utf-8")
    os.environ.setdefault("TCL_ENCODING", "utf-8")
    pad = {"padx": 12, "pady": 6}
    frame_info = tk.Frame(root, bg="#f0f0f0", bd=1, relief="sunken")
    frame_info.pack(fill="x", **pad)
    for line in info_lines:
        # NOTE(review): both startswith markers render as empty strings in
        # this copy — the original prefix glyphs were likely lost; verify.
        color = "#b00000" if line.startswith("") else "#004080" if line.startswith("") else "#333"
        tk.Label(frame_info, text=line, anchor="w", bg="#f0f0f0",
                 fg=color, font=("Segoe UI", 10)).pack(fill="x", padx=8, pady=1)
    tk.Label(root, text="Název souboru (bez .pdf):", anchor="w",
             font=("Segoe UI", 9, "bold")).pack(fill="x", padx=12, pady=(10, 2))
    # Strip the ".pdf" suffix for editing; the caller re-adds it.
    nazev_bez = nazev[:-4] if nazev.endswith(".pdf") else nazev
    var = tk.StringVar(value=nazev_bez)
    entry = tk.Entry(root, textvariable=var, font=("Segoe UI", 10), width=135)
    entry.pack(fill="x", padx=12, pady=(0, 10))
    entry.icursor(tk.END)
    entry.focus_set()
    frame_btn = tk.Frame(root)
    frame_btn.pack(pady=(0, 12))
    def schvalit(event=None):
        # Approve: return the (trimmed) edited name.
        result["value"] = var.get().strip()
        root.destroy()
    def preskocit(event=None):
        # Skip: return None.
        result["value"] = None
        root.destroy()
    tk.Button(frame_btn, text="✓ Schválit (Enter)", command=schvalit,
              bg="#2a7a2a", fg="white", font=("Segoe UI", 10, "bold"),
              padx=16, pady=6).pack(side="left", padx=8)
    tk.Button(frame_btn, text="✗ Přeskočit (Esc)", command=preskocit,
              bg="#7a2a2a", fg="white", font=("Segoe UI", 10),
              padx=16, pady=6).pack(side="left", padx=8)
    root.bind("<Return>", schvalit)
    root.bind("<Escape>", preskocit)
    root.update_idletasks()
    sw = root.winfo_screenwidth()
    w = root.winfo_width()
    x = (sw - w) // 2
    # Position below the preview window if --below-y=N was passed.
    below_y = None
    for arg in sys.argv:
        if arg.startswith("--below-y="):
            below_y = int(arg.split("=", 1)[1])
            break
    y = below_y if below_y is not None else (root.winfo_screenheight() - root.winfo_height() - 60)
    root.geometry(f"+{x}+{y}")
    root.lift()
    root.focus_force()
    root.mainloop()
    # Report the result to the parent process on stdout.
    print(json.dumps({"value": result["value"]}, ensure_ascii=False))
if __name__ == "__main__":
    main()
@@ -0,0 +1,148 @@
"""
Jedno okno pro výběr kompresní varianty PDF.
Nahoře tlačítka 1N pro přepínání, tlačítko "Tohle beru" pro potvrzení.
Argumenty: variant_picker.py <json_soubor>
JSON vstup: [{"path": "...", "label": "150 DPI / q80", "size_kb": 139}, ...]
JSON výstup (stdout): {"chosen": "cesta/k/souboru"}
"""
import json
import sys
from pathlib import Path
import tkinter as tk
from PIL import Image, ImageTk
import fitz
def main():
    """Single window for choosing a PDF compression variant.

    Reads a JSON list of variants ({"path", "label", "size_kb"}) from argv[1],
    shows a preview with buttons 1–N to switch between them, and prints
    {"chosen": <path or None>} as JSON to stdout when the window closes.
    """
    if len(sys.argv) < 2:
        sys.exit(1)
    variants = json.loads(Path(sys.argv[1]).read_text(encoding="utf-8"))
    chosen = {"path": None}
    # Open every variant once up front so switching between them is instant.
    docs = [fitz.open(v["path"]) for v in variants]
    current = [0]
    photo_ref = [None]
    root = tk.Tk()
    root.tk.call("encoding", "system", "utf-8")
    root.attributes("-topmost", True)
    sh = root.winfo_screenheight()
    sw = root.winfo_screenwidth()
    win_h = sh - 80  # leave room for the taskbar + title bar
    img_h = win_h - 160
    img_w = sw // 2  # window width = half the monitor
    x = (sw - img_w) // 2
    root.geometry(f"{img_w}x{win_h}+{x}+0")
    root.resizable(False, False)
    # ── Top panel with the variant buttons ──
    frame_top = tk.Frame(root, bg="#222")
    frame_top.pack(fill="x")
    btn_variants = []
    current_page = [0]
    def show(n, page_n=0):
        # Render page *page_n* of variant *n* and refresh all button states.
        current[0] = n
        current_page[0] = page_n
        doc = docs[n]
        page = doc[page_n]
        zoom = min(img_w / page.rect.width, img_h / page.rect.height)
        pix = page.get_pixmap(matrix=fitz.Matrix(zoom, zoom))
        img = Image.frombytes("RGB", (pix.width, pix.height), pix.samples)
        photo_ref[0] = ImageTk.PhotoImage(img)  # keep a reference so Tk doesn't GC it
        lbl_img.config(image=photo_ref[0])
        page_count = len(doc)
        root.title(f"Varianta {n+1}: {variants[n]['label']} ({variants[n]['size_kb']} kB) — strana {page_n+1}/{page_count}")
        for i, b in enumerate(btn_variants):
            b.config(bg="#2a5a9a" if i == n else "#444")
        btn_prev_page.config(state="normal" if page_n > 0 else "disabled")
        btn_next_page.config(state="normal" if page_n < page_count - 1 else "disabled")
    for i, v in enumerate(variants):
        b = tk.Button(
            frame_top,
            text=f"{i+1}. {v['label']}\n{v['size_kb']} kB",
            font=("Segoe UI", 9, "bold"),
            bg="#444", fg="white",
            relief="flat", padx=8, pady=6,
            command=lambda n=i: show(n),  # bind i now to avoid late-binding
        )
        b.pack(side="left", padx=2, pady=4)
        btn_variants.append(b)
    # ── Accept / Skip buttons — same style as the variant buttons ──
    def beru():
        # Accept the currently shown variant.
        chosen["path"] = variants[current[0]]["path"]
        root.destroy()
    def preskocit():
        # Skip: leave chosen["path"] as None.
        root.destroy()
    tk.Button(
        frame_top,
        text="✓ Tohle beru\n",
        command=beru,
        bg="#2a7a2a", fg="white",
        font=("Segoe UI", 9, "bold"),
        relief="flat", padx=8, pady=6,
    ).pack(side="left", padx=2, pady=4)
    tk.Button(
        frame_top,
        text="✗ Přeskočit\n",
        command=preskocit,
        bg="#7a2a2a", fg="white",
        font=("Segoe UI", 9, "bold"),
        relief="flat", padx=8, pady=6,
    ).pack(side="left", padx=2, pady=4)
    # ── Page navigation — packed to the far right ──
    btn_next_page = tk.Button(
        frame_top,
        text="Další ►\n",
        command=lambda: show(current[0], current_page[0] + 1),
        bg="#555", fg="white",
        font=("Segoe UI", 9, "bold"),
        relief="flat", padx=8, pady=6,
    )
    btn_next_page.pack(side="right", padx=2, pady=4)
    btn_prev_page = tk.Button(
        frame_top,
        text="◄ Před.\n",
        command=lambda: show(current[0], current_page[0] - 1),
        bg="#555", fg="white",
        font=("Segoe UI", 9, "bold"),
        relief="flat", padx=8, pady=6,
    )
    btn_prev_page.pack(side="right", padx=2, pady=4)
    # ── Preview image ──
    lbl_img = tk.Label(root, bg="black")
    lbl_img.pack(fill="both", expand=True)
    # Keyboard shortcuts: 1–5 switch variants, Enter accepts, Esc skips.
    root.bind("<Key-1>", lambda e: show(0))
    root.bind("<Key-2>", lambda e: show(1))
    root.bind("<Key-3>", lambda e: show(2))
    root.bind("<Key-4>", lambda e: show(3))
    root.bind("<Key-5>", lambda e: show(4))
    root.bind("<Return>", lambda e: beru())
    root.bind("<Escape>", lambda e: preskocit())
    show(0)
    root.mainloop()
    # Release the documents and report the choice on stdout.
    for d in docs:
        try:
            d.close()
        except Exception:
            pass
    print(json.dumps({"chosen": chosen["path"]}, ensure_ascii=False))
if __name__ == "__main__":
    main()
+205
View File
@@ -0,0 +1,205 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Query Medevio for the full agenda of 17 Oct 2025,
print raw API response, and export to Excel.
"""
import json
import time
from pathlib import Path
import requests
import pandas as pd
from openpyxl import load_workbook
from openpyxl.styles import Font, Alignment
from openpyxl.utils import get_column_letter
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta # přidá měsíce správně
from Functions import get_reports_folder
GRAPHQL_URL = "https://api.medevio.cz/graphql"
CALENDAR_ID = "144c4e12-347c-49ca-9ec0-8ca965a4470d"
CLINIC_SLUG = "mudr-buzalkova"
# ==================== Load Token ====================
def load_gateway_token(storage_path="medevio_storage.json"):
    """Return the Medevio ``gateway-access-token`` cookie value.

    Reads a saved Playwright storage-state JSON file and extracts the
    gateway cookie from its ``cookies`` list.

    Args:
        storage_path: path to the Playwright storage-state JSON file.

    Returns:
        The token string.

    Raises:
        SystemExit: if the file does not exist or the cookie is missing.
    """
    # Fix: removed the redundant function-local `from pathlib import Path`;
    # Path is already imported at module level.
    path = Path(storage_path)
    if not path.exists():
        raise SystemExit(f"❌ Storage file not found: {path}")
    with path.open("r", encoding="utf-8") as f:
        state = json.load(f)
    token = next(
        (c["value"] for c in state["cookies"]
         if c["name"] == "gateway-access-token"), None
    )
    if not token:
        raise SystemExit("❌ gateway-access-token not found in storage file.")
    return token
# Authenticate using the gateway token from the Playwright storage state.
gateway_token = load_gateway_token()
headers = {
    "content-type": "application/json",
    "origin": "https://my.medevio.cz",
    "referer": "https://my.medevio.cz/",
    "authorization": f"Bearer {gateway_token}",
}
# === Dynamic dates: from today (UTC midnight) to one month ahead ===
dnes = datetime.utcnow().date()
since = datetime.combine(dnes, datetime.min.time()).replace(microsecond=1)
until = since + relativedelta(months=1) - timedelta(milliseconds=1)
# ISO format with a trailing "Z" (UTC), as the API expects
since_iso = since.isoformat() + "Z"
until_iso = until.isoformat() + "Z"
# GraphQL request: list all clinic reservations in the [since, until] window.
payload = {
    "operationName": "ClinicAgenda_ListClinicReservations",
    "variables": {
        "calendarIds": [CALENDAR_ID],
        "clinicSlug": CLINIC_SLUG,
        "since": since_iso,
        "until": until_iso,
        "locale": "cs",
        "emptyCalendarIds": False,
    },
    "query": """query ClinicAgenda_ListClinicReservations(
    $calendarIds: [UUID!],
    $clinicSlug: String!,
    $locale: Locale!,
    $since: DateTime!,
    $until: DateTime!,
    $emptyCalendarIds: Boolean!
) {
    reservations: listClinicReservations(
        clinicSlug: $clinicSlug,
        calendarIds: $calendarIds,
        since: $since,
        until: $until
    ) @skip(if: $emptyCalendarIds) {
        id
        start
        end
        note
        done
        color
        request {
            id
            displayTitle(locale: $locale)
            extendedPatient {
                name
                surname
                dob
                insuranceCompanyObject { shortName }
            }
        }
    }
}""",
}
print("since:", since_iso)
print("until:", until_iso)
# ==================== Query API ====================
print("📡 Querying Medevio API for agenda...")
r = requests.post(GRAPHQL_URL, headers=headers, data=json.dumps(payload))
print("Status:", r.status_code)
try:
    data = r.json()
except Exception as e:
    print("❌ Could not parse JSON:", e)
    print(r.text)
    raise SystemExit()
if "data" not in data or "reservations" not in data["data"]:
    raise SystemExit("⚠️ No 'reservations' data found in response.")
reservations = data["data"]["reservations"]
# NOTE(review): these imports shadow the module-level `datetime` import;
# harmless as written, but imports belong at the top of the file.
from datetime import datetime
from dateutil import parser, tz
# ===== Process reservations into table =====
rows = []
# NOTE(review): the loop variable `r` shadows the HTTP response object above;
# safe here because the response is no longer used, but rename if extended.
for r in reservations:
    req = r.get("request") or {}
    patient = req.get("extendedPatient") or {}
    insurance = patient.get("insuranceCompanyObject") or {}
    # parse datetimes (convert to local Prague time)
    try:
        start_dt = parser.isoparse(r.get("start")).astimezone(tz.gettz("Europe/Prague"))
        end_dt = parser.isoparse(r.get("end")).astimezone(tz.gettz("Europe/Prague"))
    except Exception:
        start_dt = end_dt = None
    date_str = start_dt.strftime("%Y-%m-%d") if start_dt else ""
    time_interval = f"{start_dt.strftime('%H:%M')}-{end_dt.strftime('%H:%M')}" if start_dt and end_dt else ""
    rows.append({
        "Date": date_str,
        "Time": time_interval,
        "Title": req.get("displayTitle") or "",
        "Patient": f"{patient.get('surname','')} {patient.get('name','')}".strip(),
        "DOB": patient.get("dob") or "",
        "Insurance": insurance.get("shortName") or "",
        "Note": r.get("note") or "",
        "Color": r.get("color") or "",
        "Request_ID": req.get("id") or "",
        "Reservation_ID": r.get("id"),
    })
df = pd.DataFrame(rows).sort_values(["Date", "Time"])
# ===== Excel export =====
EXPORT_DIR = Path(get_reports_folder())
# EXPORT_DIR = Path(r"C:\Users\vlado\PycharmProjects\Medevio\exports")
EXPORT_DIR.mkdir(exist_ok=True)
timestamp = time.strftime("%Y-%m-%d %H-%M-%S")
xlsx_path = EXPORT_DIR / f"Medevio_agenda_{timestamp}.xlsx"
# remove old export files so only the newest agenda remains
for old in EXPORT_DIR.glob("Medevio_agenda_*.xlsx"):
    try:
        old.unlink()
    except Exception:
        pass
df.to_excel(xlsx_path, index=False)
wb = load_workbook(xlsx_path)
ws = wb.active
# === Apply styling and custom column widths ===
widths = {
    1: 11,  # A - Date
    2: 13,  # B - Time
    3: 45,  # C - Title
    4: 30,  # D - Patient
    5: 15,  # E - DOB
    6: 15,  # F - Insurance
    7: 30,  # G - Note
    8: 15,  # H - Color
    9: 37,  # I - Request_ID
    10: 37  # J - Reservation_ID
}
for col_idx in range(1, len(df.columns) + 1):
    col_letter = get_column_letter(col_idx)
    c = ws.cell(row=1, column=col_idx)
    c.font = Font(bold=True)
    c.alignment = Alignment(horizontal="center")
    ws.column_dimensions[col_letter].width = widths.get(col_idx, 20)
ws.freeze_panes = "A2"
wb.save(xlsx_path)
print(f"📘 Exported clean agenda view to:\n{xlsx_path}")
+29
View File
@@ -0,0 +1,29 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Send a single test e-mail through the Office365 SMTP server."""
import smtplib
from email.message import EmailMessage
# ========= CONFIG =========
SMTP_SERVER = "smtp.office365.com"
SMTP_PORT = 587
EMAIL_FROM = "ordinace@buzalkova.cz"
EMAIL_TO = "vladimir.buzalka@buzalka.cz"
SMTP_USER = "ordinace@buzalkova.cz"
# NOTE(review): keep the app password out of source control — read it from an
# environment variable or a secrets file instead of hard-coding it here.
SMTP_PASS = "********"  # <- your Office365 APP PASSWORD (see note below)
# ==========================
# Create the email
msg = EmailMessage()
msg["Subject"] = "Test zpráva z Pythonu"
msg["From"] = EMAIL_FROM
msg["To"] = EMAIL_TO
msg.set_content("Dobrý den,\n\ntoto je testovací e-mail odeslaný z Python skriptu.\n\n--\nOrdinace MUDr. Buzalková")
# Send the email (connection is upgraded to TLS before login)
with smtplib.SMTP(SMTP_SERVER, SMTP_PORT) as server:
    server.starttls()  # enable TLS encryption
    server.login(SMTP_USER, SMTP_PASS)
    server.send_message(msg)
print("✅ E-mail byl úspěšně odeslán!")
+98
View File
@@ -0,0 +1,98 @@
import socket,fdb,pymysql
from pymysql.cursors import DictCursor
import pymysql
from pymysql.cursors import DictCursor
import socket
def get_path_ciselniky():
    """Return the "Import" (ciselniky) folder path for the current machine.

    Prints a warning and returns None on an unrecognised host.
    """
    host = socket.gethostname().strip().upper()
    import_dirs = {
        "NTBVBHP470G10": r"u:\Dropbox\!!!Days\Downloads Z230\Pracuji_na\Import",
        "Z230": r"u:\Dropbox\!!!Days\Downloads Z230\Pracuji_na\Import",
        "SESTRA": r"z:\Dropbox\!!!Days\Downloads Z230\Pracuji_na\Import",
    }
    folder = import_dirs.get(host)
    if folder is None:
        print(f"Unknown host: {host}")
    return folder
def get_reports_folder():
    """Return the reports folder path for the current machine.

    Prints a warning and returns None on an unrecognised host.
    """
    host = socket.gethostname().strip().upper()
    if host in ("NTBVBHP470G10", "Z230"):
        return r"u:\Dropbox\!!!Days\Downloads Z230"
    if host in ("SESTRA", "POHODA", "LEKAR"):
        return r"z:\Dropbox\Ordinace\Reporty"
    print(f"Unknown host: {host}")
    return None
def get_mysql_connection(cursor_mode=None):
    """
    Return a PyMySQL connection to the Medevio MySQL database.

    If cursor_mode == "DICT", the connection uses DictCursor; otherwise the
    default tuple cursor is used. Returns None when the host is unknown or
    the connection attempt fails.
    """
    host_name = socket.gethostname().strip()
    # NOTE(review): credentials are hard-coded — consider env vars / config.
    if host_name in ("NTBVBHP470G10", "Z230"):
        cfg = dict(
            host="192.168.1.76",
            port=3307,
            user="root",
            password="Vlado9674+",
            database="medevio",
            autocommit=True,
        )
    elif host_name == "SESTRA":
        cfg = dict(
            host="127.0.0.1",
            port=3307,
            user="root",
            password="Vlado9674+",
            database="medevio",
            autocommit=True,
        )
    else:
        print(f"Unknown host: {host_name}")
        return None
    # Only attach a cursorclass when a dict cursor was requested.
    if cursor_mode == "DICT":
        cfg["cursorclass"] = DictCursor
    try:
        return pymysql.connect(**cfg)
    except pymysql.MySQLError as e:
        print(f"MySQL connection failed: {e}")
        return None
def get_medicus_connection():
    """
    Attempt to create a Firebird connection to the Medicus database.

    Returns:
        fdb.Connection object on success
        None on failure (unknown host or database error)
    """
    hostname = socket.gethostname().strip()
    if hostname in ("NTBVBHP470G10", "Z230"):
        medicus_cfg = dict(
            dsn=r"192.168.1.4:z:\medicus 3\data\medicus.fdb",
            user="SYSDBA",
            password="masterkey",
            charset="win1250",
        )
    elif hostname == "SESTRA":
        medicus_cfg = dict(
            dsn=r"192.168.1.10:m:\medicus\data\medicus.fdb",
            user="SYSDBA",
            password="masterkey",
            charset="win1250",
        )
    else:
        # Fix: previously an unknown host fell through with the config dict
        # undefined, raising NameError at fdb.connect(); now it fails
        # gracefully like the sibling get_*_connection helpers.
        print(f"Unknown host: {hostname}")
        return None
    try:
        return fdb.connect(**medicus_cfg)
    except fdb.fbcore.DatabaseError as e:
        print(f"Medicus DB connection failed: {e}")
        return None
View File
+62
View File
@@ -0,0 +1,62 @@
import winreg
import json
import os
def get_dropbox_root() -> str:
    """
    Return the root path of the Dropbox folder on this machine.

    Dropbox can be installed on different drives (C:, U:, Z:, ...) while the
    folder structure inside stays the same, so other scripts call this helper
    instead of hard-coding the path.

    Lookup order:
    1. Registry HKCU\\Software\\Dropbox\\ks — main key; the "Personal" value
       is stored as a byte array in UTF-16 LE encoding.
    2. Registry HKCU\\Software\\Dropbox\\ks1 — alternative key used by newer
       Dropbox client versions.
    3. info.json in %APPDATA%\\Dropbox\\ or %LOCALAPPDATA%\\Dropbox\\ —
       fallback when the registry does not contain the path.

    Returns:
        str: absolute path of the Dropbox root, e.g. "U:\\Dropbox".

    Raises:
        RuntimeError: if the path cannot be determined by any method.

    Example:
        from Knihovny.najdi_dropbox import get_dropbox_root
        import os
        ROOT = get_dropbox_root()
        PACIENTI = os.path.join(ROOT, "Ordinace", "Pacienti")
    """
    # Methods 1 and 2: registry keys HKCU\Software\Dropbox\ks and ks1
    for subkey in (r"Software\Dropbox\ks", r"Software\Dropbox\ks1"):
        try:
            with winreg.OpenKey(winreg.HKEY_CURRENT_USER, subkey) as key:
                value, _ = winreg.QueryValueEx(key, "Personal")
                # The value is UTF-16 LE bytes, NUL-terminated.
                path = bytes(value).decode("utf-16-le").rstrip("\x00")
                if path:
                    return path
        except (OSError, UnicodeDecodeError):
            continue
    # Method 3: fallback — info.json in AppData
    for base in (os.getenv("APPDATA", ""), os.getenv("LOCALAPPDATA", "")):
        info_path = os.path.join(base, "Dropbox", "info.json")
        if os.path.isfile(info_path):
            with open(info_path, encoding="utf-8") as f:
                info = json.load(f)
            path = (info.get("personal") or info.get("business") or {}).get("path", "")
            if path:
                return path
    raise RuntimeError("Nepodařilo se zjistit cestu k Dropboxu.")
if __name__ == "__main__":
    # Quick manual check: print the detected Dropbox root.
    root = get_dropbox_root()
    print(f"Dropbox root: {root}")
+49
View File
@@ -0,0 +1,49 @@
import socket
from dataclasses import dataclass
@dataclass
class MedicusConfig:
    """Connection coordinates for a Medicus Firebird database."""

    server: str
    path: str

    @property
    def dsn(self) -> str:
        """Firebird DSN in the "server:path" form."""
        return self.server + ":" + self.path
def get_medicus_config() -> MedicusConfig:
    """
    Return the Medicus DB connection settings for the current machine.

    Host mapping:
        LEKAR           -> localhost, M:\\Medicus\\Data\\Medicus.fdb (live, local)
        SESTRA / LENOVO -> 192.168.1.10, M:\\Medicus\\Data\\Medicus.fdb (over LAN)
        anything else   -> localhost, C:\\Medicus 3\\Data\\Medicus.fdb (test, local)

    Returns:
        MedicusConfig with server, path, and the derived dsn ("server:path").

    Example:
        from Knihovny.najdi_medicus import get_medicus_config
        cfg = get_medicus_config()
        con = fdb.connect(dsn=cfg.dsn, user="SYSDBA", password="masterkey")
    """
    host = socket.gethostname().upper()
    shared_path = r"M:\Medicus\Data\Medicus.fdb"
    if host == "LEKAR":
        return MedicusConfig(server="localhost", path=shared_path)
    if host in ("SESTRA", "LENOVO"):
        return MedicusConfig(server="192.168.1.10", path=shared_path)
    return MedicusConfig(server="localhost", path=r"C:\Medicus 3\Data\Medicus.fdb")
if __name__ == "__main__":
    # Quick manual check: print the configuration chosen for this host.
    cfg = get_medicus_config()
    print(f"Hostname : {socket.gethostname()}")
    print(f"Server   : {cfg.server}")
    print(f"Path     : {cfg.path}")
    print(f"DSN      : {cfg.dsn}")
+271
View File
@@ -0,0 +1,271 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Stáhne konverzaci pro požadavky, kde:
messagesProcessed IS NULL OR messagesProcessed < updatedAt.
Vloží do medevio_conversation a přílohy do medevio_downloads.
"""
import zlib
import json
import requests
import pymysql
from pathlib import Path
from datetime import datetime
import time
# ==============================
# 🔧 CONFIGURATION
# ==============================
TOKEN_PATH = Path("token.txt")  # single line holding the Medevio bearer token
# NOTE(review): database credentials are hard-coded here — consider moving
# them to environment variables or a config file outside version control.
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3307,
    "user": "root",
    "password": "Vlado9674+",
    "database": "medevio",
    "charset": "utf8mb4",
    "cursorclass": pymysql.cursors.DictCursor,
}
GRAPHQL_QUERY_MESSAGES = r"""
query UseMessages_ListMessages($requestId: String!, $updatedSince: DateTime) {
messages: listMessages(patientRequestId: $requestId, updatedSince: $updatedSince) {
id
createdAt
updatedAt
readAt
text
type
sender {
id
name
surname
clinicId
}
medicalRecord {
id
description
contentType
url
downloadUrl
token
createdAt
updatedAt
}
}
}
"""
# ==============================
# ⏱ DATETIME PARSER
# ==============================
def parse_dt(s):
    """Parse a Medevio API timestamp string into a datetime.

    Accepts ISO-8601 strings; a trailing "Z" is treated as UTC. When the full
    string is not valid ISO (e.g. exotic fractional seconds), falls back to
    parsing just the first 19 characters ("YYYY-MM-DDTHH:MM:SS").

    Args:
        s: timestamp string, or None/empty.

    Returns:
        datetime (timezone-aware when an offset/"Z" is present), or None for
        empty or unparseable input.
    """
    if not s:
        return None
    try:
        return datetime.fromisoformat(s.replace("Z", "+00:00"))
    except (ValueError, TypeError):
        # Fix: was a bare `except:` that also swallowed KeyboardInterrupt
        # and SystemExit; now only parse failures fall through.
        pass
    try:
        return datetime.strptime(s[:19], "%Y-%m-%dT%H:%M:%S")
    except (ValueError, TypeError):
        return None
# ==============================
# 🔐 TOKEN
# ==============================
def read_token(path: Path) -> str:
    """Read the bearer token from *path*, dropping surrounding whitespace
    and any leading "Bearer " prefix."""
    raw = path.read_text(encoding="utf-8")
    return raw.strip().replace("Bearer ", "")
# ==============================
# 📡 FETCH MESSAGES
# ==============================
def fetch_messages(headers, request_id):
    """Fetch all messages of one patient request from the Medevio GraphQL API.

    Args:
        headers: HTTP headers including the Authorization bearer token.
        request_id: patient request ID whose conversation should be loaded.

    Returns:
        List of message dicts; an empty list on HTTP or JSON parse errors.
    """
    payload = {
        "operationName": "UseMessages_ListMessages",
        "query": GRAPHQL_QUERY_MESSAGES,
        "variables": {"requestId": request_id, "updatedSince": None},
    }
    r = requests.post("https://api.medevio.cz/graphql", json=payload, headers=headers, timeout=30)
    if r.status_code != 200:
        print("❌ HTTP", r.status_code, "for request", request_id)
        return []
    try:
        data = r.json()
    except Exception as e:
        print(f"❌ Failed to parse JSON for {request_id}: {e}")
        print("   Response text:", r.text[:500])
        return []
    # `or []` guards against an explicit null in the GraphQL response.
    messages = data.get("data", {}).get("messages", []) or []
    print(f"   🌐 API returned {len(messages)} messages for {request_id}")
    return messages
# ==============================
# 💾 SAVE MESSAGE
# ==============================
def insert_message(cur, req_id, msg):
    """Upsert one conversation message into the medevio_conversation table.

    Args:
        cur: open PyMySQL cursor.
        req_id: patient request ID the message belongs to.
        msg: message dict as returned by the GraphQL API.
    """
    sender = msg.get("sender") or {}
    # Full sender name, skipping missing parts; None when both are missing.
    sender_name = " ".join(
        x for x in [sender.get("name"), sender.get("surname")] if x
    ) or None
    sql = """
    INSERT INTO medevio_conversation (
        id, request_id,
        sender_name, sender_id, sender_clinic_id,
        text, created_at, read_at, updated_at,
        attachment_url, attachment_description, attachment_content_type
    ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
    ON DUPLICATE KEY UPDATE
        sender_name = VALUES(sender_name),
        sender_id = VALUES(sender_id),
        sender_clinic_id = VALUES(sender_clinic_id),
        text = VALUES(text),
        created_at = VALUES(created_at),
        read_at = VALUES(read_at),
        updated_at = VALUES(updated_at),
        attachment_url = VALUES(attachment_url),
        attachment_description = VALUES(attachment_description),
        attachment_content_type = VALUES(attachment_content_type)
    """
    mr = msg.get("medicalRecord") or {}
    cur.execute(sql, (
        msg.get("id"),
        req_id,
        sender_name,
        sender.get("id"),
        sender.get("clinicId"),
        msg.get("text"),
        parse_dt(msg.get("createdAt")),
        parse_dt(msg.get("readAt")),
        parse_dt(msg.get("updatedAt")),
        mr.get("downloadUrl") or mr.get("url"),
        mr.get("description"),
        mr.get("contentType")
    ))
# ==============================
# 💾 DOWNLOAD MESSAGE ATTACHMENT
# ==============================
def insert_download(cur, req_id, msg, existing_ids):
    """Download a message attachment and upsert it into medevio_downloads.

    Skips messages without an attachment, attachments already listed in
    *existing_ids*, and attachments without a usable URL. Successfully stored
    attachment IDs are added to *existing_ids* (mutated in place).
    """
    mr = msg.get("medicalRecord") or {}
    attachment_id = mr.get("id")
    if not attachment_id:
        return
    if attachment_id in existing_ids:
        return  # skip duplicates
    url = mr.get("downloadUrl") or mr.get("url")
    if not url:
        return
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        data = r.content
    except Exception as e:
        print("⚠️ Failed to download:", e)
        return
    # Derive a file name from the URL path, dropping any query string.
    filename = url.split("/")[-1].split("?")[0]
    cur.execute("""
        INSERT INTO medevio_downloads (
            request_id, attachment_id, attachment_type,
            filename, content_type, file_size, created_at, file_content
        ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
        ON DUPLICATE KEY UPDATE
            file_content = VALUES(file_content),
            file_size = VALUES(file_size),
            downloaded_at = NOW()
    """, (
        req_id,
        attachment_id,
        "MESSAGE_ATTACHMENT",
        filename,
        mr.get("contentType"),
        len(data),
        parse_dt(msg.get("createdAt")),
        data
    ))
    existing_ids.add(attachment_id)
# ==============================
# 🧠 MAIN
# ==============================
def main():
    """Sync messages for the 10 least-recently-updated requests.

    Flow: read the bearer token, load already-downloaded attachment ids,
    select the 10 oldest rows from `pozadavky`, then for each request fetch
    its messages, upsert them, download any new attachments, and stamp
    `messagesProcessed`.
    """
    # NOTE(review): TOKEN_PATH is defined elsewhere in this file — confirm it
    # points at the same token.txt as the other scripts' TOKEN_FILE.
    token = read_token(TOKEN_PATH)
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    conn = pymysql.connect(**DB_CONFIG)
    # ---- Load existing attachments (ids already stored, used to skip re-downloads)
    with conn.cursor() as cur:
        cur.execute("SELECT attachment_id FROM medevio_downloads")
        existing_ids = {row["attachment_id"] for row in cur.fetchall()}
    print(f"📦 Already downloaded attachments: {len(existing_ids)}\n")
    # ---- Select 10 oldest pozadavky (regardless of messagesProcessed)
    sql = """
        SELECT id
        FROM pozadavky
        ORDER BY updatedAt ASC
        LIMIT 10
    """
    with conn.cursor() as cur:
        cur.execute(sql)
        requests_to_process = cur.fetchall()
    print(f"📋 Will process {len(requests_to_process)} oldest pozadavků.\n")
    # ---- Process each pozadavek
    for idx, row in enumerate(requests_to_process, 1):
        req_id = row["id"]
        print(f"[{idx}/{len(requests_to_process)}] Processing {req_id} …")
        messages = fetch_messages(headers, req_id)
        # Messages and attachments are committed together per request.
        with conn.cursor() as cur:
            for msg in messages:
                insert_message(cur, req_id, msg)
                insert_download(cur, req_id, msg, existing_ids)
        conn.commit()
        # Stamp the request as processed only after its data is committed.
        with conn.cursor() as cur:
            cur.execute("UPDATE pozadavky SET messagesProcessed = NOW() WHERE id = %s", (req_id,))
        conn.commit()
        print(f"   ✅ {len(messages)} messages saved\n")
        time.sleep(0.25)  # be polite to the API
    conn.close()
    print("🎉 Done!")
if __name__ == "__main__":
    main()
+125
View File
@@ -0,0 +1,125 @@
#fcb2414b-067b-4ca2-91b2-6c36a86d4cbb = Vladimir Buzalka
#0210db7b-8fb0-4b47-b1d8-ec7a10849a63 = Vladko - testovací aplikace
#this code opens a patient by their UUID, creates a flu ("chřipka") request for them, and that's it
from pathlib import Path
from datetime import datetime
from playwright.sync_api import sync_playwright, TimeoutError as PWTimeout
import time
STATE_FILE = Path("../medevio_storage.json")
PATIENT_UUID = "0210db7b-8fb0-4b47-b1d8-ec7a10849a63"
PATIENT_URL = f"https://my.medevio.cz/mudr-buzalkova/klinika/pacienti?pacient={PATIENT_UUID}"
MESSAGE_TEXT = "Dobrý den, vakcína proti chřipce je k dispozici, zítra (úterý 23.9) budeme očkovat od 13-17 hodin, prosím potvrďte jestli můžete přijít a jaký čas se Vám hodí."
def savepage(name: str, page, folder: Path = Path(r"U:\Dropbox\!!!Days\Downloads Z230\Pages")):
    """Save the current HTML of a Playwright Page as a timestamped snapshot.

    Parameters:
        name:   label appended after the timestamp in the file name.
        page:   Playwright Page whose ``content()`` is written out.
        folder: destination directory (created if missing). Defaults to the
                previously hard-coded snapshot folder, so existing callers
                are unaffected; pass a different Path to redirect snapshots.
    """
    folder.mkdir(parents=True, exist_ok=True)  # ensure the folder exists
    # Sortable timestamp like 2025-09-19_14-05-33 keeps snapshots in order.
    ts = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    filepath = folder / f"{ts}_{name}.html"
    filepath.write_text(page.content(), encoding="utf-8")
    print(f"Saved page snapshot to {filepath}")
def main():
    """Open one patient's card, create an 'Očkování - Chřipka' request, and
    send MESSAGE_TEXT into it via the Medevio web UI (Playwright)."""
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=False, slow_mo=200)
        context = browser.new_context(storage_state=str(STATE_FILE))
        # ---- keep a stable reference to the patient card page ----
        ptcard = context.new_page()
        ptcard.goto(PATIENT_URL, wait_until="networkidle")
        #saving ptcard1
        # savepage("ptcard1",ptcard)
        ptcard.get_by_text("Historie požadavků").wait_for(timeout=15_000)
        # 1) Create new request on the patient card
        ptcard.get_by_role("button", name="Nový požadavek").click()
        ptcard.wait_for_timeout(300)  # small settle
        # cursor is already in the "Začněte psát…" field
        ptcard.keyboard.type("očkování - chřipka")
        ptcard.locator("[role='option']", has_text="Očkování - Chřipka").first.click()
        ptcard.get_by_role("button", name="Vytvořit požadavek").click()
        #saving ptcard2
        # savepage("ptcard2",ptcard)
        # 2) Ensure we are back on the patient card again
        #    (some UIs rerender; either way we want a fresh list)
        try:
            ptcard.get_by_text("Historie požadavků").wait_for(timeout=7_000)
        except PWTimeout:
            # If for any reason we are not on the card, navigate back explicitly
            ptcard.goto(PATIENT_URL, wait_until="networkidle")
            ptcard.get_by_text("Historie požadavků").wait_for(timeout=10_000)
        # Optional: hard refresh to get the just-created request at the top
        ptcard.reload(wait_until="networkidle")
        ptcard.get_by_text("Historie požadavků").wait_for(timeout=10_000)
        time.sleep(5)
        # 3) Open the “Očkování Chřipka …” request card by its H4 text
        #    (click the whole card container, not just the heading)
        try:
            # wait until at least one request card is rendered
            ptcard.locator("div[data-testid='patient-request-item']").first.wait_for(timeout=10_000)
            # locate the specific card that contains the H4 with "Očkování - Chřipka"
            chripka_card = ptcard.locator("div[data-testid='patient-request-item']").filter(
                has=ptcard.locator("h4:has-text('Očkování - Chřipka')")
            ).first
            # ensure it's attached/visible then click it
            chripka_card.wait_for(timeout=10_000)
            chripka_card.click(timeout=5_000)
        except Exception as e:
            # Fallback: click the very first card on the list (newest)
            try:
                first_card = ptcard.locator("div[data-testid='patient-request-item']").first
                first_card.click(timeout=5_000)
            except Exception:
                # if even that fails, save snapshot for inspection and raise
                savepage("ptcard_click_fail", ptcard)
                raise
        # 4) Wait for request detail and send the message
        # We're now on the detail page
        try:
            ptcard.wait_for_url("**/pozadavky?pozadavek=*", timeout=10_000)
        except PWTimeout:
            pass  # URL may be SPA; rely on textarea presence
        ptcard.get_by_placeholder("Napište odpověď").wait_for(timeout=10_000)
        ptcard.get_by_placeholder("Napište odpověď").fill(MESSAGE_TEXT)
        sent = False
        # Try the known variants of the "send" button label until one clicks.
        for sel in ["button:has-text('Odeslat')",
                    "button:has-text('Odeslat zprávu')",
                    "button:has-text('Odeslat SMS')",
                    "button:has-text('Odeslat do aplikace')"]:
            try:
                ptcard.click(sel, timeout=4000)
                sent = True
                break
            except Exception:
                continue
        if not sent:
            raise RuntimeError("Nepodařilo se najít/kliknout tlačítko Odeslat.")
        ptcard.wait_for_timeout(2000)
        print("✅ Požadavek vytvořen, otevřen a zpráva odeslána.")
if __name__ == "__main__":
    main()
+189
View File
@@ -0,0 +1,189 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pathlib import Path
from datetime import datetime
import re
import time
import pymysql
from pymysql.cursors import DictCursor
from playwright.sync_api import sync_playwright, TimeoutError as PWTimeout
# =====================================================
STATE_FILE = Path("../medevio_storage.json") # saved login state from your login script
MYSQL_CFG = dict(
host="192.168.1.76",
port=3307,
user="root",
password="Vlado9674+",
database="medevio",
cursorclass=DictCursor,
autocommit=False,
)
UUID_COLUMN = "rid" # column with Medevio UUID
FLAG_COLUMN = "pozchripkavytvoren" # bool flag we update after success
FLAG_TS_COL = "pozchripka_vytv_at" # optional timestamp when request created
MESSAGE_TEXT = (
"Dobrý den, vakcína proti chřipce je k dispozici, "
"zítra (úterý 23.9) budeme očkovat od 13-17 hodin, "
"prosím potvrďte jestli můžete přijít a jaký čas se Vám hodí."
)
PATIENT_URL_TMPL = "https://my.medevio.cz/mudr-buzalkova/klinika/pacienti?pacient={uuid}"
RC_DIGITS = re.compile(r"\D+")
# =====================================================
def normalize_rc(rc: str) -> str:
    """Return *rc* reduced to digits only (drops '/', spaces and any other non-digit)."""
    digits_only = re.sub(r"\D+", "", rc or "")
    return digits_only
def ensure_flag_columns(conn):
    """Create required columns if missing (works for all MySQL/MariaDB).

    Checks INFORMATION_SCHEMA for FLAG_COLUMN and FLAG_TS_COL on
    patients_extracted and runs ALTER TABLE for each that is absent.
    Column names come from module constants, so the f-string DDL is safe here.
    """
    needed = {
        FLAG_COLUMN: "TINYINT(1) NULL",
        FLAG_TS_COL: "DATETIME NULL",
    }
    with conn.cursor() as cur:
        for col, coldef in needed.items():
            cur.execute("""
                SELECT COUNT(*) AS cnt
                FROM INFORMATION_SCHEMA.COLUMNS
                WHERE TABLE_SCHEMA = DATABASE()
                AND TABLE_NAME = 'patients_extracted'
                AND COLUMN_NAME = %s
            """, (col,))
            if cur.fetchone()["cnt"] == 0:
                ddl = f"ALTER TABLE `patients_extracted` ADD COLUMN `{col}` {coldef}"
                print("Adding column:", ddl)
                cur.execute(ddl)
    conn.commit()
def fetch_uuid_by_rc(conn, rc_digits: str) -> dict | None:
    """Look up one patient by digits-only RC.

    Returns a dict with rid, uuid (taken from UUID_COLUMN), jmeno, prijmeni
    and rc, or None when nothing matches.  The query, its parameter and the
    fetched row are printed for debugging.
    """
    query = (
        f"SELECT rid, `{UUID_COLUMN}` AS uuid, jmeno, prijmeni, rc "
        "FROM patients_extracted "
        "WHERE REPLACE(REPLACE(rc,'/',''),' ','') = %s "
        "LIMIT 1"
    )
    print("DEBUG SQL:", query, "| param:", rc_digits)
    with conn.cursor() as cur:
        cur.execute(query, (rc_digits,))
        match = cur.fetchone()
    print("DEBUG result:", match)
    return match
def mark_flag_success(conn, rid: str):
    """Update the flag once the Medevio request is created.

    Sets FLAG_COLUMN to 1 and FLAG_TS_COL to NOW() on the row with the given
    rid, then commits. Column names come from module constants (not user input).
    """
    with conn.cursor() as cur:
        cur.execute(
            f"UPDATE patients_extracted "
            f"SET {FLAG_COLUMN}=1, {FLAG_TS_COL}=NOW() "
            f"WHERE rid=%s",
            (rid,)
        )
    conn.commit()
def create_flu_request_for_uuid(uuid: str) -> bool:
    """Automate the Medevio UI to create an 'Očkování - Chřipka' request and
    send MESSAGE_TEXT into it.

    Returns True when a send button was successfully clicked, False otherwise.
    The browser window is visible (headless=False) so the run can be watched.
    """
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=False, slow_mo=200)
        context = browser.new_context(storage_state=str(STATE_FILE))
        ptcard = context.new_page()
        url = PATIENT_URL_TMPL.format(uuid=uuid)
        ptcard.goto(url, wait_until="networkidle")
        # ensure patient card loaded
        ptcard.get_by_text("Historie požadavků").wait_for(timeout=15_000)
        # create new request
        ptcard.get_by_role("button", name="Nový požadavek").click()
        ptcard.wait_for_timeout(300)
        ptcard.keyboard.type("očkování - chřipka")
        ptcard.locator("[role='option']", has_text="Očkování - Chřipka").first.click()
        ptcard.get_by_role("button", name="Vytvořit požadavek").click()
        time.sleep(5)
        # wait until back on card; fall back to an explicit navigation
        try:
            ptcard.get_by_text("Historie požadavků").wait_for(timeout=7_000)
        except PWTimeout:
            ptcard.goto(url, wait_until="networkidle")
            ptcard.get_by_text("Historie požadavků").wait_for(timeout=10_000)
        # hard refresh so the just-created request appears in the list
        ptcard.reload(wait_until="networkidle")
        ptcard.get_by_text("Historie požadavků").wait_for(timeout=10_000)
        time.sleep(2)
        # open the new request (prefer the card whose H4 names Chřipka)
        try:
            ptcard.locator("div[data-testid='patient-request-item']").first.wait_for(timeout=10_000)
            chripka_card = ptcard.locator("div[data-testid='patient-request-item']").filter(
                has=ptcard.locator("h4:has-text('Očkování - Chřipka')")
            ).first
            chripka_card.click(timeout=5_000)
        except Exception:
            # fallback: click the first (newest) request item
            ptcard.locator("div[data-testid='patient-request-item']").first.click(timeout=5_000)
        # send the message
        try:
            ptcard.wait_for_url("**/pozadavky?pozadavek=*", timeout=10_000)
        except PWTimeout:
            pass
        ptcard.get_by_placeholder("Napište odpověď").wait_for(timeout=10_000)
        ptcard.get_by_placeholder("Napište odpověď").fill(MESSAGE_TEXT)
        # Try the known variants of the send-button label until one works.
        for sel in [
            "button:has-text('Odeslat')",
            "button:has-text('Odeslat zprávu')",
            "button:has-text('Odeslat SMS')",
            "button:has-text('Odeslat do aplikace')",
        ]:
            try:
                ptcard.click(sel, timeout=4000)
                browser.close()
                return True
            except Exception:
                continue
        browser.close()
        return False
def main():
    """Interactive single-patient mode: prompt for an RC, look up the patient,
    create the flu request via the UI, and flag the DB row on success."""
    rc_input = input("Zadejte RC (s/bez lomítka, Enter pro konec): ").strip()
    # rc_input="320312460"
    if not rc_input:
        print("Konec.")
        return
    rc = normalize_rc(rc_input)
    conn = pymysql.connect(**MYSQL_CFG)
    try:
        ensure_flag_columns(conn)
        row = fetch_uuid_by_rc(conn, rc)
        if not row or not row.get("uuid"):
            print(f"✗ Pacient s RC {rc} nenalezen nebo nemá sloupec {UUID_COLUMN}.")
            return
        print(f"→ Nalezen: {row.get('prijmeni','')} {row.get('jmeno','')} "
              f"| RC {row.get('rc','')} | UUID {row['uuid']} | rid {row['rid']}")
        # Flag the row only after the UI automation reports success.
        if create_flu_request_for_uuid(row["uuid"]):
            mark_flag_success(conn, row["rid"])
            print("✅ Požadavek chřipka vytvořen a DB aktualizována.")
        else:
            print("✗ Nepodařilo se odeslat zprávu v požadavku.")
    finally:
        conn.close()
if __name__ == "__main__":
    main()
@@ -0,0 +1,207 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pathlib import Path
import re
import time
import pymysql
from pymysql.cursors import DictCursor
from playwright.sync_api import sync_playwright, TimeoutError as PWTimeout
# ================== CONFIG ==================
STATE_FILE = Path("../medevio_storage.json")
MYSQL_CFG = dict(
host="192.168.1.76",
port=3307,
user="root",
password="Vlado9674+",
database="medevio",
cursorclass=DictCursor,
autocommit=False,
)
# Column that goes into the Medevio URL.
# If your Medevio patient UUID is stored in a different column, change this:
UUID_COLUMN = "rid" # Medevio UUID in your table
FLAG_COLUMN = "pozchripkavytvoren" # set to 1 on success
FLAG_TS_COL = "pozchripka_vytv_at" # timestamp when created
MESSAGE_TEXT = (
"Dobrý den, vakcína proti chřipce je k dispozici, "
"dnes (úterý 23.9) budeme očkovat od 13-17 hodin, "
"prosím, otevřete si tento požadavek a vyberte si termín. Můžete si samozřejmě vybrat i kterýkoliv jiný den, ale hromadně očkujeme další 4 úterky. Další 4 úterky najdete spoustu termínů."
)
PATIENT_URL_TMPL = "https://my.medevio.cz/mudr-buzalkova/klinika/pacienti?pacient={uuid}"
BATCH_LIMIT = 2 # change if you want to limit how many to process in one run
PAUSE_BETWEEN = 1.0 # seconds between patients (UI courtesy)
# ===========================================
RC_DIGITS = re.compile(r"\D+")
def ensure_flag_columns(conn):
    """Add FLAG_COLUMN / FLAG_TS_COL to patients_extracted when absent (portable)."""
    required = {
        FLAG_COLUMN: "TINYINT(1) NULL",
        FLAG_TS_COL: "DATETIME NULL",
    }
    with conn.cursor() as cur:
        for column, definition in required.items():
            # Probe INFORMATION_SCHEMA so the ALTER runs only when needed.
            cur.execute("""
                SELECT COUNT(*) AS cnt
                FROM INFORMATION_SCHEMA.COLUMNS
                WHERE TABLE_SCHEMA = DATABASE()
                AND TABLE_NAME = 'patients_extracted'
                AND COLUMN_NAME = %s
            """, (column,))
            missing = cur.fetchone()["cnt"] == 0
            if missing:
                cur.execute(f"ALTER TABLE `patients_extracted` ADD COLUMN `{column}` {definition}")
    conn.commit()
def fetch_batch(conn):
    """
    Get patients where:
      - flu_reply = 'ano'
      - mamedevioucet is true-ish
      - rc starts with '7' (after removing slash/spaces)
        NOTE(review): an earlier docstring said '8', but the SQL below filters
        LIKE '7%' — the docstring is corrected to match the code.
      - pozchripkavytvoren is NULL
      - uuid column is present

    Returns at most BATCH_LIMIT rows ordered by surname, first name.
    """
    # '%%' escapes the literal '%' for pymysql's format-style parameters.
    sql = f"""
        SELECT
            rid, jmeno, prijmeni, rc,
            `{UUID_COLUMN}` AS uuid
        FROM patients_extracted
        WHERE flu_reply = 'ano'
          AND (mamedevioucet = 1 OR mamedevioucet = TRUE OR mamedevioucet = '1')
          AND REPLACE(REPLACE(rc,'/',''),' ','') LIKE '7%%'
          AND {FLAG_COLUMN} IS NULL
          AND `{UUID_COLUMN}` IS NOT NULL
          AND `{UUID_COLUMN}` <> ''
        ORDER BY prijmeni, jmeno
        LIMIT %s
    """
    with conn.cursor() as cur:
        cur.execute(sql, (BATCH_LIMIT,))
        return cur.fetchall()
def mark_flag_success(conn, rid: str):
    """Set the flu-request flag and its timestamp for one patient row, then commit."""
    update_sql = (
        f"UPDATE patients_extracted "
        f"SET {FLAG_COLUMN}=1, {FLAG_TS_COL}=NOW() "
        f"WHERE rid=%s"
    )
    with conn.cursor() as cur:
        cur.execute(update_sql, (rid,))
    conn.commit()
def create_flu_request_for_uuid(uuid: str) -> bool:
    """Automate the Medevio UI for one patient: create 'Očkování - Chřipka'
    and send MESSAGE_TEXT.

    Returns True when a send button was clicked, False otherwise.
    """
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=False, slow_mo=200)
        context = browser.new_context(storage_state=str(STATE_FILE))
        ptcard = context.new_page()
        url = PATIENT_URL_TMPL.format(uuid=uuid)
        ptcard.goto(url, wait_until="networkidle")
        # ensure patient card loaded
        ptcard.get_by_text("Historie požadavků").wait_for(timeout=15_000)
        # create new request
        ptcard.get_by_role("button", name="Nový požadavek").click()
        ptcard.wait_for_timeout(300)
        ptcard.keyboard.type("očkování - chřipka")
        ptcard.locator("[role='option']", has_text="Očkování - Chřipka").first.click()
        ptcard.get_by_role("button", name="Vytvořit požadavek").click()
        time.sleep(2)
        # # wait until back on card
        # try:
        #     ptcard.get_by_text("Historie požadavků").wait_for(timeout=7_000)
        # except PWTimeout:
        #     ptcard.goto(url, wait_until="networkidle")
        #     ptcard.get_by_text("Historie požadavků").wait_for(timeout=10_000)
        # ptcard.reload(wait_until="networkidle")
        ptcard.get_by_text("Historie požadavků").wait_for(timeout=10_000)
        time.sleep(2)
        # open the new request (prefer the card whose H4 names Chřipka)
        try:
            ptcard.locator("div[data-testid='patient-request-item']").first.wait_for(timeout=10_000)
            chripka_card = ptcard.locator("div[data-testid='patient-request-item']").filter(
                has=ptcard.locator("h4:has-text('Očkování - Chřipka')")
            ).first
            chripka_card.click(timeout=5_000)
        except Exception:
            # fallback: click the first (newest) request item
            ptcard.locator("div[data-testid='patient-request-item']").first.click(timeout=5_000)
        # send the message
        # try:
        #     ptcard.wait_for_url("**/pozadavky?pozadavek=*", timeout=10_000)
        # except PWTimeout:
        #     pass
        ptcard.get_by_placeholder("Napište odpověď").wait_for(timeout=10_000)
        ptcard.get_by_placeholder("Napište odpověď").fill(MESSAGE_TEXT)
        time.sleep(2)
        # Try the known variants of the send-button label until one works.
        for sel in [
            "button:has-text('Odeslat')",
            "button:has-text('Odeslat zprávu')",
            "button:has-text('Odeslat SMS')",
            "button:has-text('Odeslat do aplikace')",
        ]:
            try:
                ptcard.click(sel, timeout=4000)
                browser.close()
                return True
            except Exception:
                continue
        browser.close()
        return False
def main():
    """Batch mode: process up to BATCH_LIMIT patients from fetch_batch,
    creating a flu request for each and flagging the DB row on success."""
    conn = pymysql.connect(**MYSQL_CFG)
    try:
        ensure_flag_columns(conn)
        rows = fetch_batch(conn)
        if not rows:
            print("Nenalezen žádný pacient pro zpracování.")
            return
        print(f"Zpracujeme {len(rows)} pacientů…")
        processed = ok = fail = 0
        for r in rows:
            processed += 1
            rid = r["rid"]
            uuid = r["uuid"]
            name = f"{r.get('prijmeni','')}, {r.get('jmeno','')}"
            rc = r.get("rc","")
            print(f"[{processed:>3}] {name} | RC {rc} | UUID {uuid}")
            try:
                success = create_flu_request_for_uuid(uuid)
                if success:
                    # Flag only after the UI automation reports success.
                    mark_flag_success(conn, rid)
                    ok += 1
                    print("   ✓ vytvořeno + odesláno, DB flag nastaven")
                else:
                    fail += 1
                    print("   ✗ nepodařilo se odeslat zprávu (tlačítko 'Odeslat' nenalezeno)")
            except Exception as e:
                # UI failures must not leave a half-done transaction behind.
                fail += 1
                conn.rollback()
                print(f"   ✗ chyba: {type(e).__name__}: {e}")
            time.sleep(PAUSE_BETWEEN)
        print(f"Hotovo. processed={processed}, ok={ok}, fail={fail}")
    finally:
        conn.close()
if __name__ == "__main__":
    main()
@@ -0,0 +1,322 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pathlib import Path
import re
import time
import unicodedata
import pymysql
from pymysql.cursors import DictCursor
from playwright.sync_api import sync_playwright, TimeoutError as PWTimeout, Page
# ================== CONFIG ==================
STATE_FILE = Path("../medevio_storage.json")
MYSQL_CFG = dict(
host="192.168.1.76",
port=3307,
user="root",
password="Vlado9674+",
database="medevio",
cursorclass=DictCursor,
autocommit=False,
)
# Column that goes into the Medevio URL.
# If your Medevio patient UUID is stored in a different column, change this:
UUID_COLUMN = "rid" # Medevio UUID in your table
FLAG_COLUMN = "pozchripkavytvoren" # set to 1 on success
FLAG_TS_COL = "pozchripka_vytv_at" # timestamp when created
# Optional: set your personal RID here to test on a single card; set to None for batch mode
TEST_RID = None # e.g. "fcb2414b-067b-4ca2-91b2-6c36a86d4cbb"
# TEST_RID = "fcb2414b-067b-4ca2-91b2-6c36a86d4cbb"
MESSAGE_TEXT = (
"Dobrý den, vakcína proti chřipce je k dispozici. "
"Vy nemáte účet v Medeviu a tedy si nemůžete vybrat termín, takže to zkusíme udělat manuálně. "
"Hlavní očkovací dny jsou úterý 07/10 a úterý 14/10, kdy očkujeme i COVID, kdo chce. Chřipku samostatně možno i kdykoliv jindy. Tak dejte vědět, jaký termín se Vám hodí a já si to poznamenám."
)
PATIENT_URL_TMPL = "https://my.medevio.cz/mudr-buzalkova/klinika/pacienti?pacient={uuid}"
BATCH_LIMIT = 50 # change if you want to limit how many to process in one run
PAUSE_BETWEEN = 1.0 # seconds between patients (UI courtesy)
# ===========================================
RC_DIGITS = re.compile(r"\D+")
def mark_flag_skipped(conn, rid: str):
    """
    Mark a patient whose flu request already exists:
      - sets pozchripkavytvoren (FLAG_COLUMN) = 1
      - writes the current time into pozchripka_vytv_at (FLAG_TS_COL)
    Then commits.
    """
    with conn.cursor() as cur:
        cur.execute(
            f"UPDATE patients_extracted "
            f"SET {FLAG_COLUMN}=1, {FLAG_TS_COL}=NOW() "
            f"WHERE rid=%s",
            (rid,)
        )
    conn.commit()
# ---------- DB helpers ----------
def ensure_flag_columns(conn):
    """Create required columns if missing (portable).

    Probes INFORMATION_SCHEMA for FLAG_COLUMN and FLAG_TS_COL on
    patients_extracted and issues ALTER TABLE for each absent column.
    """
    needed = {
        FLAG_COLUMN: "TINYINT(1) NULL",
        FLAG_TS_COL: "DATETIME NULL",
    }
    with conn.cursor() as cur:
        for col, coldef in needed.items():
            cur.execute("""
                SELECT COUNT(*) AS cnt
                FROM INFORMATION_SCHEMA.COLUMNS
                WHERE TABLE_SCHEMA = DATABASE()
                AND TABLE_NAME = 'patients_extracted'
                AND COLUMN_NAME = %s
            """, (col,))
            if cur.fetchone()["cnt"] == 0:
                cur.execute(f"ALTER TABLE `patients_extracted` ADD COLUMN `{col}` {coldef}")
    conn.commit()
def fetch_batch(conn):
    """
    Batch mode (TEST_RID is None) — selects patients WITHOUT a Medevio account:
      - flu_reply = 'ano'
      - mamedevioucet = 0
      - pozchripkavytvoren is NULL
      - uuid column present and non-empty
    NOTE(review): an earlier docstring also claimed an rc-prefix filter and
    "mamedevioucet true-ish"; the SQL below has no rc filter and requires
    mamedevioucet = 0 — corrected here to match the code.
    Test mode (TEST_RID set): returns only that rid.
    """
    if TEST_RID:
        sql = f"""
            SELECT
                rid, jmeno, prijmeni, rc,
                `{UUID_COLUMN}` AS uuid
            FROM patients_extracted
            WHERE rid = %s
            LIMIT 1
        """
        with conn.cursor() as cur:
            cur.execute(sql, (TEST_RID,))
            return cur.fetchall()
    sql = f"""
        SELECT
            rid, jmeno, prijmeni, rc,
            `{UUID_COLUMN}` AS uuid
        FROM patients_extracted
        WHERE flu_reply = 'ano'
          AND mamedevioucet = 0
          AND {FLAG_COLUMN} IS NULL
          AND `{UUID_COLUMN}` IS NOT NULL
          AND `{UUID_COLUMN}` <> ''
        ORDER BY prijmeni, jmeno
        LIMIT %s
    """
    with conn.cursor() as cur:
        cur.execute(sql, (BATCH_LIMIT,))
        return cur.fetchall()
def mark_flag_success(conn, rid: str):
    """Set FLAG_COLUMN=1 and FLAG_TS_COL=NOW() for the given rid, then commit."""
    with conn.cursor() as cur:
        cur.execute(
            f"UPDATE patients_extracted "
            f"SET {FLAG_COLUMN}=1, {FLAG_TS_COL}=NOW() "
            f"WHERE rid=%s",
            (rid,)
        )
    conn.commit()
# ---------- UI helpers ----------
def _strip_diacritics(s: str) -> str:
"""Return s without diacritics (e.g., 'chřipka' -> 'chripka')."""
return ''.join(c for c in unicodedata.normalize('NFKD', s) if not unicodedata.combining(c))
def has_existing_chripka_request(page: Page, timeout_ms: int = 15000) -> bool:
    """
    Detect an existing 'Očkování - Chřipka' request on the patient card.
    - Checks both card view (data-testid='patient-request-item' h4) and
      table/row view (data-testid='patient-request-row' strong).
    - Case/diacritics-insensitive.

    Returns True when any collected title contains the word "chripka"
    after diacritics are stripped.
    """
    try:
        page.get_by_text("Historie požadavků").wait_for(timeout=timeout_ms)
    except PWTimeout:
        # Some layouts may render without this exact header — proceed anyway.
        pass
    # Let the list render
    page.wait_for_timeout(600)
    titles = []
    # Collect headings from both known list layouts; ignore lookup failures.
    try:
        titles += page.locator("[data-testid='patient-request-item'] h4").all_text_contents()
    except Exception:
        pass
    try:
        titles += page.locator("[data-testid='patient-request-row'] strong").all_text_contents()
    except Exception:
        pass
    # Fallback if no headings were captured: read whole items/rows
    if not titles:
        try:
            titles += page.locator("[data-testid='patient-request-item']").all_text_contents()
        except Exception:
            pass
        try:
            titles += page.locator("[data-testid='patient-request-row']").all_text_contents()
        except Exception:
            pass
    if not titles:
        return False
    # Word-boundary match against the diacritics-stripped title text.
    pat = re.compile(r"\bchripka\b", re.IGNORECASE)
    for t in titles:
        if pat.search(_strip_diacritics(t)):
            return True
    return False
def create_flu_request_for_uuid(uuid: str) -> str:
    """
    Automate Medevio UI for one patient:
      - Open patient card
      - If a Chřipka request already exists, return 'skipped'
      - Else create 'Očkování - Chřipka' and send MESSAGE_TEXT -> return 'created'
      - On failure to send, return 'failed'
    """
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=False, slow_mo=200)
        context = browser.new_context(storage_state=str(STATE_FILE))
        ptcard = context.new_page()
        url = PATIENT_URL_TMPL.format(uuid=uuid)
        ptcard.goto(url, wait_until="networkidle")
        # Ensure the card loaded (best-effort)
        try:
            ptcard.get_by_text("Historie požadavků").wait_for(timeout=7_000)
        except PWTimeout:
            pass
        # ----- pre-check for existing Chřipka request -----
        if has_existing_chripka_request(ptcard):
            browser.close()
            return "skipped"
        # ----- Create new request -----
        ptcard.get_by_role("button", name="Nový požadavek").click()
        ptcard.wait_for_timeout(300)
        ptcard.keyboard.type("očkování - chřipka")
        ptcard.locator("[role='option']", has_text="Očkování - Chřipka").first.click()
        ptcard.get_by_role("button", name="Vytvořit požadavek").click()
        time.sleep(2)
        # Wait until back on card and the list is visible again
        try:
            ptcard.get_by_text("Historie požadavků").wait_for(timeout=10_000)
        except PWTimeout:
            pass
        time.sleep(1.0)
        # Open the new request (prefer the tile that mentions Chřipka)
        try:
            ptcard.locator("div[data-testid='patient-request-item']").first.wait_for(timeout=10_000)
            # regex tolerates both 'chřipka' and diacritics-less 'chripka'
            chripka_card = ptcard.locator("div[data-testid='patient-request-item']").filter(
                has=ptcard.locator("h4", has_text=re.compile(r"(?i)ch[řr]ipka"))
            ).first
            if chripka_card.count() == 0:
                ptcard.locator("div[data-testid='patient-request-item']").first.click(timeout=5_000)
            else:
                chripka_card.click(timeout=5_000)
        except Exception:
            # fallback: try the first request item
            try:
                ptcard.locator("div[data-testid='patient-request-item']").first.click(timeout=5_000)
            except Exception:
                browser.close()
                return "failed"
        # ----- Send the message -----
        try:
            ptcard.get_by_placeholder("Napište odpověď").wait_for(timeout=10_000)
            ptcard.get_by_placeholder("Napište odpověď").fill(MESSAGE_TEXT)
            time.sleep(1.2)
            # Try the known variants of the send-button label until one works.
            for sel in [
                "button:has-text('Odeslat')",
                "button:has-text('Odeslat zprávu')",
                "button:has-text('Odeslat SMS')",
                "button:has-text('Odeslat do aplikace')",
            ]:
                try:
                    ptcard.click(sel, timeout=4000)
                    browser.close()
                    return "created"
                except Exception:
                    continue
        except Exception:
            pass
        browser.close()
        return "failed"
# ---------- main ----------
def main():
    """Batch mode for patients without a Medevio account: create flu requests,
    skip patients who already have one, and flag processed rows in the DB."""
    conn = pymysql.connect(**MYSQL_CFG)
    try:
        ensure_flag_columns(conn)
        rows = fetch_batch(conn)
        if not rows:
            print("Nenalezen žádný pacient pro zpracování.")
            return
        print(f"Zpracujeme {len(rows)} pacientů…")
        processed = ok = fail = skipped = 0
        for r in rows:
            processed += 1
            rid = r["rid"]
            uuid = r["uuid"]
            name = f"{r.get('prijmeni','')}, {r.get('jmeno','')}"
            rc = r.get("rc", "")
            print(f"[{processed:>3}] {name} | RC {rc} | UUID {uuid}")
            try:
                result = create_flu_request_for_uuid(uuid)
                if result == "created":
                    mark_flag_success(conn, rid)
                    ok += 1
                    print("   ✓ vytvořeno + odesláno, DB flag nastaven")
                elif result == "skipped":
                    # Already has a flu request: flag it too, so it is not retried.
                    mark_flag_skipped(conn, rid)
                    skipped += 1
                    print("   ↷ již existuje požadavek na chřipku přeskočeno")
                else:
                    fail += 1
                    print("   ✗ nepodařilo se odeslat zprávu (tlačítko 'Odeslat' nenalezeno?)")
            except Exception as e:
                # UI failures must not leave a half-done transaction behind.
                fail += 1
                conn.rollback()
                print(f"   ✗ chyba: {type(e).__name__}: {e}")
            time.sleep(PAUSE_BETWEEN)
        print(f"Hotovo. processed={processed}, ok={ok}, skipped={skipped}, fail={fail}")
    finally:
        conn.close()
if __name__ == "__main__":
    main()
+239
View File
@@ -0,0 +1,239 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import shutil
import pymysql
import re
from pathlib import Path
from datetime import datetime
import time
# ==============================
# ⚙️ CONFIGURATION
# ==============================
DB_CONFIG = {
"host": "192.168.1.76",
"port": 3307,
"user": "root",
"password": "Vlado9674+",
"database": "medevio",
"charset": "utf8mb4",
}
BASE_DIR = Path(r"d:\Dropbox\Ordinace\Dokumentace_ke_zpracování\MP")
BASE_DIR.mkdir(parents=True, exist_ok=True)
def sanitize_name(name: str) -> str:
    """Make *name* safe for file systems: each forbidden character
    (<>:"/\\|?* and ASCII control chars) becomes '_', then the result
    is stripped of surrounding whitespace."""
    forbidden = set('<>:"/\\|?*') | {chr(c) for c in range(0x20)}
    sanitized = "".join("_" if ch in forbidden else ch for ch in name)
    return sanitized.strip()
def make_abbrev(title: str) -> str:
    """Create an uppercase abbreviation from a displayTitle.

    Each alphabetic word contributes its first letter; runs of digits are
    kept whole.  Punctuation is ignored.  Examples:
    "Očkování - Chřipka" -> "OC", "EKG 12 svodů" -> "E12S".

    Returns "" for an empty/None title.
    """
    if not title:
        return ""
    # Words are runs of ASCII/Czech letters or digits.
    words = re.findall(r"[A-Za-zÁ-Žá-ž0-9]+", title)
    # join() instead of repeated `abbr += ...` (avoids quadratic string building).
    parts = (w if w.isdigit() else w[0] for w in words)
    return "".join(parts).upper()
# ==============================
# 🧹 DELETE UNEXPECTED FILES
# ==============================
def clean_folder(folder: Path, valid_files: set):
    """Remove files from *folder* whose sanitized name is not in *valid_files*.

    RULE:
      - Files starting with `▲` (the processed-file marker) are ALWAYS kept.

    BUGFIX(review): the check previously read ``startswith("")`` — the `▲`
    marker character had been lost from the literal, and since every string
    starts with "", EVERY file was kept and the delete branch below was dead
    code.  Restored the `▲` prefix described in this docstring and in the
    original Czech comment ("▲filename.pdf ... nikdy nemažeme").
    """
    if not folder.exists():
        return
    for f in folder.iterdir():
        if f.is_file():
            # Processed files (▲filename.pdf) are never deleted.
            if f.name.startswith("▲"):
                continue
            sanitized = sanitize_name(f.name)
            if sanitized not in valid_files:
                print(f"🗑️ Removing unexpected file: {f.name}")
                try:
                    f.unlink()
                except Exception as e:
                    print(f"⚠️ Could not delete {f}: {e}")
# ==============================
# 📦 DB CONNECTION
# ==============================
conn = pymysql.connect(**DB_CONFIG)
cur_meta = conn.cursor(pymysql.cursors.DictCursor)
cur_blob = conn.cursor()
print("🔍 Loading metadata from DB (FAST)…")
cur_meta.execute("""
SELECT d.id AS download_id,
d.request_id,
d.filename,
d.created_at,
p.updatedAt AS req_updated_at,
p.pacient_jmeno AS jmeno,
p.pacient_prijmeni AS prijmeni,
p.displayTitle
FROM medevio_downloads d
JOIN pozadavky p ON d.request_id = p.id
ORDER BY p.updatedAt DESC
""")
rows = cur_meta.fetchall()
print(f"📋 Found {len(rows)} attachment records.\n")
# ==============================
# 🧠 MAIN LOOP
# ==============================
# BUGFIX(review): the `▲` marker character had been lost from four string
# literals below ("" in f.name, "" + filename, "" in main_folder.name,
# .replace("", "")), reducing the processed-marker logic to a no-op.
# Restored per the surrounding comments that explicitly describe ▲-flagged
# files and folders.  Czech comments translated to English.
processed_requests = set()
for r in rows:
    req_id = r["request_id"]
    # A request can own several attachments; handle each request only once.
    if req_id in processed_requests:
        continue
    processed_requests.add(req_id)
    # ========== FETCH ALL VALID FILES FOR THIS REQUEST ==========
    cur_meta.execute(
        "SELECT filename FROM medevio_downloads WHERE request_id=%s",
        (req_id,)
    )
    valid_files = {sanitize_name(row["filename"]) for row in cur_meta.fetchall()}
    # ========== FOLDER NAME BASED ON UPDATEDAT ==========
    updated_at = r["req_updated_at"] or datetime.now()
    date_str = updated_at.strftime("%Y-%m-%d")
    prijmeni = sanitize_name(r["prijmeni"] or "Unknown")
    jmeno = sanitize_name(r["jmeno"] or "")
    title = r.get("displayTitle") or ""
    abbr = make_abbrev(title)
    clean_folder_name = sanitize_name(
        f"{date_str} {prijmeni}, {jmeno} [{abbr}] {req_id}"
    )
    # ========== DETECT EXISTING FOLDER (WITH OR WITHOUT ▲) ==========
    existing_folder = None
    folder_has_flag = False
    for f in BASE_DIR.iterdir():
        if f.is_dir() and req_id in f.name:
            existing_folder = f
            folder_has_flag = ("▲" in f.name)
            break
    # If a folder for this request already exists, keep working inside it.
    main_folder = existing_folder if existing_folder else BASE_DIR / clean_folder_name
    # ========== MERGE DUPLICATES ==========
    possible_dups = [
        f for f in BASE_DIR.iterdir()
        if f.is_dir() and req_id in f.name and f != main_folder
    ]
    for dup in possible_dups:
        print(f"♻️ Merging duplicate folder: {dup.name}")
        clean_folder(dup, valid_files)
        main_folder.mkdir(parents=True, exist_ok=True)
        for f in dup.iterdir():
            if f.is_file():
                # Simply move it; a leading ▲ stays in the file name.
                target = main_folder / f.name
                if not target.exists():
                    f.rename(target)
        shutil.rmtree(dup, ignore_errors=True)
    # ========== CLEAN MAIN FOLDER ==========
    clean_folder(main_folder, valid_files)
    # ========== DOWNLOAD MISSING FILES ==========
    added_new_file = False
    main_folder.mkdir(parents=True, exist_ok=True)
    for filename in valid_files:
        dest_plain = main_folder / filename
        dest_marked = main_folder / ("▲" + filename)
        # File already present (either as filename or as ▲filename).
        if dest_plain.exists() or dest_marked.exists():
            continue
        # A new file arrives → the folder-level ▲ flag must be cleared below.
        added_new_file = True
        cur_blob.execute(
            "SELECT file_content FROM medevio_downloads "
            "WHERE request_id=%s AND filename=%s",
            (req_id, filename)
        )
        row = cur_blob.fetchone()
        if not row:
            continue
        content = row[0]
        if not content:
            continue
        with open(dest_plain, "wb") as f:
            f.write(content)
        print(f"💾 Wrote: {dest_plain.relative_to(BASE_DIR)}")
    # ==============================
    # 🔵 REMOVE FOLDER-LEVEL ▲ ONLY IF NEW FILE ADDED
    # ==============================
    if added_new_file:
        # The folder should be renamed without the ▲ marker.
        if "▲" in main_folder.name:
            new_name = main_folder.name.replace("▲", "")
            new_name = new_name.strip()  # just in case
            new_path = main_folder.parent / new_name
            if new_path != main_folder:
                try:
                    main_folder.rename(new_path)
                    print(f"🔄 Folder flag ▲ removed → {new_name}")
                    main_folder = new_path
                except Exception as e:
                    print(f"⚠️ Could not rename folder: {e}")
    else:
        # No new files → NEVER touch the folder name.
        pass
print("\n🎯 Export complete.\n")
cur_blob.close()
cur_meta.close()
conn.close()
+80
View File
@@ -0,0 +1,80 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Open the Medevio daily agenda calendar,
inspect the rendered HTML, and probe JS memory
to see what data is exposed.
"""
from playwright.sync_api import sync_playwright
STATE_FILE = "../medevio_storage.json"
AGENDA_URL = (
"https://my.medevio.cz/mudr-buzalkova/klinika/kalendar/agenda-dne/"
"?kalendar=144c4e12-347c-49ca-9ec0-8ca965a4470d&datum=2025-10-17"
)
def main():
    """Open the agenda-day calendar with a saved Playwright session and explore it:
    dump sample appointment HTML, probe global JS state for exposed app data, and
    log backend calls while the operator clicks items manually.
    """
    with sync_playwright() as pw:
        browser = pw.chromium.launch(headless=False, slow_mo=150)
        context = browser.new_context(storage_state=STATE_FILE)
        page = context.new_page()
        print("🔗 Opening agenda-day calendar...")
        page.goto(AGENDA_URL, wait_until="networkidle", timeout=90_000)
        # -------- Check login --------
        # If the SPA bounced us to a login screen, the stored session has expired.
        body = (page.text_content("body") or "").lower()
        if any(x in body for x in ["přihlášení", "přihlásit", "sign in", "login"]):
            raise SystemExit("❌ Not logged in refresh medevio_storage.json.")
        # -------- Wait for appointments to render --------
        page.wait_for_timeout(4000)
        # -------- Dump a few appointment blocks --------
        blocks = page.locator("div.rbc-event-inner-content, div[data-testid='Reservation']").evaluate_all(
            "(els) => els.map(e => e.outerHTML)"
        )
        print(f"\n✅ Found {len(blocks)} appointment blocks. Showing first 3:\n")
        for snippet in blocks[:3]:
            print(snippet)
            print("-" * 80)
        # -------- Explore window memory --------
        print("\n🔍 Inspecting global JS variables...")
        keys = page.evaluate("Object.keys(window)")
        interesting = [k for k in keys if any(w in k.lower() for w in ["mede", "cal", "react", "state", "reserv"])]
        print("Interesting keys:", interesting[:20])
        # Probe conventional global-state names an SPA might expose.
        for candidate in [
            "window.__INITIAL_STATE__",
            "window.__INITIAL_DATA__",
            "window.__REACT_DEVTOOLS_GLOBAL_HOOK__",
            "window.medevioCalendar",
            "window.calendarStore",
            "window.reduxStore",
            "window.reactProps",
        ]:
            try:
                data = page.evaluate(f"JSON.stringify({candidate}, null, 2)")
                # Only show candidates that actually hold substantial data.
                if data and len(data) > 200:
                    print(f"\n===== {candidate} =====\n{data[:1000]}...\n")
            except Exception:
                # Candidate undefined / not serializable — not interesting.
                pass
        # -------- Optionally: listen for network requests while you click --------
        def log_request(req):
            # Print any request that looks like a backend/API call.
            url = req.url
            if any(x in url for x in ["pozadavek", "request", "api"]):
                print("📡", url)
        page.on("request", log_request)
        print("\n👉 Now click manually on a few agenda items to open their detail cards.")
        print(" Any backend calls will appear below.\n")
        page.wait_for_timeout(40000)  # give yourself ~40s to click around
        browser.close()
if __name__ == "__main__":
main()
+104
View File
@@ -0,0 +1,104 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from playwright.sync_api import sync_playwright
import json
import requests
STATE_FILE = "../medevio_storage.json"
AGENDA_URL = (
"https://my.medevio.cz/mudr-buzalkova/klinika/kalendar/agenda-dne/"
"?kalendar=144c4e12-347c-49ca-9ec0-8ca965a4470d&datum=2025-10-17"
)
GRAPHQL_URL = "https://api.medevio.cz/graphql"
def extract_agenda_rows(page):
    """Scrape every reservation row in the agenda grid into a list of dicts.

    Each dict carries the row's ``data-id`` under ``"id"`` plus one entry per
    MUI data-grid cell, keyed by the cell's ``data-field`` attribute (or
    ``"unknown"`` when that attribute is absent). Exits the program when no
    rows are rendered at all.
    """
    row_locator = page.locator("div[data-testid='reservation-row']")
    if not row_locator.count():
        raise SystemExit("❌ No rows found — check selector or login session.")
    print(f"\nFound {row_locator.count()} rows, showing sample structure:")
    # Dump the first few rows' HTML so the markup/selectors can be inspected.
    for sample in row_locator.all()[:3]:
        print("-" * 80)
        print(sample.evaluate("el => el.outerHTML")[:500], "...\n")
    harvested = []
    for grid_row in row_locator.all():
        record = {"id": grid_row.get_attribute("data-id") or ""}
        for cell in grid_row.locator("div.MuiDataGrid-cell").all():
            key = cell.get_attribute("data-field") or "unknown"
            record[key] = (cell.text_content() or "").strip()
        harvested.append(record)
    return harvested
def step1_extract_appointments():
    """Open the agenda-day page in a real browser and scrape the visible rows.

    Relies on the Playwright storage state saved in STATE_FILE for auth;
    returns the list of row dicts produced by extract_agenda_rows().
    """
    with sync_playwright() as pw:
        browser = pw.chromium.launch(headless=False, slow_mo=150)
        context = browser.new_context(storage_state=STATE_FILE)
        page = context.new_page()
        print("🔗 Opening Medevio agenda-day page...")
        page.goto(AGENDA_URL, wait_until="networkidle", timeout=90_000)
        # Wait until the data grid has rendered at least one reservation row.
        page.wait_for_selector("div[data-testid='reservation-row']", timeout=30_000)
        appointments = extract_agenda_rows(page)
        browser.close()
        print(f"✅ Extracted {len(appointments)} appointments")
        for a in appointments:
            print(f"{a.get('StartDateTime','?')} {a.get('Patient','?')}: {a.get('Reason','?')} ({a['id']})")
        return appointments
def step2_fetch_detail(session_cookies, reservation_id, timeout=30):
    """POST a ReservationDetail GraphQL query and print the raw response.

    Args:
        session_cookies: dict of cookies copied from the saved browser session.
        reservation_id: reservation UUID taken from the agenda grid.
        timeout: request timeout in seconds (new optional parameter; the old
            two-argument call still works unchanged).

    Fixes over the original: an explicit timeout so a dead endpoint cannot
    hang the script forever, ``json=`` instead of hand-serializing the
    payload, and a guard around ``response.json()`` so a non-JSON error page
    is printed instead of crashing.
    """
    headers = {
        "content-type": "application/json",
        "origin": "https://my.medevio.cz",
        "referer": "https://my.medevio.cz/",
    }
    query = {
        "operationName": "ReservationDetail",
        "variables": {"id": reservation_id},
        "query": """
query ReservationDetail($id: ID!) {
  reservation(id: $id) {
    id
    reason
    startDateTime
    endDateTime
    status
    note
    patient { id name age }
    doctor { id name }
    location { name }
  }
}
""",
    }
    print(f"\n📡 Fetching GraphQL detail for {reservation_id}...")
    response = requests.post(GRAPHQL_URL, headers=headers, cookies=session_cookies,
                             json=query, timeout=timeout)
    print("Status:", response.status_code)
    try:
        print(json.dumps(response.json(), indent=2, ensure_ascii=False))
    except ValueError:
        # Non-JSON body (e.g. HTML error page) — show it raw instead of crashing.
        print(response.text)
if __name__ == "__main__":
    # Step 1: scrape the agenda grid in a real browser session.
    appointments = step1_extract_appointments()
    if not appointments:
        raise SystemExit("No appointments found.")
    # Use first appointment for detail fetch
    reservation_id = appointments[0]["id"]
    # Load session cookies from storage
    with open(STATE_FILE, "r", encoding="utf-8") as f:
        state = json.load(f)
    # Keep only cookies scoped to a medevio domain for the direct API call.
    cookies = {c["name"]: c["value"] for c in state.get("cookies", []) if "medevio" in c["domain"]}
    step2_fetch_detail(cookies, reservation_id)
+44
View File
@@ -0,0 +1,44 @@
# Send a fields-only GraphQL introspection query to the Medevio API, using
# cookies from a saved Playwright session, and print the start of the schema.
import json, requests

GRAPHQL_URL = "https://api.medevio.cz/graphql"
REQUEST_TIMEOUT = 30  # seconds — avoids hanging forever on a dead endpoint

FULL_INTROSPECTION_QUERY = """
query IntrospectionQuery {
  __schema {
    queryType { name }
    mutationType { name }
    subscriptionType { name }
    types {
      ...FullType
    }
  }
}
fragment FullType on __Type {
  kind
  name
  fields(includeDeprecated: true) {
    name
  }
}
"""

headers = {
    "content-type": "application/json",
    "origin": "https://my.medevio.cz",
    "referer": "https://my.medevio.cz/",
}

# Load cookies from storage.
# FIX: the original used json.load(open(...)) which leaks the file handle;
# a context manager closes it deterministically.
with open("../medevio_storage.json", encoding="utf-8") as fh:
    state = json.load(fh)
cookies = {c["name"]: c["value"] for c in state["cookies"] if "medevio" in c["domain"]}

payload = {"operationName": "IntrospectionQuery", "query": FULL_INTROSPECTION_QUERY}
# json= lets requests serialize and set headers; explicit timeout added.
r = requests.post(GRAPHQL_URL, headers=headers, cookies=cookies, json=payload,
                  timeout=REQUEST_TIMEOUT)
print("Status:", r.status_code)
try:
    data = r.json()
    print(json.dumps(data, indent=2, ensure_ascii=False)[:2000])
except Exception as e:
    print("Could not decode response:", e)
    print(r.text)
+41
View File
@@ -0,0 +1,41 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Interactive GraphQL sniffer: opens the Medevio calendar in a visible browser
# and appends the body of every outgoing GraphQL POST to a .jsonl log while
# the operator clicks around. Stop with Ctrl+C or by closing the browser.
from playwright.sync_api import sync_playwright
import json, os, time

STATE_FILE = "../medevio_storage.json"
GRAPHQL_LOG = f"graphql_capture_{int(time.time())}.jsonl"  # one log file per run

with sync_playwright() as pw:
    browser = pw.chromium.launch(headless=False, slow_mo=200)
    context = browser.new_context(storage_state=STATE_FILE)
    page = context.new_page()

    def log_graphql(req):
        # Request hook: persist each GraphQL POST body as one JSON line.
        if "graphql" in req.url and req.method == "POST":
            try:
                body = req.post_data or ""
                data = json.loads(body)
                with open(GRAPHQL_LOG, "a", encoding="utf-8") as f:
                    f.write(json.dumps(data, ensure_ascii=False) + "\n")
                print(f"📡 {data.get('operationName')} saved")
            except Exception:
                # Best-effort logging only — never break the page by raising here.
                pass

    page.on("request", log_graphql)
    print("🔗 Opening Medevio main page...")
    page.goto("https://my.medevio.cz/mudr-buzalkova/klinika/kalendar/agenda-dne/"
              "?kalendar=144c4e12-347c-49ca-9ec0-8ca965a4470d", wait_until="networkidle")
    print("\n👉 Click various items in Medevio (calendar, reservations, requests, etc.).")
    print(" Every GraphQL call will be saved to", GRAPHQL_LOG)
    print(" Press Ctrl+C or close the browser when done.\n")
    # Idle loop: keep the browser alive while the operator clicks around.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        browser.close()

print(f"\n✅ Finished — GraphQL calls saved to {GRAPHQL_LOG}")
+100
View File
@@ -0,0 +1,100 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Query Medevio for the full agenda of 17 Oct 2025 and print raw API response.
"""
import json
import requests
GRAPHQL_URL = "https://api.medevio.cz/graphql"
CALENDAR_ID = "144c4e12-347c-49ca-9ec0-8ca965a4470d"
CLINIC_SLUG = "mudr-buzalkova"
def load_gateway_token(storage_path="medevio_storage.json"):
    """Pull the Medevio ``gateway-access-token`` cookie out of a Playwright
    storage-state file and return its value.

    Exits with a readable message when the file is missing or the cookie is
    absent (e.g. the saved browser session has expired).
    """
    import json
    from pathlib import Path
    storage = Path(storage_path)
    if not storage.exists():
        raise SystemExit(f"❌ Storage file not found: {storage}")
    state = json.loads(storage.read_text(encoding="utf-8"))
    for cookie in state["cookies"]:
        if cookie["name"] == "gateway-access-token":
            return cookie["value"]
    raise SystemExit("❌ gateway-access-token not found in storage file.")
# ==================== Build and send the agenda request ====================
gateway_token = load_gateway_token()
headers = {
    "content-type": "application/json",
    "origin": "https://my.medevio.cz",
    "referer": "https://my.medevio.cz/",
    "authorization": f"Bearer {gateway_token}",
}
payload = {
    "operationName": "ClinicAgenda_ListClinicReservations",
    "variables": {
        "calendarIds": [CALENDAR_ID],
        "clinicSlug": CLINIC_SLUG,
        # Fixed one-day window (17 Oct 2025, expressed in UTC).
        "since": "2025-10-16T22:00:00.000Z",
        "until": "2025-10-17T21:59:59.999Z",
        "locale": "cs",
        "emptyCalendarIds": False,
    },
    "query": """query ClinicAgenda_ListClinicReservations(
  $calendarIds: [UUID!],
  $clinicSlug: String!,
  $locale: Locale!,
  $since: DateTime!,
  $until: DateTime!,
  $emptyCalendarIds: Boolean!
) {
  reservations: listClinicReservations(
    clinicSlug: $clinicSlug,
    calendarIds: $calendarIds,
    since: $since,
    until: $until
  ) @skip(if: $emptyCalendarIds) {
    id
    start
    end
    note
    done
    color
    request {
      id
      displayTitle(locale: $locale)
      extendedPatient {
        name
        surname
        dob
        insuranceCompanyObject { shortName }
      }
    }
  }
}""",
}
print("📡 Querying Medevio API for agenda...")
# json= handles serialization; explicit timeout so a dead endpoint can't hang the run.
r = requests.post(GRAPHQL_URL, headers=headers, json=payload, timeout=30)
print("Status:", r.status_code)
try:
    data = r.json()
    print(json.dumps(data, indent=2, ensure_ascii=False))
except Exception as e:
    print("❌ Could not parse JSON:", e)
    print(r.text)
+176
View File
@@ -0,0 +1,176 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Query Medevio for the full agenda of 17 Oct 2025,
print raw API response, and export to Excel.
"""
import json
import time
from pathlib import Path
import requests
import pandas as pd
from openpyxl import load_workbook
from openpyxl.styles import Font, Alignment
from openpyxl.utils import get_column_letter
GRAPHQL_URL = "https://api.medevio.cz/graphql"
CALENDAR_ID = "144c4e12-347c-49ca-9ec0-8ca965a4470d"
CLINIC_SLUG = "mudr-buzalkova"
# ==================== Load Token ====================
def load_gateway_token(storage_path="medevio_storage.json"):
    """Pull the Medevio ``gateway-access-token`` cookie out of a Playwright
    storage-state file and return its value.

    Exits with a readable message when the file is missing or the cookie is
    absent (e.g. the saved browser session has expired).
    """
    from pathlib import Path
    storage = Path(storage_path)
    if not storage.exists():
        raise SystemExit(f"❌ Storage file not found: {storage}")
    state = json.loads(storage.read_text(encoding="utf-8"))
    for cookie in state["cookies"]:
        if cookie["name"] == "gateway-access-token":
            return cookie["value"]
    raise SystemExit("❌ gateway-access-token not found in storage file.")
gateway_token = load_gateway_token()
headers = {
"content-type": "application/json",
"origin": "https://my.medevio.cz",
"referer": "https://my.medevio.cz/",
"authorization": f"Bearer {gateway_token}",
}
payload = {
"operationName": "ClinicAgenda_ListClinicReservations",
"variables": {
"calendarIds": [CALENDAR_ID],
"clinicSlug": CLINIC_SLUG,
"since": "2025-10-20T00:00:00.001Z",
"until": "2025-10-27T21:59:59.999Z",
"locale": "cs",
"emptyCalendarIds": False,
},
"query": """query ClinicAgenda_ListClinicReservations(
$calendarIds: [UUID!],
$clinicSlug: String!,
$locale: Locale!,
$since: DateTime!,
$until: DateTime!,
$emptyCalendarIds: Boolean!
) {
reservations: listClinicReservations(
clinicSlug: $clinicSlug,
calendarIds: $calendarIds,
since: $since,
until: $until
) @skip(if: $emptyCalendarIds) {
id
start
end
note
done
color
request {
id
displayTitle(locale: $locale)
extendedPatient {
name
surname
dob
insuranceCompanyObject { shortName }
}
}
}
}""",
}
# ==================== Query API ====================
print("📡 Querying Medevio API for agenda...")
r = requests.post(GRAPHQL_URL, headers=headers, data=json.dumps(payload))
print("Status:", r.status_code)
try:
data = r.json()
except Exception as e:
print("❌ Could not parse JSON:", e)
print(r.text)
raise SystemExit()
if "data" not in data or "reservations" not in data["data"]:
raise SystemExit("⚠️ No 'reservations' data found in response.")
reservations = data["data"]["reservations"]
from datetime import datetime
from dateutil import parser, tz
# ===== Process reservations into table =====
rows = []
for r in reservations:
req = r.get("request") or {}
patient = req.get("extendedPatient") or {}
insurance = patient.get("insuranceCompanyObject") or {}
# parse datetimes (convert to local time)
try:
start_dt = parser.isoparse(r.get("start")).astimezone(tz.gettz("Europe/Prague"))
end_dt = parser.isoparse(r.get("end")).astimezone(tz.gettz("Europe/Prague"))
except Exception:
start_dt = end_dt = None
date_str = start_dt.strftime("%Y-%m-%d") if start_dt else ""
time_interval = f"{start_dt.strftime('%H:%M')}-{end_dt.strftime('%H:%M')}" if start_dt and end_dt else ""
rows.append({
"Date": date_str,
"Time": time_interval,
"Title": req.get("displayTitle") or "",
"Patient": f"{patient.get('surname','')} {patient.get('name','')}".strip(),
"DOB": patient.get("dob") or "",
"Insurance": insurance.get("shortName") or "",
"Note": r.get("note") or "",
"Color": r.get("color") or "",
"Request_ID": req.get("id") or "",
"Reservation_ID": r.get("id"),
})
df = pd.DataFrame(rows).sort_values(["Date", "Time"])
# ===== Excel export =====
EXPORT_DIR = Path(r"C:\Users\vlado\PycharmProjects\Medevio\exports")
EXPORT_DIR.mkdir(exist_ok=True)
timestamp = time.strftime("%Y-%m-%d %H-%M-%S")
xlsx_path = EXPORT_DIR / f"Medevio_agenda_{timestamp}.xlsx"
# remove old files
for old in EXPORT_DIR.glob("Medevio_agenda_*.xlsx"):
try:
old.unlink()
except Exception:
pass
df.to_excel(xlsx_path, index=False)
wb = load_workbook(xlsx_path)
ws = wb.active
# style header
for col in range(1, len(df.columns) + 1):
c = ws.cell(row=1, column=col)
c.font = Font(bold=True)
c.alignment = Alignment(horizontal="center")
ws.column_dimensions[get_column_letter(col)].width = 20
ws.freeze_panes = "A2"
wb.save(xlsx_path)
print(f"📘 Exported clean agenda view to:\n{xlsx_path}")
+299
View File
@@ -0,0 +1,299 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Query Medevio for the full agenda of 17 Oct 2025,
print raw API response, and export to Excel.
"""
import re
import json
import time
from pathlib import Path
import requests
import pandas as pd
from openpyxl import load_workbook
from openpyxl.styles import Font, Alignment, PatternFill, Border, Side
from openpyxl.utils import get_column_letter
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from Functions import get_reports_folder
from openpyxl.utils.dataframe import dataframe_to_rows
GRAPHQL_URL = "https://api.medevio.cz/graphql"
CALENDAR_ID = "144c4e12-347c-49ca-9ec0-8ca965a4470d"
CLINIC_SLUG = "mudr-buzalkova"
# ==================== Load Token ====================
def load_gateway_token(storage_path="medevio_storage.json"):
    """Pull the Medevio ``gateway-access-token`` cookie out of a Playwright
    storage-state file and return its value.

    Exits with a readable message when the file is missing or the cookie is
    absent (e.g. the saved browser session has expired).
    """
    storage = Path(storage_path)
    if not storage.exists():
        raise SystemExit(f"❌ Storage file not found: {storage}")
    state = json.loads(storage.read_text(encoding="utf-8"))
    for cookie in state["cookies"]:
        if cookie["name"] == "gateway-access-token":
            return cookie["value"]
    raise SystemExit("❌ gateway-access-token not found in storage file.")
gateway_token = load_gateway_token()
headers = {
"content-type": "application/json",
"origin": "https://my.medevio.cz",
"referer": "https://my.medevio.cz/",
"authorization": f"Bearer {gateway_token}",
}
# === Dynamic date range: from today (UTC) to one month ahead ===
dnes = datetime.utcnow().date()
since = datetime.combine(dnes, datetime.min.time()).replace(microsecond=1)
until = since + relativedelta(months=1) - timedelta(milliseconds=1)
# Naive UTC timestamps with an explicit trailing "Z", as the API expects.
since_iso = since.isoformat() + "Z"
until_iso = until.isoformat() + "Z"
payload = {
    "operationName": "ClinicAgenda_ListClinicReservations",
    "variables": {
        "calendarIds": [CALENDAR_ID],
        "clinicSlug": CLINIC_SLUG,
        "since": since_iso,
        # BUG FIX: the computed rolling end date was shadowed by a hard-coded
        # "2025-11-30T21:59:59.999Z" literal, freezing the export window and
        # contradicting the "until:" value printed below. Use the dynamic value
        # so the report really covers one month ahead on every run.
        "until": until_iso,
        "locale": "cs",
        "emptyCalendarIds": False,
    },
    "query": """query ClinicAgenda_ListClinicReservations(
  $calendarIds: [UUID!],
  $clinicSlug: String!,
  $locale: Locale!,
  $since: DateTime!,
  $until: DateTime!,
  $emptyCalendarIds: Boolean!
) {
  reservations: listClinicReservations(
    clinicSlug: $clinicSlug,
    calendarIds: $calendarIds,
    since: $since,
    until: $until
  ) @skip(if: $emptyCalendarIds) {
    id
    start
    end
    note
    done
    color
    request {
      id
      displayTitle(locale: $locale)
      extendedPatient {
        name
        surname
        dob
        insuranceCompanyObject { shortName }
      }
    }
  }
}""",
}
print("since:", since_iso)
print("until:", until_iso)
# ==================== Query API ====================
print("📡 Querying Medevio API for agenda...")
r = requests.post(GRAPHQL_URL, headers=headers, data=json.dumps(payload))
print("Status:", r.status_code)
try:
data = r.json()
except Exception as e:
print("❌ Could not parse JSON:", e)
print(r.text)
raise SystemExit()
if "data" not in data or "reservations" not in data["data"]:
raise SystemExit("⚠️ No 'reservations' data found in response.")
reservations = data["data"]["reservations"]
from dateutil import parser, tz
# ===== Process reservations into table =====
rows = []
for r in reservations:
req = r.get("request") or {}
patient = req.get("extendedPatient") or {}
insurance = patient.get("insuranceCompanyObject") or {}
try:
start_dt = parser.isoparse(r.get("start")).astimezone(tz.gettz("Europe/Prague"))
end_dt = parser.isoparse(r.get("end")).astimezone(tz.gettz("Europe/Prague"))
except Exception:
start_dt = end_dt = None
date_str = start_dt.strftime("%Y-%m-%d") if start_dt else ""
time_interval = f"{start_dt.strftime('%H:%M')}-{end_dt.strftime('%H:%M')}" if start_dt and end_dt else ""
rows.append({
"Date": date_str,
"Time": time_interval,
"Title": req.get("displayTitle") or "",
"Patient": f"{patient.get('surname','')} {patient.get('name','')}".strip(),
"DOB": patient.get("dob") or "",
"Insurance": insurance.get("shortName") or "",
"Note": r.get("note") or "",
"Color": r.get("color") or "",
"Request_ID": req.get("id") or "",
"Reservation_ID": r.get("id"),
})
df = pd.DataFrame(rows).sort_values(["Date", "Time"])
def kw_pattern(kw: str) -> str:
    """Build a regex that matches *kw* as an exact phrase.

    The phrase must not begin in the middle of a word and must not be
    immediately continued by a '+word' suffix, so that e.g.
    'žloutenka a' matches '… žloutenka a …' but NOT '… žloutenka a+b …',
    while 'žloutenka a+b' still matches exactly that phrase.
    """
    escaped = re.escape(kw)
    # (?<!\w)        — no word character directly before the phrase
    # (?!\s*\+\s*\w) — phrase not followed by an (optionally spaced) '+word'
    return rf"(?<!\w){escaped}(?!\s*\+\s*\w)"
# ===== Excel export =====
EXPORT_DIR = Path(r"u:\Dropbox\Ordinace\Reporty")
EXPORT_DIR.mkdir(exist_ok=True, parents=True)
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
xlsx_path = EXPORT_DIR / f"{timestamp} Agenda (30 dní dopředu).xlsx"
# Safely delete older Agenda reports
for old in EXPORT_DIR.glob("*Agenda (30 dní dopředu).xlsx"):
try:
old.unlink()
except PermissionError:
print(f"⚠️ File is open, skipping delete: {old}")
except Exception as e:
print(f"⚠️ Could not delete {old}: {e}")
# Export DataFrame
df.to_excel(xlsx_path, index=False)
wb = load_workbook(xlsx_path)
ws = wb.active
ws.title = "Agenda" # ✅ rename sheet
# === Apply styling and custom column widths ===
widths = {
1: 11, # Date
2: 13, # Time
3: 45, # Title
4: 30, # Patient
5: 15, # DOB
6: 15, # Insurance
7: 30, # Note
8: 15, # Color
9: 37, # Request_ID
10: 37 # Reservation_ID
}
# Define styles
header_fill = PatternFill("solid", fgColor="FFFF00") # real yellow
alt_fill = PatternFill("solid", fgColor="F2F2F2") # light grey alternate rows
thin_border = Border(
left=Side(style="thin", color="000000"),
right=Side(style="thin", color="000000"),
top=Side(style="thin", color="000000"),
bottom=Side(style="thin", color="000000")
)
# === Format header ===
for col_idx in range(1, len(df.columns) + 1):
col_letter = get_column_letter(col_idx)
cell = ws.cell(row=1, column=col_idx)
cell.font = Font(bold=True)
cell.alignment = Alignment(horizontal="center", vertical="center")
cell.fill = header_fill
cell.value = str(cell.value).upper()
cell.border = thin_border
ws.column_dimensions[col_letter].width = widths.get(col_idx, 20)
# === Format data rows ===
for r_idx, row in enumerate(ws.iter_rows(min_row=2, max_row=ws.max_row, max_col=ws.max_column), start=2):
for cell in row:
cell.border = thin_border
if r_idx % 2 == 0: # alternate row background
cell.fill = alt_fill
ws.freeze_panes = "A2"
from openpyxl.utils.dataframe import dataframe_to_rows
# === Vaccine sheet configuration ===
VACCINE_SHEETS = {
"Chřipka": ["očkování", "chřipka"],
"COVID": ["očkování", "covid"],
"Pneumokok": ["očkování", "pneumo"],
"Hep A": ["očkování", "žloutenka a"],
"Hep B": ["očkování", "žloutenka b"],
"Hep A+B": ["očkování", "žloutenka a+b"],
"Klíšťovka": ["očkování", "klíšť"]
}
# === Generate sheets based on keyword combinations ===
for sheet_name, keywords in VACCINE_SHEETS.items():
mask = pd.Series(True, index=df.index)
title_series = df["Title"].fillna("")
for kw in keywords:
pattern = kw_pattern(kw)
mask &= title_series.str.contains(pattern, flags=re.IGNORECASE, regex=True)
filtered_df = df[mask].copy()
if filtered_df.empty:
print(f"️ No matches for sheet '{sheet_name}' ({' AND '.join(keywords)})")
continue
ws_new = wb.create_sheet(title=sheet_name)
for r in dataframe_to_rows(filtered_df, index=False, header=True):
ws_new.append(r)
# === Apply formatting ===
for col_idx in range(1, len(filtered_df.columns) + 1):
col_letter = get_column_letter(col_idx)
c = ws_new.cell(row=1, column=col_idx)
c.font = Font(bold=True)
c.alignment = Alignment(horizontal="center", vertical="center")
c.fill = PatternFill("solid", fgColor="FFFF00") # bright yellow header
c.value = str(c.value).upper()
c.border = thin_border
ws_new.column_dimensions[col_letter].width = widths.get(col_idx, 20)
# Borders + alternating rows
for r_idx, row in enumerate(ws_new.iter_rows(min_row=2, max_row=ws_new.max_row, max_col=ws_new.max_column), start=2):
for cell in row:
cell.border = thin_border
if r_idx % 2 == 0:
cell.fill = PatternFill("solid", fgColor="F2F2F2")
ws_new.freeze_panes = "A2"
print(f"🟡 Created sheet '{sheet_name}' with {len(filtered_df)} rows ({' AND '.join(keywords)})")
wb.save(xlsx_path)
print(f"📘 Exported clean agenda view to:\n{xlsx_path}")
@@ -0,0 +1,117 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Read OPEN Medevio requests (požadavky) from local MySQL table `pozadavky`
and export to Excel in the same visual format as Agenda.
"""
import pymysql
import pandas as pd
from datetime import datetime
from pathlib import Path
from openpyxl import Workbook
from openpyxl.styles import Font, Alignment, PatternFill, Border, Side
from openpyxl.utils import get_column_letter
from openpyxl.utils.dataframe import dataframe_to_rows
# ==============================
# 🔧 CONFIGURATION
# ==============================
# SECURITY NOTE(review): database credentials are hard-coded in source —
# move them to environment variables or a local config file kept out of
# version control before sharing this repository.
DB_CONFIG = {
    "host": "192.168.1.76",
    "port": 3307,
    "user": "root",
    "password": "Vlado9674+",
    "database": "medevio",
    "charset": "utf8mb4",
    # DictCursor → rows come back as dicts, which feeds pandas.DataFrame directly.
    "cursorclass": pymysql.cursors.DictCursor,
}
EXPORT_DIR = Path(r"u:\Dropbox\Ordinace\Reporty")
EXPORT_DIR.mkdir(parents=True, exist_ok=True)
xlsx_path = EXPORT_DIR / f"{datetime.now():%Y-%m-%d_%H-%M-%S} Otevřené požadavky.xlsx"
# ==============================
# 📡 LOAD DATA
# ==============================
print("📡 Fetching open requests from MySQL...")
conn = pymysql.connect(**DB_CONFIG)
with conn.cursor() as cur:
cur.execute("""
SELECT id AS Request_ID,
displayTitle AS Title,
pacient_prijmeni AS Pacient_Prijmeni,
pacient_jmeno AS Pacient_Jmeno,
pacient_rodnecislo AS RodneCislo,
createdAt AS Created,
updatedAt AS Updated,
doneAt AS Done,
removedAt AS Removed
FROM pozadavky
WHERE doneAt IS NULL AND removedAt IS NULL
ORDER BY createdAt DESC
""")
rows = cur.fetchall()
conn.close()
if not rows:
print("⚠️ No open requests found.")
raise SystemExit()
df = pd.DataFrame(rows)
print(f"✅ Loaded {len(df)} open requests.")
# ==============================
# 🧩 CLEAN + PREPARE
# ==============================
df["Patient"] = (df["Pacient_Prijmeni"].fillna("") + " " + df["Pacient_Jmeno"].fillna("")).str.strip()
df = df.rename(columns={
"RodneCislo": "Rodné číslo",
"Request_ID": "Request ID",
})
df = df[["Created", "Title", "Patient", "Rodné číslo", "Request ID", "Updated"]]
# ==============================
# 🧾 EXPORT TO EXCEL
# ==============================
wb = Workbook()
ws = wb.active
ws.title = "Otevřené požadavky"
# === Styles ===
header_fill = PatternFill("solid", fgColor="00FF99")
alt_fill = PatternFill("solid", fgColor="F2FFF2")
thin_border = Border(
left=Side(style="thin", color="000000"),
right=Side(style="thin", color="000000"),
top=Side(style="thin", color="000000"),
bottom=Side(style="thin", color="000000")
)
# === Write DataFrame (header row first, then data rows) ===
# FIX: the original enumerated the rows but never used the index.
for excel_row in dataframe_to_rows(df, index=False, header=True):
    ws.append(excel_row)
# === Header styling ===
for col_idx in range(1, len(df.columns) + 1):
c = ws.cell(row=1, column=col_idx)
c.font = Font(bold=True)
c.alignment = Alignment(horizontal="center", vertical="center")
c.fill = header_fill
c.border = thin_border
ws.column_dimensions[get_column_letter(col_idx)].width = 25
# === Data styling ===
for r_idx, row in enumerate(ws.iter_rows(min_row=2, max_row=ws.max_row, max_col=ws.max_column), start=2):
for cell in row:
cell.border = thin_border
if r_idx % 2 == 0:
cell.fill = alt_fill
ws.freeze_panes = "A2"
wb.save(xlsx_path)
print(f"📘 Exported {len(df)} open requests → {xlsx_path}")
+98
View File
@@ -0,0 +1,98 @@
# print_patients_first_page_ids.py
from pathlib import Path
import json, time, sys
from playwright.sync_api import sync_playwright, TimeoutError as PWTimeout
STATE_FILE = r"../medevio_storage.json"
PATIENTS_URL = "https://my.medevio.cz/mudr-buzalkova/klinika/pacienti"
def harvest_ids_on_page(page):
    """Collect the set of patient UUIDs (``data-id``) from the rows currently
    rendered in the patients data grid."""
    collected = set()
    # Two selector variants because the grid markup differs between versions.
    selectors = ("div[role='row'][data-id]", "div.MuiDataGrid-row[data-id]")
    for selector in selectors:
        for grid_row in page.locator(selector).all():
            row_id = grid_row.get_attribute("data-id")
            if row_id:
                collected.add(row_id)
    return collected
def set_page_size(page, value="100"):
    """Switch the MUI table pagination to *value* rows per page.

    Tries the Czech then English combobox labels, then a generic pagination
    selector; the option list renders in a portal, so it is looked up on the
    page rather than inside the combobox.
    """
    # Open the page-size combobox — first locator that exists wins.
    for loc in [
        page.get_by_role("combobox", name="Řádků na stránce:"),
        page.get_by_role("combobox", name="Rows per page:"),
        page.locator("div.MuiTablePagination-root [role='combobox']"),
    ]:
        if loc.count():
            loc.first.click()
            break
    # Select option "100" (portal-safe): role lookup first, XPath fallback.
    opt = page.get_by_role("option", name=value)
    if not opt.count():
        opt = page.locator(f"//li[normalize-space(.)='{value}']")
    opt.first.wait_for(state="visible", timeout=5000)
    opt.first.click()
    # Wait a moment for the grid to refresh with the new page size.
    try:
        page.wait_for_selector("div[role='row'][data-id]", timeout=10000)
    except PWTimeout:
        time.sleep(0.8)  # grid sometimes re-renders without matching the selector in time
def main():
    """Open the clinic patients list with a saved session, bump the page size
    to 100, harvest the first page of patient IDs, print them, and save them
    to JSON + CSV next to the script."""
    sf = Path(STATE_FILE)
    if not sf.exists():
        print(f"ERROR: storage not found: {sf}")
        sys.exit(1)
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=True)  # set False to watch
        context = browser.new_context(storage_state=str(sf))
        context.set_default_navigation_timeout(30000)
        context.set_default_timeout(15000)
        page = context.new_page()
        try:
            page.goto(PATIENTS_URL, wait_until="domcontentloaded")
        except PWTimeout:
            # Navigation timeouts are tolerated; the page may still have loaded enough.
            print("Warning: goto timeout; continuing…")
        # Detect redirect to login → the stored session has expired.
        if "/prihlaseni" in page.url.lower():
            print("You were redirected to the login page → saved session is expired. Re-run the login-save step.")
            browser.close()
            return
        # (Optional) print pagination label before/after the resize.
        try:
            print("Before:", page.locator("p.MuiTablePagination-displayedRows").first.inner_text())
        except Exception:
            pass
        try:
            set_page_size(page, "100")
        except Exception as e:
            print(f"Could not set page size to 100: {e!r}")
        try:
            print("After :", page.locator("p.MuiTablePagination-displayedRows").first.inner_text())
        except Exception:
            pass
        page.wait_for_selector("div[role='row'][data-id]", timeout=15000)
        ids = sorted(harvest_ids_on_page(page))
        print(f"\nCollected {len(ids)} IDs on first page:")
        for pid in ids:
            print(pid)
        # Also save if you want
        out_json = Path("patient_ids_first_page.json")
        out_csv = Path("patient_ids_first_page.csv")
        out_json.write_text(json.dumps(ids, ensure_ascii=False, indent=2), encoding="utf-8")
        out_csv.write_text("patient_id\n" + "\n".join(ids), encoding="utf-8")
        print(f"\nSaved → {out_json.resolve()}")
        print(f"Saved → {out_csv.resolve()}")
        browser.close()
if __name__ == "__main__":
main()
+98
View File
@@ -0,0 +1,98 @@
# print_patients_first_page_ids.py
from pathlib import Path
import json, time, sys
from playwright.sync_api import sync_playwright, TimeoutError as PWTimeout
STATE_FILE = r"../medevio_storage.json"
PATIENTS_URL = "https://my.medevio.cz/mudr-buzalkova/klinika/pacienti"
def harvest_ids_on_page(page):
    """Collect the set of patient UUIDs (``data-id``) from the rows currently
    rendered in the patients data grid."""
    collected = set()
    # Two selector variants because the grid markup differs between versions.
    selectors = ("div[role='row'][data-id]", "div.MuiDataGrid-row[data-id]")
    for selector in selectors:
        for grid_row in page.locator(selector).all():
            row_id = grid_row.get_attribute("data-id")
            if row_id:
                collected.add(row_id)
    return collected
def set_page_size(page, value="100"):
# Open the page-size combobox
for loc in [
page.get_by_role("combobox", name="Řádků na stránce:"),
page.get_by_role("combobox", name="Rows per page:"),
page.locator("div.MuiTablePagination-root [role='combobox']"),
]:
if loc.count():
loc.first.click()
break
# Select option "100" (portal-safe)
opt = page.get_by_role("option", name=value)
if not opt.count():
opt = page.locator(f"//li[normalize-space(.)='{value}']")
opt.first.wait_for(state="visible", timeout=5000)
opt.first.click()
# Wait a moment for refresh
try:
page.wait_for_selector("div[role='row'][data-id]", timeout=10000)
except PWTimeout:
time.sleep(0.8)
def main():
sf = Path(STATE_FILE)
if not sf.exists():
print(f"ERROR: storage not found: {sf}")
sys.exit(1)
with sync_playwright() as p:
browser = p.chromium.launch(headless=False) # set False to watch
context = browser.new_context(storage_state=str(sf))
context.set_default_navigation_timeout(30000)
context.set_default_timeout(15000)
page = context.new_page()
try:
page.goto(PATIENTS_URL, wait_until="domcontentloaded")
except PWTimeout:
print("Warning: goto timeout; continuing…")
# Detect redirect to login
if "/prihlaseni" in page.url.lower():
print("You were redirected to the login page → saved session is expired. Re-run the login-save step.")
browser.close()
return
# (Optional) print pagination label before/after
try:
print("Before:", page.locator("p.MuiTablePagination-displayedRows").first.inner_text())
except Exception:
pass
try:
set_page_size(page, "100")
except Exception as e:
print(f"Could not set page size to 100: {e!r}")
try:
print("After :", page.locator("p.MuiTablePagination-displayedRows").first.inner_text())
except Exception:
pass
page.wait_for_selector("div[role='row'][data-id]", timeout=15000)
ids = sorted(harvest_ids_on_page(page))
print(f"\nCollected {len(ids)} IDs on first page:")
for pid in ids:
print(pid)
# Also save if you want
out_json = Path("patient_ids_first_page.json")
out_csv = Path("patient_ids_first_page.csv")
out_json.write_text(json.dumps(ids, ensure_ascii=False, indent=2), encoding="utf-8")
out_csv.write_text("patient_id\n" + "\n".join(ids), encoding="utf-8")
print(f"\nSaved → {out_json.resolve()}")
print(f"Saved → {out_csv.resolve()}")
browser.close()
if __name__ == "__main__":
main()
+249
View File
@@ -0,0 +1,249 @@
# extract_patient_detail.py
# Usage:
# 1) Put your medevio_storage.json path into STATE_FILE.
# 2) Set PATIENT_ID to a real UUID from your list.
# 3) Run: python extract_patient_detail.py
#
# Output: prints a dict to console and saves patient_<ID>.json next to the script.
from pathlib import Path
import json, sys, time, re
from playwright.sync_api import sync_playwright, TimeoutError as PWTimeout
STATE_FILE = r"../medevio_storage.json"
BASE_URL = "https://my.medevio.cz/mudr-buzalkova/klinika/pacienti"
PATIENT_ID = "fcb2414b-067b-4ca2-91b2-6c36a86d4cbb" # <-- put target ID here
# ---------- helpers ----------
def wait_for_grid(page, timeout=15000):
    """Block until the patients data grid has rendered at least one row."""
    try:
        # The rowgroup container may appear before any rows do; a miss here is fine.
        page.wait_for_selector("div[role='rowgroup']", timeout=timeout)
    except PWTimeout:
        pass
    page.wait_for_selector("div[role='row'][data-id]", timeout=timeout)
def open_detail_via_query(page, patient_id):
    """Navigate to the patients list with ?pacient=<id> so the SPA opens the
    detail drawer directly; returns True when the detail panel is visible."""
    # Try opening page with ?pacient=... (SPA should open drawer/detail)
    target = f"{BASE_URL}?pacient={patient_id}"
    page.goto(target, wait_until="domcontentloaded")
    # Wait briefly for the drawer/dialog to render
    if not wait_for_detail_open(page, quick=True):
        # Some apps need a tiny delay to mount the panel
        time.sleep(0.8)
    return is_detail_open(page)
def is_detail_open(page):
    """Report whether a patient-detail dialog/drawer is currently visible.

    Checks the usual MUI containers: role=dialog, drawer paper, aria-modal.
    """
    candidates = (
        "[role='dialog']",
        "div.MuiDrawer-paper",
        "div.MuiModal-root [role='dialog']",
        "div[aria-modal='true']",
    )
    return any(
        (loc := page.locator(sel)).count() and loc.first.is_visible()
        for sel in candidates
    )
def wait_for_detail_open(page, quick=False):
    """Wait for a detail dialog/drawer to become visible.

    quick=True uses a short 4s timeout (for opportunistic checks); otherwise
    15s per candidate selector.  Returns True as soon as one appears.
    """
    deadline = 4000 if quick else 15000
    for sel in ("[role='dialog']",
                "div.MuiDrawer-paper",
                "div.MuiModal-root [role='dialog']",
                "div[aria-modal='true']"):
        try:
            page.wait_for_selector(sel, timeout=deadline, state="visible")
        except PWTimeout:
            continue
        return True
    return False
def open_detail_by_click(page, patient_id):
    """Fallback opener: click the grid row whose data-id matches patient_id.

    Returns False when the row is absent, otherwise clicks it and waits for
    the detail panel.
    """
    wait_for_grid(page, timeout=15000)
    row = page.locator(f"div[role='row'][data-id='{patient_id}']").first
    if row.count():
        row.click()
        return wait_for_detail_open(page)
    return False
def find_detail_root(page):
    """Locator for the open detail container, preferring explicit dialogs.

    Falls back to the last modal-ish container when nothing is clearly open.
    """
    for sel in ("[role='dialog']", "div.MuiDrawer-paper", "div[aria-modal='true']"):
        candidate = page.locator(sel)
        if candidate.count() and candidate.first.is_visible():
            return candidate.first
    # Nothing clearly open — settle for the last modal/drawer node.
    return page.locator("div.MuiModal-root, div.MuiDrawer-paper").last
def extract_text(el):
    """Best-effort inner_text() of an element; empty string on any failure."""
    try:
        raw = el.inner_text().strip()
    except Exception:
        raw = ""
    return raw
def extract_field_by_label(root, label_texts):
    """
    Try to find a field value by its label text (CZ/EN variants).

    Searches *root* for an element whose normalized text equals (then, looser:
    contains) one of *label_texts*, then probes nearby elements — the parent's
    next sibling, the label's next sibling, then any non-empty span/div inside
    the parent — for the value text.  Returns the cleaned value, or None when
    no label matched anywhere.
    """
    # Exact-match XPath union over all label variants.
    labels_xpath = " | ".join([f".//*[normalize-space()='{t}']" for t in label_texts])
    loc = root.locator(f"xpath=({labels_xpath})")
    if not loc.count():
        # Try contains(label) — tolerates suffixes such as a trailing colon.
        labels_xpath2 = " | ".join([f".//*[contains(normalize-space(), '{t}')]" for t in label_texts])
        loc = root.locator(f"xpath=({labels_xpath2})")
        if not loc.count():
            return None
    candidate = loc.first
    # Value might be in parent/next sibling
    parent = candidate.locator("xpath=..")
    siblings = [
        parent.locator("xpath=following-sibling::*[1]"),
        candidate.locator("xpath=following-sibling::*[1]"),
        parent.locator(".//*[(self::span or self::div) and string-length(normalize-space())>0]"),
    ]
    for s in siblings:
        if s.count():
            text = extract_text(s.first)
            # Clean common label-value formatting like "E-mail\nx@y.cz"
            if text:
                # If the label text is included, strip it (case-insensitive).
                for t in label_texts:
                    text = re.sub(rf"^{re.escape(t)}\s*[:]?\s*", "", text, flags=re.I)
                text = re.sub(r"\s+\n\s+", "", text).strip()
                return text
    # As a last fallback, read the parent block's text minus the label.
    block_text = extract_text(parent)
    if block_text:
        for t in label_texts:
            block_text = re.sub(rf"{re.escape(t)}\s*[:]?\s*", "", block_text, flags=re.I)
        return block_text.strip()
    return None
def extract_all_text_pairs(root):
    """
    Generic key-value sweep for components that render details as 2-column grids.
    Returns a dict of guessed label->value pairs.
    """
    result = {}
    # Try common MUI grid/list patterns
    blocks = root.locator("div.MuiGrid-container, dl, ul.MuiList-root")
    for i in range(min(20, blocks.count())):  # cap the sweep at 20 blocks
        block = blocks.nth(i)
        text = extract_text(block)
        if not text:
            continue
        # naive split by newlines, pair neighbors "Label\nValue"
        parts = [t.strip() for t in text.splitlines() if t.strip()]
        for j in range(len(parts) - 1):
            label, value = parts[j], parts[j+1]
            # Heuristic: labels usually short, values not identical, ignore obvious noise
            if len(label) <= 32 and label != value and ":" not in value:
                if label not in result:  # first occurrence wins
                    result[label] = value
    return result
def extract_patient_detail(page, patient_id):
    """Scrape the open detail panel into a flat dict keyed by field label.

    Combines targeted label lookups (CZ + EN aliases) with a generic
    key->value sweep; targeted fields take precedence over swept extras.
    """
    root = find_detail_root(page)
    if not root:
        return {"id": patient_id, "error": "detail_not_found"}
    # Try to get a headline with the name
    name = None
    for sel in ["h1", "h2", "h3", "header h2", "[data-testid='PatientName']"]:
        loc = root.locator(sel)
        if loc.count():
            nm = extract_text(loc.first)
            if nm and len(nm) > 1:
                name = nm
                break
    # Targeted fields (CZ + EN aliases)
    # NOTE(review): the empty-string alias for "Rodné číslo" makes the exact
    # XPath match elements with no text at all — looks unintended; confirm.
    fields = {
        "Datum narození / Born": extract_field_by_label(root, ["Datum narození", "Datum nar.", "Date of birth", "Born"]),
        "Rodné číslo": extract_field_by_label(root, ["Rodné číslo", "", "Personal ID"]),
        "Telefon": extract_field_by_label(root, ["Telefon", "Tel.", "Phone", "Mobile"]),
        "E-mail": extract_field_by_label(root, ["E-mail", "Email", "E-mail"]),
        "Zdravotní pojišťovna": extract_field_by_label(root, ["Pojišťovna", "Zdravotní pojišťovna", "Insurer", "Insurance"]),
        "Adresa": extract_field_by_label(root, ["Adresa", "Address"]),
        "Poznámka": extract_field_by_label(root, ["Poznámka", "Note", "Notes"]),
        "Pohlaví": extract_field_by_label(root, ["Pohlaví", "Gender", "Sex"]),
        "Praktický lékař": extract_field_by_label(root, ["Praktický lékař", "GP", "General practitioner"]),
    }
    # Sweep for any extra key->value pairs we didn't explicitly target
    extras = extract_all_text_pairs(root)
    # Merge non-empty fields
    data = {"id": patient_id}
    if name: data["name"] = name
    for k, v in fields.items():
        if v and v.strip():
            data[k] = v.strip()
    # Add extras that aren't already present
    for k, v in extras.items():
        if k not in data and v and v.strip():
            data[k] = v.strip()
    return data
# ---------- main ----------
def main():
    """Open one patient's detail (deep link first, row click as fallback),
    extract its fields, print them and save patient_<ID>.json next to the
    script.  Exits with status 1 on missing config."""
    if not PATIENT_ID or len(PATIENT_ID) < 8:
        print("Set PATIENT_ID to a valid patient UUID.")
        sys.exit(1)
    sf = Path(STATE_FILE)
    if not sf.exists():
        print(f"Storage file not found: {sf}")
        sys.exit(1)
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=False)  # headful so the run can be watched
        context = browser.new_context(storage_state=str(sf))
        context.set_default_navigation_timeout(30000)
        context.set_default_timeout(15000)
        page = context.new_page()
        # Try via query param first
        opened = open_detail_via_query(page, PATIENT_ID)
        # If not opened, go to base list and click the row
        if not opened:
            # Ensure the base grid exists
            page.goto(BASE_URL, wait_until="domcontentloaded")
            if "/prihlaseni" in page.url.lower():
                print("Redirected to login — refresh your medevio_storage.json.")
                browser.close()
                return
            if not open_detail_by_click(page, PATIENT_ID):
                print("Could not open detail panel (neither via query nor by clicking).")
                browser.close()
                return
        # At this point, detail should be open
        data = extract_patient_detail(page, PATIENT_ID)
        print("\n=== Patient detail ===")
        print(json.dumps(data, ensure_ascii=False, indent=2))
        out = Path(f"patient_{PATIENT_ID}.json")
        out.write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8")
        print(f"\nSaved → {out.resolve()}")
        browser.close()


if __name__ == "__main__":
    main()
+111
View File
@@ -0,0 +1,111 @@
from playwright.sync_api import sync_playwright
import mysql.connector
import time
# SECURITY NOTE(review): root credentials are hard-coded and committed to
# version control — move them to environment variables or an ignored config.
MYSQL_CFG = dict(
    host="192.168.1.76",
    port=3307,
    user="root",
    password="Vlado9674+",
    database="medevio",
)
# --- load 3 patients from DB ---
# Rows whose Medevio-account flag is still unknown, in surname order.
conn = mysql.connector.connect(**MYSQL_CFG)
with conn.cursor() as cur:
    cur.execute("""
        SELECT rid, prijmeni, jmeno, rc
        FROM patients_extracted
        WHERE prijmeni IS NOT NULL and mamedevioucet is null
        ORDER BY prijmeni ASC
        LIMIT 3
    """)
    rows = cur.fetchall()
if not rows:
    raise RuntimeError("No entries found in patients_extracted")
STATE_FILE = r"../medevio_storage.json"  # Playwright storage_state with session
BASE_URL = "https://my.medevio.cz/mudr-buzalkova/klinika/pacienti"
# Toast shown when the ?pacient=<rid> deep link matches no patient.
NOT_FOUND_SEL = "div[role='alert']:has-text('Pacient nebyl nalezen'), div:has-text('Pacient nebyl nalezen')"
# Any open detail dialog/drawer.
DIALOG_SEL = "[role='dialog'], div.MuiDrawer-paper, div[aria-modal='true']"
def close_dialog_if_open(page):
    """Best-effort close of any open patient-detail dialog/drawer.

    Tries a close button first, falls back to pressing Escape, then waits
    briefly for the dialog to detach.  All failures are swallowed on purpose:
    a stale or missing dialog must never abort the scraping loop.
    """
    dlg = page.locator(DIALOG_SEL)
    try:
        if dlg.count():
            # Try a close button; if not, press Escape
            try:
                dlg.locator(
                    "button:has-text('Zavřít'), [aria-label='Zavřít'], "
                    "[aria-label='Close'], [data-testid='CloseIcon']"
                ).first.click(timeout=1000)
            except Exception:  # was bare `except:` — would swallow KeyboardInterrupt
                page.keyboard.press("Escape")
            page.wait_for_selector(DIALOG_SEL, state="detached", timeout=1500)
    except Exception:  # was bare `except:` — narrowed so Ctrl-C still works
        pass  # best-effort close
def main():
    """For each loaded patient, open the detail via ?pacient=<rid>, read the
    dialog text, and persist mamedevioucet (0/1) into patients_extracted."""
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=True)
        try:
            context = browser.new_context(storage_state=STATE_FILE)
            page = context.new_page()
            for rid, surname, name, rc in rows:
                # 0) close any previous dialog to avoid stale matches
                close_dialog_if_open(page)
                target_url = f"{BASE_URL}?pacient={rid}"
                page.goto(target_url, wait_until="domcontentloaded")
                page.wait_for_load_state("networkidle")
                # 1) Not-found toast?
                # NOTE(review): the bare `except:` clauses below also swallow
                # KeyboardInterrupt/SystemExit — consider `except PWTimeout:`.
                try:
                    page.wait_for_selector(NOT_FOUND_SEL, timeout=3000)
                    print(f"{surname} {name} {rc} ⚠️ pacient s RID {rid} nebyl nalezen, přeskočeno")
                    # (optional) set mamedevioucet=NULL for this rid here
                    continue
                except:
                    pass
                # 2) Detail panel
                try:
                    page.wait_for_selector(DIALOG_SEL, timeout=6000)
                except:
                    print(f"⚠️ {surname} {name} {rc}: detailový panel se nenačetl, přeskočeno")
                    continue
                # 3) Verify dialog belongs to current patient (avoid stale dialog)
                detail = page.locator(DIALOG_SEL).first
                detail_text = detail.inner_text()
                if (surname not in detail_text) and (rc not in detail_text):
                    # Still looks wrong; give UI a moment and re-check once
                    page.wait_for_timeout(500)
                    detail_text = detail.inner_text()
                    if (surname not in detail_text) and (rc not in detail_text):
                        print(f"⚠️ {surname} {name} {rc}: detail neodpovídá (stará karta?), přeskočeno")
                        continue
                # 4) Check Medevio account text
                if "zatím nemá Medevio účet" in detail_text:
                    has_account = 0
                    print(f"{surname} {name} {rc} ❌ zatím nemá Medevio účet")
                else:
                    has_account = 1
                    print(f"{surname} {name} {rc} ✅ má Medevio účet")
                # Update DB by RID (or swap to rc if you prefer)
                with conn.cursor() as c:
                    c.execute(
                        "UPDATE patients_extracted SET mamedevioucet = %s WHERE rid = %s",
                        (has_account, rid),
                    )
                conn.commit()
                time.sleep(0.5)  # gentle pacing
        finally:
            browser.close()


if __name__ == "__main__":
    main()
+101
View File
@@ -0,0 +1,101 @@
import time
from playwright.sync_api import sync_playwright
import mysql.connector
# SECURITY NOTE(review): root credentials hard-coded in source control —
# move to environment variables or a secrets store.
MYSQL_CFG = dict(
    host="192.168.1.76",
    port=3307,
    user="root",
    password="Vlado9674+",
    database="medevio",
)
# --- load 3 patients from DB ---
# (the LIMIT below actually loads up to 300 unprocessed rows)
conn = mysql.connector.connect(**MYSQL_CFG)
with conn.cursor() as cur:
    cur.execute("""
        SELECT rid, prijmeni, jmeno, rc
        FROM patients_extracted
        WHERE prijmeni IS NOT NULL and mamedevioucet is null
        ORDER BY prijmeni ASC
        LIMIT 300
    """)
    rows = cur.fetchall()
if not rows:
    raise RuntimeError("No entries found in patients_extracted")
STATE_FILE = r"../medevio_storage.json"  # Playwright storage_state with session
BASE_URL = "https://my.medevio.cz/mudr-buzalkova/klinika/pacienti"
# Toast shown when the ?pacient=<rid> deep link matches no patient.
NOT_FOUND_SEL = "div[role='alert']:has-text('Pacient nebyl nalezen'), div:has-text('Pacient nebyl nalezen')"
# Any open detail dialog/drawer.
DIALOG_SEL = "[role='dialog'], div.MuiDrawer-paper, div[aria-modal='true']"
def close_dialog_if_open(page):
    """Dismiss an open detail dialog/drawer if one is present (best effort).

    Prefers the dialog's own close button, falls back to Escape, and waits a
    moment for detachment.  Every failure is deliberately ignored so the main
    loop keeps going.
    """
    dlg = page.locator(DIALOG_SEL)
    try:
        if dlg.count():
            # Try a close button; if not, press Escape
            try:
                dlg.locator(
                    "button:has-text('Zavřít'), [aria-label='Zavřít'], "
                    "[aria-label='Close'], [data-testid='CloseIcon']"
                ).first.click(timeout=1000)
            except Exception:  # narrowed from bare `except:` (kept Ctrl-C working)
                page.keyboard.press("Escape")
            page.wait_for_selector(DIALOG_SEL, state="detached", timeout=1500)
    except Exception:  # narrowed from bare `except:`
        pass  # best-effort close
def main():
    """Timed variant of the account check: logs per-patient latencies for
    navigation, toast/dialog detection and the DB update."""
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=True)
        context = browser.new_context(storage_state=STATE_FILE)
        page = context.new_page()
        for rid, surname, name, rc in rows:
            print(f"\nProcessing {surname} {name} {rc}")
            # 1) Navigation time
            t0 = time.perf_counter()
            page.goto(f"{BASE_URL}?pacient={rid}", wait_until="domcontentloaded")
            # page.wait_for_load_state("networkidle")
            t_nav = time.perf_counter() - t0
            print(f" ⏱️ page.goto + networkidle: {t_nav:.2f}s")
            # 2) Toast / dialog detection
            # NOTE(review): bare `except:` below also catches KeyboardInterrupt.
            t1 = time.perf_counter()
            not_found = False
            try:
                page.wait_for_selector(NOT_FOUND_SEL, timeout=2500)
                not_found = True
            except:
                pass
            if not_found:
                print(f" ⚠️ not-found toast detected after {time.perf_counter() - t1:.2f}s")
                continue
            try:
                page.wait_for_selector(DIALOG_SEL, timeout=8000)
            except:
                print(f" ⚠️ dialog not found (waited {time.perf_counter() - t1:.2f}s)")
                continue
            t_dialog = time.perf_counter() - t1
            print(f" ⏱️ toast/dialog detection: {t_dialog:.2f}s")
            # 3) Account check + DB update
            t2 = time.perf_counter()
            text = page.locator(DIALOG_SEL).first.inner_text()
            has_account = 0 if "zatím nemá Medevio účet" in text else 1
            with conn.cursor() as c:
                c.execute("UPDATE patients_extracted SET mamedevioucet=%s WHERE rid=%s",
                          (has_account, rid))
            conn.commit()
            t_db = time.perf_counter() - t2
            print(f" ⏱️ DB update & text parse: {t_db:.2f}s")
            # 4) Optional pacing (currently disabled)
            t3 = time.perf_counter()
            # time.sleep(0.5)
            print(f" ⏱️ explicit sleep: {time.perf_counter() - t3:.2f}s")
        browser.close()


if __name__ == "__main__":
    main()
@@ -0,0 +1,177 @@
#Tento kod se pripoji do kartoteky Medevio, zmeni na 100 pacientu na stranu, nactene
# medevio_dump_patients_html_to_mysql.py
import time
import json
from pathlib import Path
from datetime import datetime
from typing import Set
import mysql.connector
from mysql.connector import errorcode
from playwright.sync_api import sync_playwright, TimeoutError as PWTimeout
# ---------- CONFIG ----------
STATE_FILE = r"../medevio_storage.json"  # Playwright storage_state with session
BASE_LIST_URL = "https://my.medevio.cz/mudr-buzalkova/klinika/pacienti"
SAVE_DELAY_SECONDS = 10  # throttle: 10 sec per patient
# MySQL connection settings (fill in)
# SECURITY NOTE(review): root credentials hard-coded and committed —
# move to environment variables or an ignored config file.
MYSQL_CFG = dict(
    host="192.168.1.76",
    port=3307,
    user="root",
    password="Vlado9674+",
    database="medevio",
)
# ---------- DB helpers ----------
def db_connect():
    """Open a MySQL connection from MYSQL_CFG; exit with a message on failure."""
    try:
        return mysql.connector.connect(**MYSQL_CFG)
    except mysql.connector.Error as e:
        raise SystemExit(f"MySQL connection failed: {e}")
# ---------- Playwright helpers ----------
def wait_for_grid_ready(page):
    """Block until the grid container and at least one data row exist."""
    generous_timeout = 20000  # the SPA can be slow to render
    page.wait_for_selector("div[role='rowgroup']", timeout=generous_timeout)
    page.wait_for_selector("div[role='row'][data-id]", timeout=generous_timeout)
def set_page_size_100(page):  # switches rows-per-page to 100 (called once per run)
    """Switch the MUI table pagination to 100 rows per page (CZ and EN UIs)."""
    # Click the page-size combobox (CZ/EN + generic)
    for loc in [
        page.get_by_role("combobox", name="Řádků na stránce:"),
        page.get_by_role("combobox", name="Rows per page:"),
        page.locator("div.MuiTablePagination-root [role='combobox']"),
    ]:
        if loc.count():
            loc.first.click()
            break
    # Select 100 (MUI menu often renders in a portal)
    opt = page.get_by_role("option", name="100")
    if not opt.count():
        opt = page.locator("//li[normalize-space(.)='100']")
    opt.first.wait_for(state="visible", timeout=5000)
    opt.first.click()
    # Wait for rows to refresh
    try:
        page.wait_for_selector("div[role='row'][data-id]", timeout=10000)
    except PWTimeout:
        time.sleep(0.8)  # fallback pause when the refreshed rows never signal
def click_next_page(page) -> bool:  # advances pagination to load the next 100 rows
    """Click the pagination "next" button (EN aria-label, then CZ caption).

    Returns True when a button was clicked, False when none is usable.
    """
    for caption in ("Go to next page", "Další"):
        btn = page.get_by_role("button", name=caption)
        if not btn.count():
            continue
        try:
            if btn.first.is_enabled():
                btn.first.click()
                return True
        except Exception:
            pass
    return False
# ---------- Main workflow ----------
def save_all_patient_htmls(conn, context, next_round):
    """Walk the whole patient list and store each page's raw HTML in MySQL.

    Opens the clinic patient list, switches pagination to 100 rows/page
    (best effort), then INSERTs every page's full HTML into kartoteka_html
    tagged with *next_round*, until the "next page" button stops working.

    Raises SystemExit when the stored session has expired (login redirect).
    """
    page = context.new_page()
    page.set_default_timeout(15000)
    page.set_default_navigation_timeout(30000)
    # Use domcontentloaded (SPAs often keep network busy)
    page.goto(BASE_LIST_URL, wait_until="domcontentloaded")
    if "/prihlaseni" in page.url.lower():
        raise SystemExit("Session expired → refresh medevio_storage.json via the login script.")
    wait_for_grid_ready(page)
    # optional: print label like "125 z 1856"
    try:
        label = page.locator("p.MuiTablePagination-displayedRows").first.inner_text()
        print("Pagination label BEFORE:", label)
    except Exception:
        pass
    # Set 100/page (best effort — default page size still works, just slower)
    try:
        set_page_size_100(page)
        try:
            label = page.locator("p.MuiTablePagination-displayedRows").first.inner_text()
            print("Pagination label AFTER :", label)
        except Exception:
            pass
    except Exception as e:
        print(f"Warning: could not set page size to 100: {e!r}")
    page_index = 1
    while True:
        wait_for_grid_ready(page)
        # Persist the current page's full HTML for later parsing.
        cur = conn.cursor()
        try:
            # (was an f-string with no placeholders — plain parameterized SQL)
            cur.execute(
                """INSERT INTO kartoteka_html (html,round)
                   VALUES (%s,%s)""",
                (page.content(), next_round),
            )
            conn.commit()
        finally:
            cur.close()  # close the cursor even when the INSERT fails
        print(f"DB saved page index {page_index}")
        # Try to go next; if cannot, we reached the last page
        if not click_next_page(page):
            break
        # Wait for DOM to actually update (new rows)
        try:
            page.wait_for_load_state("domcontentloaded", timeout=10000)
        except PWTimeout:
            pass
        time.sleep(0.5)
        page_index += 1
    page.close()
    print(f"Total pages collected: {page_index}")  # fixed typo "colleceted collected"
def main():
    """Entry point: clean leftover rows, compute the next round id, dump pages."""
    # Check storage exists
    if not Path(STATE_FILE).exists():
        raise SystemExit(f"Storage not found: {STATE_FILE}")
    # DB ready
    conn = db_connect()
    # Delete kartoteka_html records left over from aborted runs (round=0)
    cur = conn.cursor()
    cur.execute("delete from kartoteka_html where round=0")
    conn.commit()
    with conn.cursor() as cur:
        cur.execute("SELECT MAX(`round`) AS max_round FROM kartoteka_html")
        result = cur.fetchone()
    # If table empty, use 0 as fallback
    next_round = (result[0] or 0) + 1
    print("Next round will be:", next_round)
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=False)  # headful so the run can be watched
        context = browser.new_context(storage_state=STATE_FILE)
        save_all_patient_htmls(conn, context, next_round)
        browser.close()
    conn.close()
    print("Done.")


if __name__ == "__main__":
    main()
+262
View File
@@ -0,0 +1,262 @@
#Tento kod se pripoji do kartoteky Medevio, zmeni na 100 pacientu na stranu, nactene
# medevio_dump_patients_html_to_mysql.py
import time
import json
from pathlib import Path
from datetime import datetime
from typing import Set
import mysql.connector
from mysql.connector import errorcode
from playwright.sync_api import sync_playwright, TimeoutError as PWTimeout
# ---------- CONFIG ----------
STATE_FILE = r"../medevio_storage.json"  # Playwright storage_state with session
BASE_LIST_URL = "https://my.medevio.cz/mudr-buzalkova/klinika/pacienti"
SAVE_DELAY_SECONDS = 10  # throttle: 10 sec per patient
# MySQL connection settings (fill in)
# SECURITY NOTE(review): root credentials hard-coded in version control —
# move to environment variables or a secrets store.
MYSQL_CFG = dict(
    host="192.168.1.76",
    port=3307,
    user="root",
    password="Vlado9674+",
    database="medevio",
)
TABLE_NAME = "patients_html"  # schema created automatically
# ---------- DB helpers ----------
def db_connect():
    """Connect to MySQL using MYSQL_CFG, aborting the script on failure."""
    try:
        connection = mysql.connector.connect(**MYSQL_CFG)
    except mysql.connector.Error as e:
        raise SystemExit(f"MySQL connection failed: {e}")
    return connection
def db_ensure_table(conn):
    """Idempotently create the per-patient HTML snapshot table."""
    ddl = f"""
    CREATE TABLE IF NOT EXISTS `{TABLE_NAME}` (
        patient_id VARCHAR(64) PRIMARY KEY,
        html LONGTEXT NOT NULL,
        fetched_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
    ) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
    """
    with conn.cursor() as cur:
        cur.execute(ddl)
    conn.commit()
def db_existing_ids(conn) -> Set[str]:
    """Return the set of patient_ids already stored in the snapshot table."""
    cur = conn.cursor()
    cur.execute(f"SELECT patient_id FROM `{TABLE_NAME}`")
    stored = {pid for (pid,) in cur.fetchall()}
    cur.close()
    return stored
def db_upsert_html(conn, patient_id: str, html: str):
    """Insert or refresh one patient's HTML snapshot (upsert on patient_id)."""
    # NOTE(review): `VALUES(col)` inside ON DUPLICATE KEY UPDATE is deprecated
    # from MySQL 8.0.20 — confirm server version or switch to a row alias.
    cur = conn.cursor()
    cur.execute(
        f"""INSERT INTO `{TABLE_NAME}` (patient_id, html, fetched_at)
            VALUES (%s, %s, NOW())
            ON DUPLICATE KEY UPDATE html = VALUES(html), fetched_at = VALUES(fetched_at)""",
        (patient_id, html),
    )
    conn.commit()
    cur.close()
# ---------- Playwright helpers ----------
def wait_for_grid_ready(page):
    """Ensure the patient grid rendered with at least one data row."""
    # Generous timeouts: the SPA can take a while to paint the grid.
    for selector in ("div[role='rowgroup']", "div[role='row'][data-id]"):
        page.wait_for_selector(selector, timeout=20000)
def set_page_size_100(page):
    """Switch MUI table pagination to 100 rows per page (CZ and EN UIs)."""
    # Click the page-size combobox (CZ/EN + generic)
    for loc in [
        page.get_by_role("combobox", name="Řádků na stránce:"),
        page.get_by_role("combobox", name="Rows per page:"),
        page.locator("div.MuiTablePagination-root [role='combobox']"),
    ]:
        if loc.count():
            loc.first.click()
            break
    # Select 100 (MUI menu often renders in a portal)
    opt = page.get_by_role("option", name="100")
    if not opt.count():
        opt = page.locator("//li[normalize-space(.)='100']")
    opt.first.wait_for(state="visible", timeout=5000)
    opt.first.click()
    # Wait for rows to refresh
    try:
        page.wait_for_selector("div[role='row'][data-id]", timeout=10000)
    except PWTimeout:
        time.sleep(0.8)  # fallback pause when the refresh never signals
def harvest_ids_on_current_page(page) -> Set[str]:
    """Collect the data-id of every visible grid row (both selector variants)."""
    selectors = ("div[role='row'][data-id]", "div.MuiDataGrid-row[data-id]")
    found: Set[str] = set()
    for sel in selectors:
        found.update(
            pid
            for row in page.locator(sel).all()
            if (pid := row.get_attribute("data-id"))
        )
    return found
def click_next_page(page) -> bool:
    """Advance pagination; True only when a next-page button was clicked.

    Tries the English ARIA label first, then the Czech caption.
    """
    for label in ("Go to next page", "Další"):
        candidate = page.get_by_role("button", name=label)
        if not candidate.count():
            continue
        try:
            if candidate.first.is_enabled():
                candidate.first.click()
                return True
        except Exception:
            pass
    return False
def ensure_detail_open(page) -> bool:
    """True when a patient-detail dialog or drawer is currently visible."""
    dialog_selectors = ("[role='dialog']", "div.MuiDrawer-paper", "div[aria-modal='true']")
    for sel in dialog_selectors:
        hit = page.locator(sel)
        if hit.count() and hit.first.is_visible():
            return True
    return False
# ---------- Main workflow ----------
def collect_all_patient_ids(context) -> Set[str]:
    """Page through the whole patient list and return every row's data-id.

    Opens the list, switches to 100 rows/page (best effort), then harvests
    ids page by page until the next-page button stops working.
    Raises SystemExit when the stored session redirects to the login page.
    """
    page = context.new_page()
    page.set_default_timeout(15000)
    page.set_default_navigation_timeout(30000)
    # Use domcontentloaded (SPAs often keep network busy)
    page.goto(BASE_LIST_URL, wait_until="domcontentloaded")
    if "/prihlaseni" in page.url.lower():
        raise SystemExit("Session expired → refresh medevio_storage.json via the login script.")
    wait_for_grid_ready(page)
    # optional: print label like "125 z 1856"
    try:
        label = page.locator("p.MuiTablePagination-displayedRows").first.inner_text()
        print("Pagination label BEFORE:", label)
    except Exception:
        pass
    # Set 100/page
    try:
        set_page_size_100(page)
        try:
            label = page.locator("p.MuiTablePagination-displayedRows").first.inner_text()
            print("Pagination label AFTER :", label)
        except Exception:
            pass
    except Exception as e:
        print(f"Warning: could not set page size to 100: {e!r}")
    all_ids: Set[str] = set()
    page_index = 1
    while True:
        wait_for_grid_ready(page)
        ids_now = harvest_ids_on_current_page(page)
        print(f"Page {page_index}: harvested {len(ids_now)} ids")
        all_ids |= ids_now
        # Try to go next; if cannot, break
        if not click_next_page(page):
            break
        # Wait for DOM to actually update (new rows)
        try:
            page.wait_for_load_state("domcontentloaded", timeout=10000)
        except PWTimeout:
            pass
        time.sleep(0.5)
        page_index += 1
    page.close()
    print(f"Total unique IDs collected: {len(all_ids)}")
    return all_ids
def fetch_and_store_patient_html(context, conn, patient_id: str):
    """Open one patient's detail and upsert the full page HTML into MySQL.

    Tries the ?pacient=<id> deep link first, then falls back to clicking the
    grid row.  Sleeps SAVE_DELAY_SECONDS after a successful save (throttle).
    """
    page = context.new_page()
    page.set_default_timeout(15000)
    page.set_default_navigation_timeout(30000)
    url = f"{BASE_LIST_URL}?pacient={patient_id}"
    page.goto(url, wait_until="domcontentloaded")
    # If detail didn't open, fallback: go to list, click row
    if not ensure_detail_open(page):
        page.goto(BASE_LIST_URL, wait_until="domcontentloaded")
        try:
            page.wait_for_selector(f"div[role='row'][data-id='{patient_id}']", timeout=15000)
            page.locator(f"div[role='row'][data-id='{patient_id}']").first.click()
            # wait for drawer/dialog
            page.wait_for_selector("[role='dialog'], div.MuiDrawer-paper, div[aria-modal='true']", timeout=12000)
        except PWTimeout:
            print(f"[{patient_id}] detail panel did not open — skipping")
            page.close()
            return
    # Save full HTML of the page (includes the open detail drawer)
    html = page.content()
    db_upsert_html(conn, patient_id, html)
    print(f"[{patient_id}] saved HTML ({len(html)} bytes) at {datetime.now().isoformat(timespec='seconds')}")
    page.close()
    # Throttle per requirement
    time.sleep(SAVE_DELAY_SECONDS)
def main():
    """Dump per-patient detail HTML into MySQL, skipping ids already stored."""
    # Check storage exists
    if not Path(STATE_FILE).exists():
        raise SystemExit(f"Storage not found: {STATE_FILE}")
    # DB ready
    conn = db_connect()
    db_ensure_table(conn)
    already = db_existing_ids(conn)
    print(f"Already in DB: {len(already)} ids")
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=False)  # headful so the run can be watched
        context = browser.new_context(storage_state=STATE_FILE)
        # 1) Collect all IDs from the listing (all pages)
        # all_ids = collect_all_patient_ids(context)
        # NOTE(review): with listing collection disabled, all_ids == already,
        # so `todo` below is always empty and nothing is fetched — re-enable
        # the line above (or supply ids another way) for this run to do work.
        all_ids = db_existing_ids(conn)
        # 2) Iterate and store HTML (skip existing)
        todo = [pid for pid in sorted(all_ids) if pid not in already]
        print(f"To fetch now: {len(todo)} ids (skipping {len(all_ids)-len(todo)} already saved)")
        for i, pid in enumerate(todo, 1):
            try:
                fetch_and_store_patient_html(context, conn, pid)
            except Exception as e:
                print(f"[{pid}] ERROR: {e!r} — continuing with next")
        browser.close()
    conn.close()
    print("Done.")


if __name__ == "__main__":
    main()
+258
View File
@@ -0,0 +1,258 @@
# medevio_dump_patients_html_to_mysql.py
import time
import json
from pathlib import Path
from datetime import datetime
from typing import Set
import mysql.connector
from mysql.connector import errorcode
from playwright.sync_api import sync_playwright, TimeoutError as PWTimeout
# ---------- CONFIG ----------
STATE_FILE = r"../medevio_storage.json"  # Playwright storage_state with session
BASE_LIST_URL = "https://my.medevio.cz/mudr-buzalkova/klinika/pacienti"
SAVE_DELAY_SECONDS = 10  # throttle: 10 sec per patient
# MySQL connection settings (fill in)
# SECURITY NOTE(review): root credentials hard-coded in version control.
# NOTE(review): host is 192.168.1.74 here while sibling scripts use .76 —
# confirm which server is intended.
MYSQL_CFG = dict(
    host="192.168.1.74",
    port=3307,
    user="root",
    password="Vlado9674+",
    database="medevio",
)
TABLE_NAME = "patients_html"  # schema created automatically
# ---------- DB helpers ----------
def db_connect():
    """Return a MySQL connection built from MYSQL_CFG; abort on failure."""
    try:
        link = mysql.connector.connect(**MYSQL_CFG)
    except mysql.connector.Error as e:
        raise SystemExit(f"MySQL connection failed: {e}")
    return link
def db_ensure_table(conn):
    """Idempotently create the patients_html snapshot table (utf8mb4)."""
    ddl = f"""
    CREATE TABLE IF NOT EXISTS `{TABLE_NAME}` (
        patient_id VARCHAR(64) PRIMARY KEY,
        html LONGTEXT NOT NULL,
        fetched_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
    ) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
    """
    cur = conn.cursor()
    cur.execute(ddl)
    conn.commit()
    cur.close()
def db_existing_ids(conn) -> Set[str]:
    """All patient_ids currently present in the snapshot table."""
    cur = conn.cursor()
    cur.execute(f"SELECT patient_id FROM `{TABLE_NAME}`")
    known = set()
    for record in cur.fetchall():
        known.add(record[0])
    cur.close()
    return known
def db_upsert_html(conn, patient_id: str, html: str):
    """Store (or refresh) the HTML snapshot for one patient id."""
    params = (patient_id, html)
    statement = f"""INSERT INTO `{TABLE_NAME}` (patient_id, html, fetched_at)
        VALUES (%s, %s, NOW())
        ON DUPLICATE KEY UPDATE html = VALUES(html), fetched_at = VALUES(fetched_at)"""
    cur = conn.cursor()
    cur.execute(statement, params)
    conn.commit()
    cur.close()
# ---------- Playwright helpers ----------
def wait_for_grid_ready(page):
    """Wait until the grid and at least one data row are present."""
    # grid present & at least one row (be generous on timeout)
    page.wait_for_selector("div[role='rowgroup']", timeout=20000)
    page.wait_for_selector("div[role='row'][data-id]", timeout=20000)
def set_page_size_100(page):
    """Switch MUI table pagination to 100 rows per page (CZ/EN UIs)."""
    # Click the page-size combobox (CZ/EN + generic)
    for loc in [
        page.get_by_role("combobox", name="Řádků na stránce:"),
        page.get_by_role("combobox", name="Rows per page:"),
        page.locator("div.MuiTablePagination-root [role='combobox']"),
    ]:
        if loc.count():
            loc.first.click()
            break
    # Select 100 (MUI menu often renders in a portal)
    opt = page.get_by_role("option", name="100")
    if not opt.count():
        opt = page.locator("//li[normalize-space(.)='100']")
    opt.first.wait_for(state="visible", timeout=5000)
    opt.first.click()
    # Wait for rows to refresh
    try:
        page.wait_for_selector("div[role='row'][data-id]", timeout=10000)
    except PWTimeout:
        time.sleep(0.8)  # fixed pause when the refresh never signals
def harvest_ids_on_current_page(page) -> Set[str]:
    """Return the data-id of every row on the current page (two selector variants)."""
    ids = set()
    # Generic ARIA rows plus the MUI DataGrid class form.
    for sel in ["div[role='row'][data-id]", "div.MuiDataGrid-row[data-id]"]:
        for row in page.locator(sel).all():
            pid = row.get_attribute("data-id")
            if pid:
                ids.add(pid)
    return ids
def click_next_page(page) -> bool:
    """Click the pagination "next" button; False when absent or disabled."""
    # Prefer ARIA label
    nxt = page.get_by_role("button", name="Go to next page")
    if nxt.count():
        try:
            if nxt.first.is_enabled():
                nxt.first.click()
                return True
        except Exception:
            pass
    # Fallback (CZ)
    nxt2 = page.get_by_role("button", name="Další")
    if nxt2.count():
        try:
            if nxt2.first.is_enabled():
                nxt2.first.click()
                return True
        except Exception:
            pass
    return False
def ensure_detail_open(page) -> bool:
    """True when a patient-detail dialog or drawer is currently visible."""
    # Detail drawer/dialog visible?
    for sel in ["[role='dialog']", "div.MuiDrawer-paper", "div[aria-modal='true']"]:
        loc = page.locator(sel)
        if loc.count() and loc.first.is_visible():
            return True
    return False
# ---------- Main workflow ----------
def collect_all_patient_ids(context) -> Set[str]:
    """Page through the entire listing and return every row's data-id.

    Raises SystemExit when the stored session redirects to the login page.
    """
    page = context.new_page()
    page.set_default_timeout(15000)
    page.set_default_navigation_timeout(30000)
    # Use domcontentloaded (SPAs often keep network busy)
    page.goto(BASE_LIST_URL, wait_until="domcontentloaded")
    if "/prihlaseni" in page.url.lower():
        raise SystemExit("Session expired → refresh medevio_storage.json via the login script.")
    wait_for_grid_ready(page)
    # optional: print label like "125 z 1856"
    try:
        label = page.locator("p.MuiTablePagination-displayedRows").first.inner_text()
        print("Pagination label BEFORE:", label)
    except Exception:
        pass
    # Set 100/page
    try:
        set_page_size_100(page)
        try:
            label = page.locator("p.MuiTablePagination-displayedRows").first.inner_text()
            print("Pagination label AFTER :", label)
        except Exception:
            pass
    except Exception as e:
        print(f"Warning: could not set page size to 100: {e!r}")
    all_ids: Set[str] = set()
    page_index = 1
    while True:
        wait_for_grid_ready(page)
        ids_now = harvest_ids_on_current_page(page)
        print(f"Page {page_index}: harvested {len(ids_now)} ids")
        all_ids |= ids_now
        # Try to go next; if cannot, break
        if not click_next_page(page):
            break
        # Wait for DOM to actually update (new rows)
        try:
            page.wait_for_load_state("domcontentloaded", timeout=10000)
        except PWTimeout:
            pass
        time.sleep(0.5)
        page_index += 1
    page.close()
    print(f"Total unique IDs collected: {len(all_ids)}")
    return all_ids
def fetch_and_store_patient_html(context, conn, patient_id: str):
    """Open one patient's detail and upsert the page's full HTML into MySQL.

    Deep link first; on failure, clicks the grid row.  Throttles with
    SAVE_DELAY_SECONDS after each successful save.
    """
    page = context.new_page()
    page.set_default_timeout(15000)
    page.set_default_navigation_timeout(30000)
    url = f"{BASE_LIST_URL}?pacient={patient_id}"
    page.goto(url, wait_until="domcontentloaded")
    # If detail didn't open, fallback: go to list, click row
    if not ensure_detail_open(page):
        page.goto(BASE_LIST_URL, wait_until="domcontentloaded")
        try:
            page.wait_for_selector(f"div[role='row'][data-id='{patient_id}']", timeout=15000)
            page.locator(f"div[role='row'][data-id='{patient_id}']").first.click()
            # wait for drawer/dialog
            page.wait_for_selector("[role='dialog'], div.MuiDrawer-paper, div[aria-modal='true']", timeout=12000)
        except PWTimeout:
            print(f"[{patient_id}] detail panel did not open — skipping")
            page.close()
            return
    # Save full HTML of the page (includes the open detail drawer)
    html = page.content()
    db_upsert_html(conn, patient_id, html)
    print(f"[{patient_id}] saved HTML ({len(html)} bytes) at {datetime.now().isoformat(timespec='seconds')}")
    page.close()
    # Throttle per requirement
    time.sleep(SAVE_DELAY_SECONDS)
def main():
    """Collect every patient id from the listing, then store each detail
    page's HTML in MySQL, skipping ids already saved."""
    # Check storage exists
    if not Path(STATE_FILE).exists():
        raise SystemExit(f"Storage not found: {STATE_FILE}")
    # DB ready
    conn = db_connect()
    db_ensure_table(conn)
    already = db_existing_ids(conn)
    print(f"Already in DB: {len(already)} ids")
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=True)  # set False to watch
        context = browser.new_context(storage_state=STATE_FILE)
        # 1) Collect all IDs from the listing (all pages)
        all_ids = collect_all_patient_ids(context)
        # 2) Iterate and store HTML (skip existing)
        todo = [pid for pid in sorted(all_ids) if pid not in already]
        print(f"To fetch now: {len(todo)} ids (skipping {len(all_ids)-len(todo)} already saved)")
        for i, pid in enumerate(todo, 1):
            try:
                fetch_and_store_patient_html(context, conn, pid)
            except Exception as e:
                print(f"[{pid}] ERROR: {e!r} — continuing with next")
        browser.close()
    conn.close()
    print("Done.")


if __name__ == "__main__":
    main()
@@ -0,0 +1,110 @@
import mysql.connector
from bs4 import BeautifulSoup
import re
import time
# SECURITY NOTE(review): root credentials hard-coded and committed —
# move to environment variables or an ignored config file.
MYSQL_CFG = dict(
    host="192.168.1.76",
    port=3307,
    user="root",
    password="Vlado9674+",
    database="medevio",
)
# Helper functions
def is_valid_rc(rc: str) -> bool:
"""
Very basic RC check:
remove any slash
must be 9 or 10 digits
"""
rc_clean = rc.replace("/", "")
return bool(re.fullmatch(r"\d{9,10}", rc_clean))
# Reconcile patient rows scraped from the kartoteka HTML grid with the
# patients_extracted table: report duplicates/missing patients and refresh
# the stored Medevio row id (rid) keyed by RC (birth number).
conn = mysql.connector.connect(**MYSQL_CFG)

# --- get latest HTML (round 3 snapshots, newest first) ---
with conn.cursor() as cur:
    cur.execute("""
SELECT html
FROM kartoteka_html
where round=3
ORDER BY `fetched-at` DESC
    """)
    snapshots = cur.fetchall()

if not snapshots:
    raise RuntimeError("No HTML found in kartoteka_html")

for snapshot in snapshots:
    html = snapshot[0]
    soup = BeautifulSoup(html, "html.parser")
    records = []
    # One grid row per patient; data-id is the Medevio row id.
    for grid_row in soup.find_all("div", attrs={"role": "row", "data-id": True}):
        data_id = grid_row["data-id"]

        # Full name -> surname + given name(s); grid shows "Surname Name".
        name_btn = grid_row.find("button", class_="MuiTypography-root")
        fullname = name_btn.get_text(strip=True) if name_btn else ""
        parts = fullname.split()
        surname = parts[0] if parts else ""
        name = " ".join(parts[1:]) if len(parts) > 1 else ""

        # RC (birth number) — drop both slash variants.
        id_cell = grid_row.find("div", attrs={"data-field": "IdentificationNumber"})
        rc = (id_cell.get("title", "") if id_cell else "")
        rc = rc.replace("/", "").replace("\\", "")

        # Phone — NBSP -> space, then keep only '+' and digits.
        ph_cell = grid_row.find("div", attrs={"data-field": "Phone"})
        raw_phone = ph_cell.get("title", "") if ph_cell else ""
        raw_phone = raw_phone.replace("\u00A0", " ")
        phone = re.sub(r"[^\d+]", "", raw_phone)

        # Insurance company.
        ins_cell = grid_row.find("div", attrs={"data-field": "InsuranceCompany"})
        poj = ins_cell.get("title", "") if ins_cell else ""

        # Skip rows with no name, no RC, or an invalid RC.
        if not fullname or not rc:
            continue
        if not is_valid_rc(rc):
            continue
        records.append((data_id, fullname, rc, phone, poj))

        # --- per-patient lookup: fresh buffered cursor each time ---
        with conn.cursor(buffered=True) as cur2:
            cur2.execute(
                """
                SELECT *
                FROM patients_extracted
                WHERE rc=%s
                """,
                (rc,),
            )
            # BUGFIX: renamed from `rows`, which shadowed the outer result set.
            matches = cur2.fetchall()
            if len(matches) > 1:
                print(f"Pacient {surname} {name} {rc} je v medeviu {len(matches)}x")
                time.sleep(1)
            elif len(matches) == 0:
                # BUGFIX: message previously claimed the patient *is* in
                # Medevio "0x"; say explicitly that the patient is missing.
                print(f"Pacient {surname} {name} {rc} v medeviu není")
                time.sleep(1)
            elif len(matches) == 1:
                if matches[0][0] != data_id:
                    print(f"Pacient {surname} {name} {rc} má v medeviu jiný id, v db je {matches[0][0]} and nyní je {data_id}")
                    time.sleep(.1)
                # Refresh the stored Medevio row id for this RC.
                cur2.execute("""
                Update patients_extracted set rid=%s where rc=%s""", (data_id, rc))
                conn.commit()

# preview
# for r in records[:10]:
#     print(f"ID: {r[0]}  Name: {r[1]}  RC: {r[2]}  Phone: {r[3]}  Pojistovna: {r[4]}")
#
# print("Total patients:", len(records))
@@ -0,0 +1,188 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
from pathlib import Path
from datetime import datetime
import pymysql
from pymysql.cursors import DictCursor
from playwright.sync_api import sync_playwright, TimeoutError as PWTimeout
# ========= CONFIG =========
# SECURITY NOTE(review): DB credentials are hard-coded in source; move them
# to environment variables or an untracked config file.
MYSQL_CFG = dict(
    host="192.168.1.76",
    port=3307,
    user="root",
    password="Vlado9674+",
    database="medevio",
    cursorclass=DictCursor,
    autocommit=False,  # we commit in batches
)

# Column in patients_extracted that stores Medevio UUID used in the URL:
UUID_COLUMN = "rid"  # <-- change if your column name differs

# Output columns (will be created if missing; MySQL 8.0+ supports IF NOT EXISTS):
REGISTERED_COL = "medevio_registered"  # TINYINT(1) NULL/0/1
CHECKED_AT_COL = "medevio_checked_at"  # DATETIME NULL
ERROR_COL = "medevio_check_error"  # TEXT NULL (optional)

# Medevio routing
PATIENT_URL_TMPL = "https://my.medevio.cz/mudr-buzalkova/klinika/pacienti?pacient={uuid}"

# Login session (created earlier with your script)
STATE_FILE = Path("../medevio_storage.json")

# Batch/pace
BATCH_LIMIT = 5  # how many patients per run
SLEEP_SECONDS = 3  # wait between patients (requested)
NAV_TIMEOUT = 20_000  # ms
TEXT_TIMEOUT = 15_000  # ms (for main area/heading)

# Texts indicating NOT registered:
NOT_REGISTERED_STRINGS = [
    "Pacientka zatím nemá Medevio účet.",
    "Pacient zatím nemá Medevio účet.",
]
# ==========================

# Batch of not-yet-checked patients (registered flag still NULL).
SELECT_SQL = f"""
SELECT {UUID_COLUMN} AS uuid, jmeno, prijmeni, rc
FROM patients_extracted
WHERE {UUID_COLUMN} IS NOT NULL
AND {UUID_COLUMN} <> ''
AND {REGISTERED_COL} IS NULL
LIMIT {BATCH_LIMIT};
"""

# Successful check: store the flag, stamp the time, clear any old error.
UPDATE_OK_SQL = f"""
UPDATE patients_extracted
SET {REGISTERED_COL}=%s, {CHECKED_AT_COL}=NOW(), {ERROR_COL}=NULL
WHERE {UUID_COLUMN}=%s
"""

# Failed check: keep the flag NULL so the row is retried later; record error.
UPDATE_ERR_SQL = f"""
UPDATE patients_extracted
SET {REGISTERED_COL}=NULL, {CHECKED_AT_COL}=NOW(), {ERROR_COL}=%s
WHERE {UUID_COLUMN}=%s
"""

# ALTERs that add the output columns, paired one-to-one with the
# INFORMATION_SCHEMA probes below (for MySQL versions without
# ALTER TABLE ... ADD COLUMN IF NOT EXISTS).
DDL_SQLS = [
    f"ALTER TABLE patients_extracted ADD COLUMN {REGISTERED_COL} TINYINT(1) NULL",
    f"ALTER TABLE patients_extracted ADD COLUMN {CHECKED_AT_COL} DATETIME NULL",
    f"ALTER TABLE patients_extracted ADD COLUMN {ERROR_COL} TEXT NULL",
]
CHECKS_FOR_DDL_SQLS = [
    f"SELECT COUNT(*) AS cnt FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'patients_extracted' AND COLUMN_NAME = '{REGISTERED_COL}'",
    f"SELECT COUNT(*) AS cnt FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'patients_extracted' AND COLUMN_NAME = '{CHECKED_AT_COL}'",
    f"SELECT COUNT(*) AS cnt FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'patients_extracted' AND COLUMN_NAME = '{ERROR_COL}'",
]
def ensure_columns(conn):
    """Add the Medevio output columns to patients_extracted if absent.

    Probes INFORMATION_SCHEMA first and runs the matching ALTER only when
    the column is missing (works on MySQL without ADD COLUMN IF NOT EXISTS).
    Commits once at the end.
    """
    with conn.cursor() as cur:
        for add_sql, probe_sql in zip(DDL_SQLS, CHECKS_FOR_DDL_SQLS):
            cur.execute(probe_sql)
            probe = cur.fetchone()
            if probe["cnt"] == 0:
                print("Column missing")
                cur.execute(add_sql)
                print(f"✓ Executed: {add_sql}")
            else:
                print("Column exists")
    conn.commit()
def pick_registered_flag(page_text: str) -> int:
    """Classify page text: 0 if any 'no Medevio account' phrase occurs, else 1."""
    haystack = page_text or ""
    if any(marker in haystack for marker in NOT_REGISTERED_STRINGS):
        return 0
    return 1
def main():
    """Check Medevio registration status for a batch of patients.

    Fetches up to BATCH_LIMIT patients whose status is still unknown, opens
    each patient's Medevio page in a logged-in Playwright session, classifies
    the page text, and writes the result back to patients_extracted.
    """
    # --- DB: fetch a batch to process ---
    conn = pymysql.connect(**MYSQL_CFG)
    try:
        ensure_columns(conn)
        with conn.cursor() as cur:
            # Czech collation so text comparisons match the application's.
            cur.execute("SET NAMES utf8mb4 COLLATE utf8mb4_czech_ci")
            cur.execute("SET collation_connection = 'utf8mb4_czech_ci'")
            cur.execute(SELECT_SQL)
            rows = cur.fetchall()

        if not rows:
            print("No patients to check (all have medevio_registered filled).")
            return

        print(f"Will process {len(rows)} patients…")

        # --- Playwright session ---
        with sync_playwright() as p:
            browser = p.chromium.launch(headless=False, slow_mo=0)
            context = browser.new_context(storage_state=str(STATE_FILE))
            page = context.new_page()
            page.set_default_timeout(NAV_TIMEOUT)

            processed = ok = errs = 0
            for r in rows:
                processed += 1
                uuid = r["uuid"]
                name = f"{r.get('prijmeni','')}, {r.get('jmeno','')}"
                rc = r.get("rc", "")
                url = PATIENT_URL_TMPL.format(uuid=uuid)
                # BUGFIX: removed stray trailing "0" from the printed URL.
                print(f"URL pro otevření pacienta je: {url}")
                print(f"[{processed:>3}] {name} | RC {rc} | {uuid}{url}")
                try:
                    page.goto(url, wait_until="domcontentloaded")
                    # Optionally wait for a stable anchor; tolerate absence.
                    try:
                        page.get_by_text("Historie požadavků").wait_for(timeout=TEXT_TIMEOUT)
                    except PWTimeout:
                        pass
                    # Let the SPA settle before reading the DOM.
                    time.sleep(SLEEP_SECONDS)

                    # Get full HTML and detect; visible text is the fallback.
                    full_text = page.content()
                    vis_text = page.inner_text("body")
                    registered = pick_registered_flag(full_text) if full_text else pick_registered_flag(vis_text)

                    with conn.cursor() as cur:
                        # BUGFIX: original bound undefined `pid` (NameError);
                        # UPDATE_OK_SQL keys on the rid/uuid column, so bind
                        # the patient's Medevio uuid.
                        cur.execute(UPDATE_OK_SQL, (registered, uuid))
                    conn.commit()
                    ok += 1
                    state = "REGISTERED" if registered == 1 else "NOT REGISTERED"
                    print(f"{state}")
                except Exception as e:
                    conn.rollback()
                    errs += 1
                    msg = f"{type(e).__name__}: {e}"
                    with conn.cursor() as cur:
                        # BUGFIX: same undefined-`pid` defect as above.
                        cur.execute(UPDATE_ERR_SQL, (msg[:1000], uuid))
                    conn.commit()
                    print(f" ! ERROR → {msg}")

            browser.close()
        print(f"Done. processed={processed}, ok={ok}, errors={errs}")
    finally:
        conn.close()


if __name__ == "__main__":
    main()
@@ -0,0 +1,118 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from urllib.parse import urlparse, parse_qs
from playwright.sync_api import sync_playwright, TimeoutError as PWTimeout, Page
# Saved Playwright login session (relative to this script's directory).
STATE_FILE = "../medevio_storage.json"
# Requests listing filtered to unassigned items (neprirazene=1).
POZADAVKY_URL = "https://my.medevio.cz/mudr-buzalkova/klinika/pozadavky?neprirazene=1"
# ---------- helpers ----------
def get_uuid_from_href(href: str) -> str | None:
try:
q = parse_qs(urlparse(href).query)
val = q.get("pozadavek", [None])[0]
return val if val else None
except Exception:
return None
def is_flu_request(text: str) -> bool:
    """True when *text* mentions any inflection of 'chřipka' (flu), with or
    without diacritics, case-insensitively."""
    return re.search(r"ch(r|ř)ipk", text, re.IGNORECASE) is not None
def scrape_visible_rows(page: Page, seen: set) -> list[dict]:
    """Collect every currently rendered request row not yet in *seen*.

    Mutates *seen* by adding each newly collected request id. Returns a list
    of dicts with keys: id, name, rc, text, assigned_to, initials.
    """
    collected: list[dict] = []
    row_loc = page.locator('tr[data-testid="patient-request-row"]')
    for idx in range(row_loc.count()):
        row = row_loc.nth(idx)

        # Request UUID comes from the row's detail link.
        link = row.locator('a[href*="pozadavky?pozadavek="]').first
        href = link.get_attribute("href") if link.count() else None
        req_id = get_uuid_from_href(href) if href else None
        if not req_id or req_id in seen:
            continue

        name = (row.locator('td:nth-child(2) a span').first.text_content(timeout=0) or "").strip()
        rc = (row.locator('a.MuiTypography-overline2').first.text_content(timeout=0) or "").strip()

        # Request text sits in column 3 or 4 depending on layout.
        body_p = row.locator('td:nth-child(3) p.MuiTypography-body1, td:nth-child(4) p.MuiTypography-body1').first
        text_req = (body_p.text_content(timeout=0) or "").strip()
        if not text_req:
            # Fall back to an aria-label when the paragraph is empty.
            labelled = row.locator('td:nth-child(3) [aria-label], td:nth-child(4) [aria-label]').first
            text_req = (labelled.get_attribute("aria-label") or "").strip() if labelled.count() else ""

        # Queue avatar tells us who the request is assigned to.
        avatar = row.locator('[data-testid="queue-avatar"]').first
        assigned_to = (avatar.get_attribute("aria-label") or "").strip() if avatar.count() else ""
        initials = (avatar.text_content(timeout=0) or "").strip() if avatar.count() else ""

        seen.add(req_id)
        collected.append({
            "id": req_id,
            "name": name,
            "rc": rc,
            "text": text_req,
            "assigned_to": assigned_to,
            "initials": initials,
        })
    return collected
def assign_request_to_buzalka(page: Page, request_uuid: str) -> None:
    """Open the request detail identified by *request_uuid* and assign it to
    the "MUDr. Buzalka (já)" queue, then close the dialog."""
    base = POZADAVKY_URL.split('?')[0]
    page.goto(f"{base}?pozadavek={request_uuid}", wait_until="domcontentloaded", timeout=60_000)

    queue_combo = page.locator('div[role="combobox"][aria-labelledby="queue-select-label"]')
    queue_combo.wait_for(state="visible")
    queue_combo.click()

    page.get_by_role("option", name=re.compile(r"MUDr\.?\s*Buzalka", re.I)).click()

    # Let the save request finish before closing the dialog.
    page.wait_for_load_state("networkidle")
    page.locator("button.MuiDialog-close").click()
    print(f"✔ Assigned to MUDr. Buzalka: {request_uuid}")
# ---------- main ----------
def main():
    """Interactive triage of the unassigned-requests listing.

    The operator scrolls the page manually; each Enter press scrapes the
    rows currently in view and assigns flu-related, not-yet-mine requests
    to MUDr. Buzalka. Ctrl+C ends the session cleanly.
    """
    with sync_playwright() as pw:
        browser = pw.chromium.launch(headless=False)  # we want to see the page
        context = browser.new_context(storage_state=STATE_FILE)
        page = context.new_page()
        page.goto(POZADAVKY_URL, wait_until="domcontentloaded", timeout=60_000)

        # Login check: the login page contains one of these phrases.
        body = (page.text_content("body") or "").lower()
        if any(x in body for x in ["přihlášení", "přihlásit", "sign in", "login"]):
            raise SystemExit("Not logged in refresh medevio_storage.json.")

        try:
            page.wait_for_selector('tr[data-testid="patient-request-row"]', timeout=20_000)
        except PWTimeout:
            raise SystemExit("Rows not found: tr[data-testid=patient-request-row].")

        seen: set[str] = set()
        assigned_count = 0
        print("\n>>> Scroll the page manually. Press Enter here any time to scrape current view.")
        print(" Press Ctrl+C to finish.\n")

        try:
            while True:
                input("Press Enter to scan visible rows...")
                for item in scrape_visible_rows(page, seen):
                    text = item["text"]
                    initials = (item["initials"] or "").upper()
                    assigned_to = (item["assigned_to"] or "").lower()
                    if is_flu_request(text) and not ("buzalka" in assigned_to or initials == "VB"):
                        assign_request_to_buzalka(page, item["id"])
                        assigned_count += 1
                print(f"Total newly assigned so far: {assigned_count}")
        except KeyboardInterrupt:
            # BUGFIX: Ctrl+C is the documented exit path — finish cleanly
            # with a summary instead of dumping a traceback.
            print(f"\nFinished. Total newly assigned: {assigned_count}")
        finally:
            browser.close()


if __name__ == "__main__":
    main()
+236
View File
@@ -0,0 +1,236 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from urllib.parse import urlparse, parse_qs
import re
import time
from playwright.sync_api import sync_playwright, TimeoutError as PWTimeout, Page
# ===== function to assign a single request =====
def assign_request_to_buzalka(page: Page, request_uuid: str) -> None:
    """Open the request card for *request_uuid* and assign it to the
    "MUDr. Buzalka (já)" queue; close the dialog and print a confirmation."""
    detail_url = f"https://my.medevio.cz/mudr-buzalkova/klinika/pozadavky?pozadavek={request_uuid}"
    page.goto(detail_url, wait_until="domcontentloaded", timeout=60_000)

    queue_combo = page.locator('div[role="combobox"][aria-labelledby="queue-select-label"]')
    queue_combo.wait_for(state="visible")
    queue_combo.click()

    page.get_by_role("option", name=re.compile(r"MUDr\.?\s*Buzalka", re.I)).click()

    # Wait for the network to settle so the assignment is persisted.
    page.wait_for_load_state("networkidle")
    page.locator("button.MuiDialog-close").click()
    print(f"✔ Požadavek {request_uuid} přiřazen: MUDr. Buzalka (já)")
# ===== main part: walk the listing and handle flu requests =====
# Full listing; the commented-out variant restricts to unassigned only.
POZADAVKY_URL = "https://my.medevio.cz/mudr-buzalkova/klinika/pozadavky"
# POZADAVKY_URL = "https://my.medevio.cz/mudr-buzalkova/klinika/pozadavky?neprirazene=1"
# Saved Playwright login session.
STATE_FILE = "../medevio_storage.json"
# NOTE(review): duplicate imports — Page and time are already imported above.
from playwright.sync_api import Page
import time
def _find_scroll_container(page: Page):
    """Return an ElementHandle of the real scrollable container, or None -> use window."""
    # The JS walks up from a known request row looking for an ancestor with
    # overflow-y auto/scroll; if none is found it tries a list of likely
    # container selectors, and finally returns null (meaning: no dedicated
    # scroll container, callers should scroll the window).
    handle = page.evaluate_handle("""
    () => {
      const isScrollable = el => !!el && (el.scrollHeight > el.clientHeight || el.scrollWidth > el.clientWidth);
      const row = document.querySelector('tr[data-testid="patient-request-row"]');
      if (row) {
        let el = row.parentElement;
        while (el) {
          const style = getComputedStyle(el);
          const overflowY = style.overflowY;
          if (isScrollable(el) && (overflowY === 'auto' || overflowY === 'scroll')) return el;
          el = el.parentElement;
        }
      }
      const guesses = [
        '[role="rowgroup"]', '[role="table"]', '.MuiTableContainer-root',
        '[data-testid="requests-table"]', '.MuiContainer-root', 'main'
      ];
      for (const sel of guesses) {
        const el = document.querySelector(sel);
        if (el) {
          const style = getComputedStyle(el);
          const overflowY = style.overflowY;
          if (isScrollable(el) && (overflowY === 'auto' || overflowY === 'scroll')) return el;
        }
      }
      return null;
    }
    """)
    # If JS returned null, convert to Python None
    # NOTE(review): json_value() on a live element handle may raise; that
    # case is treated the same as "no container" below.
    try:
        if handle is None or handle.json_value() is None:
            return None
    except Exception:
        return None
    return handle
def _has_handle(page: Page, handle) -> bool:
    """True when *handle* still points at a live element.

    Any falsy handle or evaluation failure yields False, telling callers to
    fall back to window-level scrolling.
    """
    if not handle:
        return False
    try:
        alive = page.evaluate("(el)=>!!el", handle)
    except Exception:
        return False
    return bool(alive)
def _scroll_step(page: Page, container_handle, px=800):
    """Scroll the list down by *px* pixels.

    Prefers the dedicated scroll container; when it is gone or the evaluate
    call fails, the whole window is scrolled instead.
    """
    scrolled = False
    if _has_handle(page, container_handle):
        try:
            page.evaluate(
                "(args) => { const [el, dy] = args; el.scrollBy(0, dy); }",
                [container_handle, px],
            )
            scrolled = True
        except Exception:
            scrolled = False
    if not scrolled:
        # Fallback to window
        page.evaluate("dy => window.scrollBy(0, dy)", px)
def _scroll_to_bottom(page: Page, container_handle):
    """Jump straight to the bottom of the scroll container, or of the window
    when no dedicated container is available."""
    done = False
    if _has_handle(page, container_handle):
        try:
            page.evaluate("(el) => el.scrollTo(0, el.scrollHeight)", container_handle)
            done = True
        except Exception:
            done = False
    if not done:
        page.evaluate("() => window.scrollTo(0, document.body.scrollHeight)")
def _click_load_more_if_any(page: Page) -> bool:
    """Click a visible 'load more' button if present; report whether clicked."""
    more_btn = page.locator("button:has-text('Načíst více'), button:has-text('Zobrazit další'), button:has-text('Load more')")
    if not (more_btn.count() and more_btn.is_visible()):
        return False
    more_btn.click()
    return True
def load_all_requests(page: Page, max_rounds: int = 200, stagnation_limit: int = 4) -> None:
    """
    Incrementally loads the entire list of requests.
    Stops after 'stagnation_limit' rounds without row growth, or after max_rounds.
    """
    # Wait for the first rows before probing for the scroll container.
    page.wait_for_selector('tr[data-testid="patient-request-row"]', timeout=20000)
    container = _find_scroll_container(page)
    prev_count = page.locator('tr[data-testid="patient-request-row"]').count()
    stagnant = 0
    for _ in range(max_rounds):
        # Prefer an explicit "load more" button when the UI offers one.
        if _click_load_more_if_any(page):
            page.wait_for_load_state("networkidle")
        # small incremental scrolls
        for _ in range(4):
            _scroll_step(page, container, px=800)
            time.sleep(0.15)
        # touch bottom at least once
        _scroll_to_bottom(page, container)
        # settle
        page.wait_for_load_state("networkidle")
        # If a spinner is shown, give it a short moment to detach.
        spinners = page.locator('[role="progressbar"], .MuiCircularProgress-root')
        if spinners.count():
            try:
                spinners.first.wait_for(state="detached", timeout=5000)
            except Exception:
                pass
        # growth check: count stagnation across consecutive rounds
        curr_count = page.locator('tr[data-testid="patient-request-row"]').count()
        if curr_count <= prev_count:
            stagnant += 1
        else:
            stagnant = 0
        prev_count = curr_count
        if stagnant >= stagnation_limit:
            break
def get_uuid_from_href(href: str) -> str | None:
try:
q = parse_qs(urlparse(href).query)
val = q.get("pozadavek", [None])[0]
return val
except Exception:
return None
def is_flu_request(text: str) -> bool:
    """True when *text* contains any inflection of the word 'chřipka' (flu),
    with or without diacritics, case-insensitively."""
    return re.search(r"ch(r|ř)ipk", text, re.IGNORECASE) is not None
def main():
    """Load the full request listing and assign every flu-related request
    that is not already assigned to MUDr. Buzalka.

    Raises:
        SystemExit: when not logged in or no request rows are found.
    """
    with sync_playwright() as pw:
        browser = pw.chromium.launch(headless=False)
        context = browser.new_context(storage_state=STATE_FILE)
        page = context.new_page()
        page.goto(POZADAVKY_URL, wait_until="domcontentloaded", timeout=60_000)

        # Login check: the login page contains one of these phrases.
        body = (page.text_content("body") or "").lower()
        if any(x in body for x in ["přihlášení", "přihlásit", "sign in", "login"]):
            raise SystemExit("Vypadá to, že nejsi přihlášený obnov prosím medevio_storage.json.")

        try:
            page.wait_for_selector('tr[data-testid="patient-request-row"]', timeout=20_000)
        except PWTimeout:
            raise SystemExit("Nenašel jsem řádky požadavků (selector tr[data-testid=patient-request-row]).")

        # Load the complete listing (infinite scroll / load-more) first.
        load_all_requests(page)

        rows = page.locator('tr[data-testid="patient-request-row"]')
        # BUGFIX: `count` was never assigned — rows.count() was only printed,
        # so the loop below raised NameError.
        count = rows.count()
        print("Loaded rows:", count)

        for i in range(count):
            row = rows.nth(i)

            # Request UUID from the row's detail link.
            a_with_req = row.locator('a[href*="pozadavky?pozadavek="]').first
            href = a_with_req.get_attribute("href") if a_with_req.count() else None
            req_id = get_uuid_from_href(href) if href else None
            if not req_id:
                continue

            # Request text, used for the flu filter.
            text_p = row.locator('td:nth-child(3) p.MuiTypography-body1, td:nth-child(4) p.MuiTypography-body1').first
            text_req = text_p.inner_text().strip() if text_p.count() else ""
            if not text_req:
                aria = row.locator('td:nth-child(3) [aria-label], td:nth-child(4) [aria-label]').first
                text_req = (aria.get_attribute("aria-label") or "").strip() if aria.count() else ""

            if not is_flu_request(text_req):
                # Not a flu request — skip.
                continue

            # Assignment info from the queue avatar in the listing.
            avatar = row.locator('[data-testid="queue-avatar"]').first
            assigned_to = (avatar.get_attribute("aria-label") or "").strip() if avatar.count() else ""
            initials = avatar.inner_text().strip() if avatar.count() else ""
            already_mine = ("buzalka" in assigned_to.lower()) or (initials.upper() == "VB")
            if already_mine:
                print(f"= SKIP (už přiřazeno mně): {req_id} | {text_req}")
                continue

            print(f"→ Přiřazuji chřipkový požadavek: {req_id} | {text_req}")
            # NOTE(review): assign_request_to_buzalka navigates away from the
            # listing and never navigates back, so subsequent row locators
            # act on the detail page — confirm intended behavior.
            assign_request_to_buzalka(page, req_id)
            time.sleep(1)

        context.close()
        browser.close()


if __name__ == "__main__":
    main()
+41
View File
@@ -0,0 +1,41 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from playwright.sync_api import sync_playwright
# Saved Playwright login session.
STATE_FILE = "../medevio_storage.json"
# Detail URL of the one specific request this script assigns.
REQUEST_URL = (
    "https://my.medevio.cz/mudr-buzalkova/klinika/"
    "pozadavky?pozadavek=e28cbf71-8280-4078-a881-c44119bbccc2"
)
def main():
    """Open one specific request and move it to the "MUDr. Buzalka (já)" queue."""
    with sync_playwright() as pw:
        browser = pw.chromium.launch(headless=False)
        context = browser.new_context(storage_state=STATE_FILE)
        page = context.new_page()

        # Open the specific request detail.
        page.goto(REQUEST_URL, wait_until="domcontentloaded", timeout=60_000)

        # The "Fronta" (queue) combobox.
        queue_box = page.locator('div[role="combobox"][aria-labelledby="queue-select-label"]')
        queue_box.wait_for(state="visible")
        queue_box.click()

        # Select the "MUDr. Buzalka (já)" option.
        page.get_by_role("option", name=re.compile(r"MUDr\.?\s*Buzalka", re.I)).click()

        # Wait until the network is idle (the change has been sent/saved).
        page.wait_for_load_state("networkidle")

        # Close the dialog.
        page.locator("button.MuiDialog-close").click()

        context.close()
        browser.close()


if __name__ == "__main__":
    main()

Some files were not shown because too many files have changed in this diff Show More