"""
Hier sind die alleinstehende Funktionen

Funktionen:
- send_msg_telegram: Asynchrones Senden einer HTML-formatierten Nachricht an IT Gruppe
- generate_uid: Generiert eine einzigartige UID

Imports:
- random: Für die Generierung zufälliger Nummern.
- string: Für den Zugriff auf Zeichensätze.
- telegram: Für die Interaktion mit der Telegram API.
- telegram.ext: Für die Verwaltung von Telegram Bots und deren Nachrichtenverarbeitung.

"""

import random
import string
import logging
import os
import asyncio
from google.oauth2 import service_account
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload
from googleapiclient.errors import HttpError
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from werkzeug.utils import secure_filename
from typing import Optional, Dict, Tuple
import pycountry
import phonenumbers
from phonenumbers import geocoder
from phonenumbers.phonenumberutil import region_code_for_country_code
from setup import headers_tookan, json_api_key_tookan, PLANDAY_CLIENT_ID, PLANDAY_REFRESH_TOKEN, lara_bot_id, onduty_it
from models import get_all_data_from_fk_table, get_value_by_id, get_data_from_fk_table, get_all_user_id, get_cred_google_service_acc, update_child_data, insert_child_data, delete_child_data, insert_partner_db, update_partner_data, delete_partner_data, get_doc_fk_table, insert_docs_db
from datetime import datetime, timedelta
import requests
import json
import ast
import mimetypes
import re
from zoneinfo import ZoneInfo

logging.basicConfig(level=logging.INFO)

# Version shim from the python-telegram-bot examples: older releases do not
# expose __version_info__, so fall back to a zero tuple.
from telegram import __version__ as TG_VER
try:
    from telegram import __version_info__
except ImportError:
    __version_info__ = (0, 0, 0, 0, 0)  # type: ignore[assignment]

import telegram
from telegram.ext import Application, CommandHandler, ContextTypes, MessageHandler, filters


today = datetime.now().strftime("%Y%m%d")

async def send_msg_telegram(TOKEN, msg, id):
    """
    Sends a message to a Telegram user or group.

    Args:
        TOKEN (str): Token for the Telegram bot.
        msg (str): Message text (HTML formatting is allowed).
        id (int): Chat ID of the recipient.
    """
    bot = telegram.Bot(TOKEN)
    await bot.send_message(id, msg, parse_mode='HTML')
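
# Minimal usage sketch from synchronous code (token and chat id are placeholders):
#
#     asyncio.run(send_msg_telegram("123456:ABC-placeholder", "<b>Server down</b>", -1001234567890))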

def generate_uid():
    """
    Generates a UID following the given template.

    The format was specified by Gastro Kurier GmbH.

    Returns:
        str: A UID that does not yet exist among the known user IDs.
    """
    # The leading letter should be uppercase.
    existing_uids = get_all_user_id()

    while True:
        letters = string.ascii_uppercase
        first_char = random.choice(letters)
        # One uppercase letter followed by 7 hex digits (0x0 .. 0xFFFFFFF)
        part1 = first_char + ('0000000' + hex(random.randint(0, 268435455))[2:]).zfill(8)[-7:]
        # Two blocks of 4 hex digits each (values 0x0 .. 0xFFF, so the first digit is always 0)
        part2 = ('0000' + hex(random.randint(0, 4095))[2:]).zfill(5)[-4:]
        part3 = ('0000' + hex(random.randint(0, 4095))[2:]).zfill(5)[-4:]
        hex_value = 'UID' + part1 + part2 + part3

        if hex_value not in existing_uids:
            existing_uids.add(hex_value)
            return hex_value
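
# Shape of the generated IDs: "UID" + 1 uppercase letter + 7 + 4 + 4 hex characters,
# e.g. "UIDA1f3c9d20e2d0b4a". Note that hex() yields lowercase digits, so only the
# leading random letter is guaranteed uppercase.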

# Path to a local service-account key file (note: create_google_folder below loads
# its credentials from the database via get_cred_google_service_acc instead)
SERVICE_ACCOUNT_FILE = 'google/credentials.json'

# Google Drive API scopes
SCOPES = ['https://www.googleapis.com/auth/drive']

def check_folder_exists(service, folder_id):
    """
    Checks whether a Drive folder with the given ID exists and is accessible.

    Args:
        service: An authenticated Google Drive API service object.
        folder_id (str): The ID of the folder to check.

    Returns:
        bool: True if the folder exists and is accessible, False otherwise.
    """
    try:
        # Fetch only the folder's id to verify it exists
        service.files().get(fileId=folder_id, fields='id', supportsAllDrives=True).execute()
        return True
    except HttpError as error:
        logging.error("An error occurred: %s", error)
        if error.resp.status == 404:
            logging.error("Folder with ID %s not found.", folder_id)
        return False

def create_google_folder(folder_name, parent_id="1idjXBYaU6B5vcB5GiR3bKWScLvNowdfI"):
    """
    Creates a folder in Google Drive with the given name, within a specified parent folder.

    Args:
        folder_name (str): The name of the folder to create.
        parent_id (str): The ID of the parent folder in which to create the new folder (default is set).

    Returns:
        str: The URL of the created folder, or a message indicating an error.
    """
    # Create credentials from the service-account info stored in the database
    cred_json = get_cred_google_service_acc()

    try:
        credentials = service_account.Credentials.from_service_account_info(
            cred_json, scopes=SCOPES)
    except Exception as e:
        logging.error("Failed to load service account credentials: %s", e)
        return "Error loading service account credentials."

    # Build the Google Drive service object
    try:
        service = build('drive', 'v3', credentials=credentials)
    except Exception as e:
        logging.error("Failed to build the Drive service: %s", e)
        return "Error building the Drive service."

    # Check if the parent folder exists and is accessible
    if not check_folder_exists(service, parent_id):
        return f"Parent folder with ID {parent_id} not found or inaccessible."

    # File metadata for folder creation
    file_metadata = {
        'name': folder_name,
        'mimeType': 'application/vnd.google-apps.folder',
        'parents': [parent_id] if parent_id else []
    }
    
    # Attempt to create the folder and fetch its URL
    try:
        file = service.files().create(body=file_metadata, fields='id, webViewLink', supportsAllDrives=True).execute()
        folder_url = file.get('webViewLink')
        return folder_url
    except HttpError as error:
        logging.error("An error occurred during folder creation: %s", error)
        return "Error creating the folder."

def create_tookan_acc(phone, username, firstname, lastname, email):
    """
    Creates an agent account in Tookan.

    Args:
        phone (str): Phone number.
        username (str): Username (currently unused; the email is sent as the Tookan username).
        firstname (str): First name.
        lastname (str): Last name.
        email (str): Email address.

    Returns:
        int or None: The Tookan fleet_id on success, None otherwise
        (a failure notification is sent to the IT group via Telegram).
    """

    val = {
        "email": email,
        "phone": phone,
        "username": email,
        "password": "0800!Dinner",
        "first_name": firstname,
        "last_name": lastname,
        "team_id": "1558633",
        "timezone": "-120"
    }
    val.update(json_api_key_tookan)

    response = requests.post('https://api.tookanapp.com/v2/add_agent', json=val, headers=headers_tookan)
    data = response.json()
    # Tookan signals success via both the HTTP status and the 'status' field in the body
    if data['status'] == 200 and response.status_code == 200:
        #update_tookan_id(data['data']['fleet_id'], user_id)
        return data['data']['fleet_id']
        
    asyncio.run(send_msg_telegram(lara_bot_id, f"Tookan acc creation fail of {email}: {response.text} | Talentsphere", onduty_it))
    return None

def update_tookan_user_status(fleet_id, block_status, reason):
    """
    Blocks or unblocks a user in the Tookan system based on the provided fleet ID and block status.

    Args:
        fleet_id (int): The unique identifier for the Tookan user to be blocked or unblocked.
        block_status (int): The status indicating whether to block (1) or unblock (0) the user. Default is 1 (block).

    Returns:
        dict: The response data from the Tookan API, indicating success or failure of the operation.
    """
    val = {
        "fleet_id": fleet_id,
        "block_status": block_status,
        "block_reason": reason
    }
    val.update(json_api_key_tookan)

    response = requests.post('https://api.tookanapp.com/v2/block_and_unblock_fleet', json=val, headers=headers_tookan)
    data = response.json()
    if data['status'] == 200:
        return True
    return False
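
# Usage sketch (fleet_id is a placeholder): block a courier with a reason, or
# unblock by passing block_status=0.
#
#     update_tookan_user_status(fleet_id=123456, block_status=1, reason="Gekündigt")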

def get_user_planday_employeeGroups(planday_id):
    """
    Retrieves the employee groups of a Planday employee using their unique Planday ID.

    This function sends a GET request to the Planday API to fetch employee details based on the provided
    Planday ID and extracts the 'employeeGroups' list from the response data.

    Parameters:
    - planday_id (int): The unique identifier of the employee in the Planday system.

    Returns:
    - list or None: The employee groups assigned to the employee if found; otherwise, None.

    Raises:
    - requests.exceptions.RequestException: An error from the `requests` library if the HTTP request fails.
    - KeyError: If the expected 'employeeGroups' key is not found in the JSON response.

    Note:
    - Ensure that the Planday API credentials and client ID are correctly configured in the environment.
    - The function assumes that the employee has at least one employee group assigned; if not, it returns None.
    """
    planday_headers = get_planday_tokan(PLANDAY_CLIENT_ID)
    response = requests.get(f'https://openapi.planday.com/hr/v1.0/employees/{planday_id}', headers=planday_headers)
    data = response.json()
    if response.status_code == 200:
        if data['data']['employeeGroups']:
            return data['data']['employeeGroups']
    return None

def move_planday_acc_from_DNF_to_employeeGroups(planday_id, employeeGroups):
    """
    Moves a Planday employee out of the DNF group by restoring their previous employee groups.

    This function sends a PUT request to the Planday API for the employee identified by the
    provided Planday ID, assigning them the given employee groups.

    Parameters:
    - planday_id (int): The unique identifier of the employee in the Planday system.
    - employeeGroups (str or None): A JSON string with an 'employeeGroups' key holding a list of
      group IDs. If None, the function is a no-op and returns True.

    Returns:
    - bool: True if the assignment was successful (HTTP status code 204), False otherwise.

    Raises:
    - requests.exceptions.RequestException: An error from the `requests` library if the HTTP request fails.

    Note:
    - Ensure that the Planday API credentials and client ID are correctly configured in the environment.
    - The function assumes that the provided group IDs are valid and exist in the Planday system.
    """
    planday_headers = get_planday_tokan(PLANDAY_CLIENT_ID)
    url = f'https://openapi.planday.com/hr/v1.0/employees/{planday_id}'
    if employeeGroups is None:
        return True

    employeeGroups = json.loads(employeeGroups)
    payload = {
        "employeeGroups": employeeGroups['employeeGroups']
    }

    try:
        response = requests.put(
            url,
            headers={**planday_headers, "Content-Type": "application/json"},
            json=payload
        )
        logging.info("Planday employeeGroups update: %s %s", response.status_code, response.text)
        return response.status_code == 204

    except requests.RequestException as e:
        logging.error("Planday employeeGroups update request failed: %s", e)
        raise

def update_planday_address(planday_id: str, address: str, postal_code: str, location: str, country: str) -> bool:
    """
    Updates the street address of a Planday employee.

    The address parts are joined into a single 'street1' string, matching the
    format used when the account is created.

    Returns:
        bool: True if the update was successful (HTTP status code 204), False otherwise.
    """
    url = f"https://openapi.planday.com/hr/v1.0/employees/{planday_id}"
    planday_headers = get_planday_tokan(PLANDAY_CLIENT_ID)

    payload = {
        "street1": f"{address}, {postal_code} {location}, {country}"
    }

    try:
        response = requests.put(
            url,
            headers={**planday_headers, "Content-Type": "application/json"},
            json=payload
        )
        return response.status_code == 204
    except Exception as e:
        # optionally also send to telegram
        logging.error("Planday address update failed: %s", e)
        return False

ADDRESS_KEYS = ("address", "postal_code", "location", "country")

def _norm(v):
    """Normalizes a value for comparison: None becomes '', then strip + casefold."""
    return (v or "").strip().casefold()

def address_changed(old_data: dict, new_data: dict) -> bool:
    """
    Compare old vs new address fields.

    Args:
        old_data (dict): old user_data row from get_user_data_for_edit_as_string
        new_data (dict): update_data with new values

    Returns:
        bool: True if at least one address field differs, False otherwise
    """
    old_addr = {k: old_data.get(k) for k in ADDRESS_KEYS}
    new_addr = {k: new_data.get(k) for k in ADDRESS_KEYS}
    return any(_norm(old_addr[k]) != _norm(new_addr[k]) for k in ADDRESS_KEYS)
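
# Example: thanks to _norm, {'location': 'Bern'} vs. {'location': ' bern '} does not
# count as a change, while a differing postal_code does.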

def apply_planday_address_update(planday_id: str, new_data: dict) -> bool:
    """
    Apply address update to Planday.

    Args:
        planday_id (str): Employee's Planday ID
        new_data (dict): update_data with address fields

    Returns:
        bool: True if update succeeded, False otherwise
    """
    if not planday_id:
        return False

    try:
        return update_planday_address(
            planday_id,
            new_data.get("address"),
            new_data.get("postal_code"),
            new_data.get("location"),
            new_data.get("country")
        )
    except Exception as e:
        # optionally also send to telegram
        logging.error("Applying Planday address update failed: %s", e)
        return False
    
def get_planday_tokan(client_id):
    """
    Obtain authorization headers for accessing the Planday API by using a refresh token to request a new access token.

    This function sends a POST request to the Planday authentication endpoint with the client ID, grant type,
    and a predefined refresh token. It extracts the access token from the response and prepares an authorization
    header that can be used in subsequent API requests to Planday.

    Parameters:
    - client_id (str): The client ID associated with the Planday API credentials.

    Returns:
    - dict: A dictionary containing the headers necessary for authenticated API requests. This includes:
        - 'Authorization': A bearer token format of the access token.
        - 'X-ClientId': The client ID passed to the function.

    Raises:
    - requests.exceptions.RequestException: An error from the `requests` library if the HTTP request fails.
    - KeyError: If the expected 'access_token' is not found in the JSON response.

    Note:
    - The refresh token is read from the setup module (PLANDAY_REFRESH_TOKEN) rather than being
      hardcoded here, so it can be rotated without touching this function.
    """
    data_planday = {'client_id': client_id, 'grant_type': 'refresh_token', 'refresh_token': PLANDAY_REFRESH_TOKEN}
    response_planday = requests.post('https://id.planday.com/connect/token', data=data_planday)

    json_resp = json.loads(response_planday.text)

    token_planday = json_resp['access_token']

    headers = {
        "Authorization": "Bearer %s" % token_planday,
        "X-ClientId": client_id
    }
    return headers
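
# Shape of the returned headers (sketch):
#
#     {"Authorization": "Bearer <access_token>", "X-ClientId": "<client_id>"}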

def get_department_id(department_name):
    """
    Retrieve the department ID for a given department name from a list of departments.

    This function splits the provided department name to extract the last word, which is assumed to
    represent a unique department code. It then loads the stored Planday department data and searches
    for a matching department code within these details to return the corresponding department ID.

    Parameters:
    - department_name (str): The full name of the department, where the last word is expected to be a unique code.

    Returns:
    - int or None: The unique department ID corresponding to the given department name. Returns None if no
      matching department is found.

    Raises:
    - KeyError: If 'department_name' key is not found in any item of the planday_departments list.

    Example Usage:
    >>> get_department_id("Finance Department A123")
    101  # Assuming '101' is the ID associated with "Finance Department A123"

    Note:
    - The function `get_all_data_from_fk_table` is assumed to be a pre-existing function that retrieves
      all department data from a source such as a database or a flat file. The structure of the returned
      data is assumed to be a list of dictionaries, where each dictionary represents a department with
      keys including 'department_name' and 'department_id'.
    """
    ou_code_input = department_name.split()[-1]
    planday_departments = get_all_data_from_fk_table("planday_departments")
    for item in planday_departments:
        ou_code_planday = item['department_name'].split()[-1]
        if ou_code_planday == ou_code_input:
            return item['department_id']
    return None

def get_employee_group_id(department_name):
    """
    Retrieves the unique group ID associated with a specified department name from a predefined list of employee groups.

    This function extracts the last word from the given department name, treating it as a unique identifier code.
    It then searches through a dataset of employee groups for a matching code. If a match is found, the associated
    group ID is returned.

    Parameters:
    - department_name (str): The name of the department, where the last word is expected to be the unique identifier code.

    Returns:
    - int or None: The group ID associated with the provided department name if found, otherwise None.

    Raises:
    - KeyError: If 'name' or 'groupId' keys are missing in any of the dictionaries within the list returned by
      `get_all_data_from_fk_table`.

    Note:
    - The `get_all_data_from_fk_table` function is assumed to access a database or some form of persistent storage
      to retrieve a list of dictionaries, each representing an employee group with at least 'name' and 'groupId' keys.
    - This function does not handle multiple matches; it returns the group ID for the first match found.

    Example Usage:
    >>> get_employee_group_id("Sales Division X200")
    500  # Assuming '500' is the group ID for "Sales Division X200"

    This function is particularly useful for systems where department or group identifiers are embedded in
    descriptive text fields and consistency in naming conventions is maintained.
    """
    ou_code_input = department_name.split()[-1]
    planday_groups = get_all_data_from_fk_table("planday_employee_group")
    for item in planday_groups:
        ou_code_planday = item['name'].split()[-1]
        if ou_code_planday == ou_code_input:
            return item['groupId']
    return None
            

def create_planday_acc(firstname, lastname, email, department, phone, prefix, address, postal_code,location, gender_id, country, birthDate):
    """
    Creates a new employee account on the Planday platform using provided personal and departmental details.

    This function compiles an employee's information into a formatted request to the Planday API. It handles the
    assembly of complex objects like addresses and phone numbers, resolves department and group IDs, and posts
    this data to the Planday HR endpoint to create an employee record.

    Parameters:
    - firstname (str): The first name of the employee.
    - lastname (str): The last name of the employee.
    - email (str): The email address of the employee.
    - department (str): The internal region ID used to resolve the Planday department name and IDs.
    - phone (str): The employee's cell phone number.
    - prefix (str): The dial code prefix for the employee's phone number.
    - address (str): The street address of the employee.
    - postal_code (str): The postal code of the employee's address.
    - location (str): The city or locality of the employee's address.
    - gender_id (int): The salutation/gender ID used to determine the employee's gender (see get_gender).
    - country (str): The country of the employee's address.
    - birthDate (str): The birth date of the employee in ISO 8601 format (YYYY-MM-DD).

    Returns:
    - int or None: Returns the unique Planday employee ID if the creation is successful, otherwise None.

    Raises:
    - requests.exceptions.RequestException: If the HTTP request to the Planday API fails.
    - KeyError: If expected keys are missing from the API response.

    Example Usage:
    >>> create_planday_acc("John", "Doe", "john.doe@example.com", "5", "555-1234", "+1", "123 Elm St",
                           "90210", "Springfield", 1, "USA", "1980-01-01")
    12345  # Assuming the Planday API returns an employee ID of 12345

    Note:
    - This function directly interacts with external services (Planday API) and expects certain helper functions
      to provide necessary preprocessing of input data (e.g., `get_department_id`, `get_gender`).
    - The function assumes that all helper functions are implemented correctly and available in the scope.
    - It is assumed that the API keys and client IDs are correctly configured in the environment.
    """
    street1 = f"{address}, {postal_code} {location}, {country}"
    gender = get_gender(gender_id)
    planday_headers = get_planday_tokan(PLANDAY_CLIENT_ID)
    department_name = get_value_by_id("users_region", "id",department, "region")
    department_id = get_department_id(department_name)
    logging.info("Planday department: %s (id=%s)", department_name, department_id)
    employeeGroup_id = get_employee_group_id(department_name)
    phone_country_code = get_country_code_by_dial_code(prefix)
    val = {
        "firstName": firstname,
        "lastName": lastname,
        "userName": email,
        "departments": [department_id],
        "employeeGroups": [employeeGroup_id],
        "cellPhone": phone,
        "cellPhoneCountryId": prefix[1:],
        "cellPhoneCountryCode": phone_country_code,
        "gender": gender,
        "street1": street1,
        "birthDate": birthDate
    }
    response = requests.post('https://openapi.planday.com/hr/v1.0/employees', headers=planday_headers, json=val)
    data = response.json()
    logging.info("Planday employee creation response: %s", data)
    if response.status_code == 200:
        return data['data']['id']
    asyncio.run(send_msg_telegram(lara_bot_id, f"Planday acc creation fail: {response.text} | Talentsphere", onduty_it))
    return None

def deactivate_planday_acc(employee_id, reason="Gekündigt", keep_shifts=True, date=None):
    """
    Deactivates an employee account on the Planday platform using the provided employee ID.
    
    Sends a PUT request with required JSON payload.

    Parameters:
    - employee_id (int): The employee's ID.
    - reason (str): Reason for deactivation.
    - keep_shifts (bool): Whether to keep the employee's existing shifts.
    - date (str): Deactivation date in YYYY-MM-DD format. Defaults to today.

    Returns:
    - bool: True if successful (204), False otherwise.
    """
    
    if date is None:
        date = datetime.today().strftime('%Y-%m-%d')

    planday_headers = get_planday_tokan(PLANDAY_CLIENT_ID)
    payload = {
        "date": date,
        "reason": reason,
        "keepShifts": keep_shifts
    }

    url = f'https://openapi.planday.com/hr/v1.0/employees/deactivate/{employee_id}'
    
    try:
        response = requests.put(url, headers=planday_headers, json=payload)
        logging.info("Planday deactivation: %s %s", response.status_code, response.text)
        return response.status_code == 204
    except requests.RequestException as e:
        logging.error("Planday deactivation request failed: %s", e)
        return False


def move_planday_acc_DNF(employee_id):
    """
    Moves a Planday employee into the DNF employee group (group ID 272744).

    Parameters:
    - employee_id (int): The employee's Planday ID.

    Returns:
    - bool: True if successful (204), False otherwise.
    """
    planday_headers = get_planday_tokan(PLANDAY_CLIENT_ID)
    payload = {
        'employeeGroups': [272744]  # ID of the DNF employee group in Planday
    }

    url = f'https://openapi.planday.com/hr/v1.0/employees/{employee_id}'

    try:
        response = requests.put(url, headers=planday_headers, json=payload)
        logging.info("Planday move to DNF: %s %s", response.status_code, response.text)
        return response.status_code == 204
    except requests.RequestException as e:
        logging.error("Planday move to DNF failed: %s", e)
        return False

def reactivate_planday_acc(employee_id, department):
    """
    Reactivates an employee account on the Planday platform using the provided employee ID
    and assigns them to the given department.
    
    Parameters:
    - employee_id (int): The employee's ID.
    - department (int): The internal region ID which maps to a Planday department.
    
    Returns:
    - bool: True if successful (204), False otherwise.
    """

    # Resolve the Planday department name and the external department ID
    department_name = get_value_by_id("users_region", "id", department, "region")
    department_id = get_department_id(department_name)  # must return the Planday department ID!

    planday_headers = get_planday_tokan(PLANDAY_CLIENT_ID)
    url = f'https://openapi.planday.com/hr/v1.0/employees/reactivate/{employee_id}'

    payload = {
        "departments": [department_id]  # field name must match exactly what the API expects
    }

    try:
        response = requests.put(
            url,
            headers={**planday_headers, "Content-Type": "application/json"},
            json=payload
        )
        return response.status_code == 200

    except requests.RequestException as e:
        logging.error("Planday reactivation request failed: %s", e)
        raise


def get_country_dialing_codes():
    """
    Builds dialing-code lookups from pycountry and phonenumbers.

    Returns:
        tuple: (country_dial_codes_unique, countries). The first list maps each unique
        dial code to one representative region code; the second lists all countries
        with their region code and dial code.
    """
    unique_codes = {}
    countries = []

    for c in pycountry.countries:
        rc = getattr(c, "alpha_2", None)
        if not rc:
            continue
        cc = phonenumbers.country_code_for_region(rc)
        if cc > 0:
            dial = f"+{cc}"
            # remember only one representative region per dial code
            unique_codes.setdefault(dial, rc)
            countries.append({"country": c.name, "region_code": rc, "dial_code": dial})

    # Kosovo (XK) is not listed in pycountry, so add it manually
    unique_codes.setdefault("+383", "XK")
    if not any(x["region_code"] == "XK" for x in countries):
        countries.append({"country": "Kosovo", "region_code": "XK", "dial_code": "+383"})

    # Sort the outputs
    country_dial_codes_unique = [
        {"dial_code": d, "region_code": r}
        for d, r in sorted(unique_codes.items(), key=lambda x: int(x[0][1:]))
    ]
    countries.sort(key=lambda x: x["country"])

    return country_dial_codes_unique, countries
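
# Output shape sketch (the representative region per dial code is simply the
# first one encountered during iteration):
#
#     ([{'dial_code': '+1', 'region_code': '...'}, ...],
#      [{'country': 'Afghanistan', 'region_code': 'AF', 'dial_code': '+93'}, ...])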

def get_country_code_by_dial_code(dial_code):
    """
    Converts a telephone dialing code to its corresponding ISO country code.

    This function normalizes the input dial code to ensure it starts with a '+', and then attempts to map this
    dialing code to a country's ISO code using the country's numeric code. The conversion relies on parsing
    the dial code into a numeric country code and then finding the corresponding ISO country code.

    Parameters:
    - dial_code (str): The international telephone dialing prefix for a country, which may or may not start with '+'.

    Returns:
    - str or None: The ISO country code associated with the given dial code if found; otherwise, None.

    Example Usage:
    >>> get_country_code_by_dial_code('+1')
    'US'  # phonenumbers treats the US as the main region for +1

    Notes:
    - The function returns None (it does not raise) if it encounters an error during the process,
      including improper input formats or unrecognized dial codes.
    - It relies on `region_code_for_country_code` from phonenumbers to map numeric country codes
      to their ISO country codes.

    This function is particularly useful for applications that need to associate telephone numbers with countries,
    such as in telecommunications software or geographic data processing.
    """

    # Normalize dial code to ensure it starts with '+'
    if not dial_code.startswith('+'):
        dial_code = '+' + dial_code
    try:
        # Convert the dial code to an integer country code
        country_code = int(dial_code[1:])
        # Get the region code (country ISO code) from the country code
        region_code = region_code_for_country_code(country_code)
        return region_code
    except Exception as e:
        return None

def get_gender(salutation):
    """
    Determines the gender based on the provided salutation ID.

    Salutation ID 1 (e.g. "Herr"/"Monsieur") is mapped to 'Male'; every other ID
    is mapped to 'Female'.

    Parameters:
    - salutation (int): The salutation ID used to address an individual.

    Returns:
    - str: 'Male' if the salutation ID is 1, 'Female' otherwise.

    Example Usage:
    >>> get_gender(1)
    'Male'
    >>> get_gender(2)
    'Female'

    Note:
    - This mapping only distinguishes two genders and assumes any non-male salutation is female.
      Additional salutation IDs can be added to expand coverage or improve accuracy.
    """
    if salutation == 1:
        return "Male"
    else:
        return "Female"

def compare_detailed_changes_administration(data_dict):
    """
    Analyzes changes between 'content_old' and 'content_new' within the provided data dictionary,
    identifying differences in dictionary-formatted string values.

    This function processes a list of rows contained in `data_dict`. Each row is expected to have
    two entries: 'content_old' and 'content_new', which are strings that represent dictionaries.
    It compares these dictionary entries for each row, extracts differences, and updates the rows
    with dictionaries reflecting changes in values for overlapping keys.

    Parameters:
    - data_dict (dict): A dictionary containing keys 'data' and 'columns'.
        'data' should be a list of lists where each inner list represents a row of data.
        'columns' should be a list of column names indicating where 'content_old' and 'content_new' are positioned.

    Returns:
    - dict: The same `data_dict` input dictionary where the 'content_old' and 'content_new' fields in each row
            have been replaced with dictionaries only containing keys that had different values between old and new.

    Raises:
    - IndexError: If 'content_old' or 'content_new' are not found in the 'columns' list.
    - SyntaxError, ValueError: These exceptions may be raised and caught within the helper function `load_dict`
      due to improper formatting or content in the string representations of the dictionaries.

    Example Usage:
    >>> data = {
        'columns': ['id', 'content_old', 'content_new'],
        'data': [
            [1, "{ 'a': 1, 'b': 2 }", "{ 'a': 1, 'b': 3 }"]
        ]
    }
    >>> compare_detailed_changes_administration(data)
    {'columns': ['id', 'content_old', 'content_new'], 'data': [[1, {'b': 2}, {'b': 3}]]}

    Note:
    - The function assumes the string representations in 'content_old' and 'content_new' are valid Python
      dictionaries expressed as strings. Errors in formatting can lead to empty dictionaries being processed.
    """

    def load_dict(data):
        # Parse a dictionary-formatted string; malformed input yields an empty dict
        try:
            return ast.literal_eval(data)
        except (SyntaxError, ValueError):
            return {}

    # Column positions are the same for every row, so resolve them once
    index_old = data_dict['columns'].index('content_old')
    index_new = data_dict['columns'].index('content_new')

    # Process each row for differences
    for row in data_dict['data']:
        
        content_old = load_dict(row[index_old])
        content_new = load_dict(row[index_new])
        
        differences_old = {}
        differences_new = {}

        common_keys = set(content_old.keys()).intersection(content_new.keys())
        for key in common_keys:
            old_value = content_old.get(key)
            new_value = content_new.get(key)
            if old_value != new_value:
                differences_old[key] = old_value
                differences_new[key] = new_value

        row[index_old] = differences_old if differences_old else {}
        row[index_new] = differences_new if differences_new else {}

    return data_dict


def compare_detailed_changes_users(data_list):
    """
    Compares old and new content in a list of user data entries to identify changes, additions, and deletions,
    and marks each entry accordingly.

    Process:
        - Defines a helper function `compare_content` that:
            - Compares `content_old` and `content_new` for each row to identify differences.
            - Stores differences in separate dictionaries, `differences_old` and `differences_new`.
            - Flags new keys in `content_new` that do not exist in `content_old`.
            - Extracts the `operation` field to track whether the entry was inserted, updated, or deleted.
        - Iterates through each user, partner, children, and file data entry in `data_list`:
            - Compares the content of each entry and marks entries as added or deleted based on the operation.
            - Adds metadata like deletion reasons, identifiers, and other relevant attributes.

    Args:
        - data_list (list of dicts): List of dictionaries where each dictionary contains user data, including old and new
          content, operations, and other metadata.

    Returns:
        - list of dicts: The modified `data_list` with additional keys indicating added or deleted entries,
          and populated with old/new content differences.

    Example Usage:
        - Use this function to track changes across user data, showing added, deleted, or modified records.

    Notes:
        - Assumes data entries contain fields like `content_old`, `content_new`, and `operation` for comparisons.
        - Handles cases where some data fields are missing or added between old and new content.

    """

    def compare_content(row):
        """
        Compares `content_old` and `content_new` within a row to identify field changes, tracking differences
        and storing them in `differences_old` and `differences_new`.

        Process:
            - Retrieves shared keys between old and new content, ignoring `operation`.
            - Checks for differences in each common key and stores them.
            - Flags any new keys in `content_new` that are not in `content_old`.
            - Extracts `operation` from either old or new content.

        Args:
            - row (dict): A dictionary containing `content_old` and `content_new`.

        Returns:
            - dict: The modified row with differences and operation flags.
        """

        # Retrieve old and new content, initializing differences
        content_old = row.get('content_old', {})
        content_new = row.get('content_new', {})
        differences_old = {}
        differences_new = {}

        # Identify common keys, ignoring 'operation'
        common_keys = set(content_old.keys()).intersection(content_new.keys()).difference({'operation'})

        # Check each common key for differences
        for key in common_keys:
            old_value = content_old.get(key)
            new_value = content_new.get(key)
            if old_value != new_value:
                differences_old[key] = old_value
                differences_new[key] = new_value

        # Flag new keys in content_new not present in content_old before the row is
        # updated, so additions are kept even when no common key changed
        for key in content_new.keys():
            if key not in content_old:
                differences_new[key] = content_new[key]

        # Retrieve and store 'operation' from content
        operation = content_new.get('operation', content_old.get('operation', None))
        row['operation'] = operation
        row['content_old'] = differences_old if differences_old else {}
        row['content_new'] = differences_new if differences_new else {}

        return row

    # Iterate over each entry in the data list
    for entry in data_list:
        # Process 'user_data' entries
        for user_row in entry.get('user_data', []):
            compare_content(user_row)

        # Process 'partner_data' entries with additional metadata for deletion
        for partner_row in entry.get('partner_data', []):
            delete_reason = None
            if 'partner_delete_reason' in partner_row['content_new']:
                delete_reason = partner_row['content_new']['partner_delete_reason']
            compare_content(partner_row)

            # Initialize deleted and added flags
            partner_row['deleted'] = False
            partner_row['added'] = False

            # Handle deletion and addition based on operation
            operation = partner_row['operation']
            if operation == 'delete':
                partner_row['deleted'] = True
                partner_row['delete_reason'] = delete_reason
            if operation != 'delete':
                partner_row['content_new'].pop('partner_delete_reason', None)
            if operation == 'insert':
                partner_row['added'] = True
            partner_row['content_new'].pop('operation', None)

        # Process 'children_data' entries, adding identifiers and deletion/addition flags
        for child_row in entry.get('children_data', []):
            first_name = child_row['content_old'].get('first_name') or child_row['content_new'].get('first_name')
            child_row['content_new'].pop('UserId', None)  # Remove irrelevant UserId field
            compare_content(child_row)

            # Mark the child as deleted or added based on the operation type
            if child_row['operation'] == 'delete':
                child_row['deleted'] = True
            elif child_row['operation'] == 'insert':
                child_row['added'] = True
            else:
                child_row['deleted'] = False
                child_row['added'] = False

            # Assign identifier to the child based on their first name
            if first_name:
                child_row['identifier'] = first_name

            child_row['content_new'].pop('operation', None)

        # Process 'files_data' entries, tracking addition or deletion
        for files_row in entry.get('files_data', []):
            # Retrieve file name from old or new content
            files_row['name'] = files_row['content_old'].get('name') or files_row['content_new'].get('name')
            compare_content(files_row)

            # Determine whether the file was deleted or added from the recorded
            # operation, setting both flags in every branch so neither key is missing
            if files_row['operation'] == 'delete':
                files_row['deleted'] = True
                files_row['added'] = False
            elif files_row['operation'] == 'insert':
                files_row['added'] = True
                files_row['deleted'] = False
            else:
                files_row['deleted'] = False
                files_row['added'] = False

            files_row['content_new'].pop('operation', None)

    # Return the updated data list with flagged changes
    return data_list


def parse_content(content_str):
    """Helper function to parse content strings into dictionaries."""
    try:
        return ast.literal_eval(content_str)
    except (SyntaxError, ValueError):
        return {}
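
# Example: parse_content("{'a': 1}") returns {'a': 1}, while malformed input such
# as parse_content("not a dict") safely yields {}.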
    
def convert_raw_rows_to_structure(rows):
    """
    Converts a list of raw database rows into a structured format with user, partner, children, and file data,
    tracking changes between old and new content.

    Process:
        - Initializes a structured dictionary for each row, parsing details such as user ID, editor, timestamp, and operation.
        - Separates each row's old and new content for user, partner, children, and file data.
        - Handles cases where data has been added or removed, ensuring unmatched items are included.
        - Appends the processed structure to a list representing all data.

    Args:
        - rows (list of lists): A list of rows, where each row contains:
            - id (int): Primary identifier.
            - user_id (str): User identifier.
            - content_old (str): Old content state (to be parsed).
            - content_new (str): New content state (to be parsed).
            - func (str): Operation performed.
            - editor (str): Username of the editor who made changes.
            - ts (datetime or str): Timestamp of the change, converted to a formatted string if datetime.

    Returns:
        - list of dicts: Each dictionary represents a row, structured with changes tracked for each type of data.

    Example Usage:
        - Convert raw rows to a structured format for use in tracking change history, showing additions, updates, or deletions.

    Notes:
        - Assumes helper functions `parse_content` to parse JSON-like content and `convert_to_user_data_format` for content formatting.
        - Handles changes for each data category separately, accommodating different lengths in old vs. new data lists.
    """

    all_data = []

    # Iterate through each row
    for row in rows:
        # Initialize row_data with keys for basic information and empty lists for data categories
        row_data = {
            'id': row[0],            # First column is the id
            'user_id': row[1],        # Second column is the user_id
            'func': row[4],           # The 5th column is func (operation performed)
            'editor': row[5],         # The 6th column is editor (who made the changes)
            'ts': row[6].strftime('%Y-%m-%d %H:%M:%S') if isinstance(row[6], datetime) else row[6],  # Convert ts to string if it's a datetime
            'user_data': [],          # Initialize user_data list
            'partner_data': [],       # Initialize partner_data list
            'children_data': [],      # Initialize children_data list
            'files_data': []          # Initialize files_data list
        }

        # Extract and parse content_old and content_new (3rd and 4th columns)
        content_old = parse_content(row[2])  # row[2] is 'content_old'
        content_new = parse_content(row[3])  # row[3] is 'content_new'

        # Format parsed content into user_data, partner_data, children_data, and files_data
        content_old = convert_to_user_data_format(content_old)
        content_new = convert_to_user_data_format(content_new)

        # Pair old and new entries for each data category; the shorter list is
        # padded with empty dicts so added or removed items are still represented
        for category in ('user_data', 'partner_data', 'children_data', 'files_data'):
            old_entries = content_old.get(category, [])
            new_entries = content_new.get(category, [])
            for i in range(max(len(old_entries), len(new_entries))):
                row_data[category].append({
                    'content_old': old_entries[i] if i < len(old_entries) else {},
                    'content_new': new_entries[i] if i < len(new_entries) else {}
                })

        # Append the processed row_data to the overall list
        all_data.append(row_data)

    return all_data


def convert_to_user_data_format(json_data):
    """
    Converts a flat dictionary to a structure with 'user_data', 'children_data', 'partner_data',
    and 'files_data' if they don't already exist. If they already exist, the structure is
    returned unchanged.

    If the structured fields are missing, the original data is wrapped into 'user_data' and
    'children_data', 'partner_data', and 'files_data' are set to empty lists.
    """
    if isinstance(json_data, str):
        json_data_def = ast.literal_eval(json_data)
    else:
        json_data_def = json_data
    # Check membership on the parsed dict, not the raw input (which may be a string)
    if 'user_data' not in json_data_def:
        # Convert the flat dictionary into 'user_data' format
        return {
            'user_data': [json_data_def],  # Wrap the existing data into 'user_data'
            'children_data': [],           # Empty list for 'children_data'
            'partner_data': [],            # Empty list for 'partner_data'
            'files_data': []               # Empty list for 'files_data'
        }
    else:
        # Return as is if the structure already contains 'user_data'
        return json_data_def
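
# Example (sketch): a flat dict is wrapped, an already-structured one passes through.
#
#     convert_to_user_data_format("{'first_name': 'Mia'}")
#     -> {'user_data': [{'first_name': 'Mia'}], 'children_data': [], 'partner_data': [], 'files_data': []}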
    
def get_fk_data_from_dict(data):
    """
    Replaces foreign key identifiers in a given dictionary with their corresponding descriptive data from
    database tables.

    This function examines specified keys in the input dictionary that are considered foreign keys. For each
    foreign key, it fetches the actual descriptive data from a database table using a helper function
    `get_data_from_fk_table`. It then replaces the numeric foreign key ID in the dictionary with the fetched
    descriptive data.

    Parameters:
    - data (dict): A dictionary containing key-value pairs, where the keys include foreign key references that
                   need to be resolved to more descriptive data.

    Returns:
    - dict: The modified dictionary where foreign key values have been replaced with their corresponding
            descriptive data.

    Raises:
    - KeyError: If a key expected to contain a foreign key ID does not exist in the dictionary.
    - ValueError: If there are issues converting data types during comparisons or data fetching.

    Example Usage:
    >>> data_dict = {
        'approval': '1',
        'civil_status': '2',
        'fleet': '3'
    }
    >>> updated_data = get_fk_data_from_dict(data_dict)
    >>> print(updated_data)
    {'approval': 'Approved', 'civil_status': 'Married', 'fleet': 'FleetName'}

    Notes:
    - The `get_data_from_fk_table` function is assumed to return a list of dictionaries, each containing an 'id' and a
      'description' key. The function uses these keys to replace the foreign key IDs in the `data` dictionary.
    - This function only updates the dictionary if the foreign key is present and a match is found; otherwise, the
      original values remain unchanged.
    - The table name is generated by prefixing 'users_' to the key, except for 'salutation', which maps to 'salution' in
      the database.

    This function is useful for resolving foreign keys to human-readable information before presenting data in
    user interfaces or reports.
    """
    
    # List of keys which are foreign keys needing to be replaced with actual data from a database table
    key_list = ['approval', 'civil_status', 'fleet', 'region', 'salutation', 'status_application', 'status_work', 'typ', 'gender', 'partner_approval']

    # Loop over each key and fetch the corresponding data using get_data_from_fk_table
    for key in key_list:        
        # Fetch the data using the function get_data_from_fk_table
        # 'users_' + key ensures the function targets the right table
        # data[key] is the foreign key ID that needs to be replaced
        if key == 'salutation':
            key_db = 'salution'
        else:
            key_db = key

        if key == 'partner_approval':
            real_data = get_data_from_fk_table('users_approval', 1, True)
        else:
            real_data = get_data_from_fk_table('users_' + key_db, 1, True)

        # Replace the FK ID in the original dictionary with the fetched data
        for real in real_data:
            if key in data:
                if str(data[key]) == str(real['id']):
                    data[key] = real['description']
                    break  # stop after the first match so the description cannot be re-matched
    return data

def process_fk_data_for_list(data_list):

    """
    Processes a list of dictionaries containing 'user_data' and 'children_data', replacing foreign key identifiers
    in 'content_old' and 'content_new' with descriptive data using get_fk_data_from_dict.
    
    Args:
    - data_list (list): A list of dictionaries, where each dictionary contains 'user_data' and 'children_data',
      and each data item has 'content_old' and 'content_new' keys.

    Returns:
    - list: The processed list with foreign key IDs replaced by descriptive data in 'content_old' and 'content_new'.
    """

    # Iterate through each entry in the data list
    for entry in data_list:
        # Process 'user_data'
        for user_row in entry.get('user_data', []):
            user_row['content_old'] = get_fk_data_from_dict(user_row['content_old'])
            user_row['content_new'] = get_fk_data_from_dict(user_row['content_new'])

        # Process 'partner_data'
        for user_row in entry.get('partner_data', []):
            user_row['content_old'] = get_fk_data_from_dict(user_row['content_old'])
            user_row['content_new'] = get_fk_data_from_dict(user_row['content_new'])

        # Process 'children_data' (if needed, depending on the use case)
        for child_row in entry.get('children_data', []):
            child_row['content_old'] = get_fk_data_from_dict(child_row['content_old'])
            child_row['content_new'] = get_fk_data_from_dict(child_row['content_new'])

    return data_list


def get_last_month_number():
    # Get the current date and time
    current_date = datetime.now()

    # Determine the first day of the current month
    first_day_of_current_month = current_date.replace(day=1)

    # Subtract one day from the first day of the current month to get the last day of the previous month
    last_day_of_last_month = first_day_of_current_month - timedelta(days=1)

    # Month of the last day of the previous month
    last_month = last_day_of_last_month.month

    return last_month
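
# Note: in January this correctly wraps to 12, because the first of the current
# month minus one day lands in December of the previous year.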

def get_current_year():
    # Get the current date and time
    current_date = datetime.now()

    # Extract the year from the current date
    current_year = current_date.year

    return current_year

def extract_children_data(form_data):
    children = []
    # Iterate over the form data to extract child-related fields
    for key, value in form_data.items():
        if key.startswith('child'):
            # Split e.g. 'childFirstname10' into field 'Firstname' and index 10
            # (a regex is used so multi-digit indices are handled correctly)
            match = re.match(r'child([A-Za-z]+)(\d+)$', key)
            if not match:
                continue
            field, index = match.group(1), int(match.group(2))

            if field == 'Firstname':
                field = 'first_name'
            elif field == 'Lastname':
                field = 'last_name'
            elif field == 'Gender':
                field = 'gender'
            elif field == 'Birthdate':
                field = 'birth_date'
            elif field == 'Allowances':
                field = 'child_allowances'
            elif field == 'Note':
                field = 'note'

            # Ensure the list has a dictionary for each child
            while len(children) <= index:
                # Check if childUserIdX exists for the child currently being appended
                user_id_key = f'childUserId{len(children)}'
                if user_id_key in form_data:
                    # Use the existing childUserId if present
                    children.append({'user_id': form_data[user_id_key]})
                else:
                    # Generate a new child_id if childUserIdX is not present
                    children.append({'user_id': generate_uid()})

            # Add the field to the corresponding child's dictionary
            children[index][field] = value

            # Handle the delete checkbox
            delete_key = f'childDelete{index}'
            children[index]['delete'] = delete_key in form_data  # True if the key exists, False otherwise

    # Checkbox semantics: the field is only present in the form data when allowances are claimed
    for child in children:
        if 'child_allowances' not in child:
            child['child_allowances'] = 0  # Default to 0 if the checkbox was not set
        else:
            child['child_allowances'] = 1

    return children
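
# Mapping sketch: form fields like childFirstname0 / childBirthdate0 collapse into
# one dict per child, e.g.
#
#     extract_children_data({'childFirstname0': 'Mia', 'childBirthdate0': '2019-04-02'})
#     -> [{'user_id': '<generated UID>', 'first_name': 'Mia',
#          'birth_date': '2019-04-02', 'delete': False, 'child_allowances': 0}]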

def add_user_id(data_dict):
    """
    Adds a unique user ID to the provided data dictionary.

    Process:
        - Generates a unique identifier for the user by calling `generate_uid()`.
        - Adds this generated ID to the `data_dict` under the key 'user_id'.
        - Returns the modified dictionary with the added 'user_id'.

    Args:
        - data_dict (dict): The dictionary containing user data that will be augmented with a new 'user_id'.

    Returns:
        - dict: The input dictionary with an additional key-value pair:
            - 'user_id' (str): The unique identifier generated for the user.

    Example Usage:
        - Use this function to add a unique 'user_id' to user data before saving it to a database or performing other operations.

    Notes:
        - This function assumes the existence of `generate_uid`, a function that generates unique IDs.
    """

    # Generate a unique user ID and add it to the dictionary
    data_dict['user_id'] = generate_uid()
    
    # Return the updated dictionary with 'user_id'
    return data_dict



def update_childrens(childrens_data, user_id):
    """
    Updates information for each child associated with a user, determining whether to insert, update, or delete
    child data based on the provided input.

    Process:
        - Retrieves all existing user IDs to check if each child ID already exists.
        - For each child entry in `childrens_data`:
            - If the 'delete' flag is set to True, deletes the child data (deletion takes precedence).
            - Otherwise, if the child ID exists, updates the child's data.
            - If the child ID does not exist, inserts new child data with a unique ID.
        - Records the operation performed for each child ('insert', 'update', or 'delete') and stores it in a list.

    Args:
        - childrens_data (list of dict): List of dictionaries containing data about each child, including 'user_id' and 'delete' flags.
        - user_id (str): The unique identifier of the user to whom the children are associated.

    Returns:
        - list of dicts: A list where each dictionary contains:
            - 'user_id': The ID of the child processed.
            - 'operation': The operation performed ('insert', 'update', 'delete') for that child.

    Example Usage:
        - When updating children information for a user, this function decides for each child whether to insert, update,
          or delete the data and returns a summary of the actions taken.

    Notes:
        - Assumes the existence of helper functions like `get_all_user_id`, `delete_child_data`, `update_child_data`,
          `generate_uid`, and `insert_child_data` for performing database operations.
    """

    # Retrieve all existing user IDs to verify if each child already exists
    user_ids = get_all_user_id()
    
    # Initialize a list to store operation results for each child
    results = []
    
    # Process each child entry in `childrens_data`
    for child_data in childrens_data:
        # Initialize a dictionary to record the operation result for the current child
        operation_result = {'user_id': child_data.get('user_id'), 'operation': None}
        
        # Handle the delete flag first so a child marked for deletion is
        # removed without first being inserted or updated
        if child_data.get("delete", False):
            delete_child_data(child_data['user_id'])
            operation_result['operation'] = 'delete'
        
        # Check if the child's user_id exists in the system
        elif child_data['user_id'] in user_ids:
            # Update existing child data if the child ID is found
            update_child_data(child_data)
            operation_result['operation'] = 'update'
        
        else:
            # Insert new child data if the child ID is not found
            child_id = generate_uid()
            insert_child_data(child_data, user_id, child_id)
            operation_result['operation'] = 'insert'
        
        # Append the operation result for this child to the results list
        results.append(operation_result)
    
    # Return the list of operation results for all processed children
    return results
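
# Hedged usage sketch (update/insert/delete helpers hit the database, so the
# IDs below are illustrative only):
#
#     results = update_childrens(
#         [{'user_id': 'UIDA1B2C3D4E5F6A7B8', 'first_name': 'Mia', 'delete': False}],
#         user_id='UIDF9E8D7C6B5A40312',
#     )
#     # -> [{'user_id': 'UIDA1B2C3D4E5F6A7B8', 'operation': 'update'}]  (if the ID exists)
#     # (operation is 'insert' for unknown IDs and 'delete' when the flag is set)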

def update_partner(partner_data, user_id):
    """
    Updates partner information for a user, determining whether to insert, update, or delete
    partner data based on the provided input.

    Process:
        - Retrieves all existing user IDs to check if the specified partner ID already exists.
        - Checks the `partner_delete` flag in `partner_data` to determine if the partner should be deleted.
        - If the partner exists and is not marked for deletion, updates their information.
        - If the partner ID is new, generates a unique ID and inserts the new partner data.
        - Records the operation performed ('insert', 'update', or 'delete') for audit purposes.

    Args:
        - partner_data (dict): Dictionary containing data about the partner, including 'partner_id' and 'partner_delete'.
        - user_id (str): The unique identifier of the user to whom the partner is associated.

    Returns:
        - dict: A dictionary containing:
            - 'user_id': The partner ID or None if not provided.
            - 'operation': The operation performed ('insert', 'update', 'delete', or None if no operation was executed).

    Example Usage:
        - When partner information is updated, this function decides whether to insert, update, or delete the
          partner data and returns a summary of the action taken.

    Notes:
        - Assumes the existence of helper functions like `get_all_user_id`, `delete_partner_data`, `update_partner_data`,
          `generate_uid`, and `insert_partner_db` for database operations.
    """

    # Retrieve all existing user IDs to verify if the partner already exists
    user_ids = get_all_user_id()

    # Drop None entries so membership checks behave predictably
    clean_user_ids = {u for u in (user_ids or []) if u is not None}

    # Initialize a dictionary to record the operation result
    operation_result = {'user_id': partner_data.get('partner_id', None), 'operation': None}

    # Check if the 'partner_delete' flag is set to "yes" in partner_data
    if partner_data.get('partner_delete') == "yes":
        # Delete the partner's data
        delete_partner_data(partner_data['partner_id'])
        operation_result['operation'] = 'delete'

    # Check if the partner ID exists in the known user IDs and 'partner_delete' is set to "no"
    elif partner_data.get('partner_id') in clean_user_ids and partner_data.get('partner_delete') == "no":
        # If the partner exists, update their data
        update_partner_data(partner_data)
        operation_result['operation'] = 'update'

    # If the partner ID is not among the known user IDs, assume a new partner and insert the data
    elif partner_data.get('partner_id') not in clean_user_ids:
        # Generate a unique partner ID and insert new partner data
        partner_id = generate_uid()
        insert_partner_db(partner_data, user_id, partner_id)
        operation_result['operation'] = 'insert'
    # Return the result of the operation
    return operation_result
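
# Decision sketch for update_partner (flags and IDs are illustrative):
#
#     update_partner({'partner_id': 'UID...', 'partner_delete': 'yes'}, user_id)   # -> delete
#     update_partner({'partner_id': '<known>', 'partner_delete': 'no'}, user_id)   # -> update
#     update_partner({'partner_id': None}, user_id)                                # -> insert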

def inform_jan_bc_user_new_planday(user_name, approval_id):
    """
    Generates a message for notifying Jan about a new Planday account creation for a user, including 
    the relevant approval description if found.

    Process:
        - Retrieves a list of approval descriptions from the 'users_approval' foreign key table.
        - Searches the approval list to find a description matching `approval_id`.
        - Constructs a message for Jan containing the user’s name and relevant approval description.
        - If no matching approval description is found, constructs a message indicating that no approval was located.

    Args:
        - user_name (str): The name of the user for whom a Planday account was created.
        - approval_id (str or int): The ID of the approval related to the user's Planday account creation.

    Returns:
        - str: A formatted message for notifying Jan about the new Planday account and its approval status.

    Example Usage:
        - When a new Planday account is created, call this function to generate a message notifying Jan, including
          approval details if available.

    Notes:
        - This function assumes `get_data_from_fk_table` retrieves a list of approvals from a foreign key table
          with details including 'id' and 'description'.
        - The message includes a link to Jan's Telegram profile using his Telegram user ID (6391610367).
    """

    # Retrieve a list of approval descriptions from the 'users_approval' foreign key table
    approval_list = get_data_from_fk_table('users_approval', 1, True)
    
    # Search for the description that matches the provided approval_id
    approval = [x['description'] for x in approval_list if str(x.get('id')) == str(approval_id)]

    # Construct the message based on whether an approval description was found
    if approval and approval[0]:  # Check if a description was found and is not None
        text = (f'Hallo <a href="tg://user?id=6391610367">Jan</a>, '
                f'für den Mitarbeiter {user_name} wurde ein Planday-Account mit der Genehmigung "{approval[0]}" erstellt. '
                'Bitte überprüfen!')
    else:
        # Message if no matching approval was found
        text = (f'Hallo <a href="tg://user?id=6391610367">Jan</a>, '
                f'für den Mitarbeiter {user_name} wurde ein Planday-Account erstellt, aber es wurde keine passende Genehmigung gefunden. '
                'Bitte überprüfen!')
    
    # Return the constructed message
    return text


def convert_to_log_structure(user_data, children_data, children_operations, partner_data, partner_operation, file_list):

    """
    Converts the given user and children data into the required structure with 'user_data' and 'child_data' keys.

    Args:
    - user_data (dict): Dictionary representing the main user data.
    - children_data (list[dict]): List of dictionaries representing the children data.
    - children_operations (list[dict]): List of dictionaries with the operation performed for each child.
    - partner_data (dict): Dictionary representing the partner data (may be empty).
    - partner_operation (dict or None): The operation recorded for the partner, if any.
    - file_list (list[dict] or str): Uploaded file entries, optionally given as a string representation of a list.

    Returns:
    - dict: A dictionary with 'user_data', 'children_data', 'partner_data', and 'files_data' keys,
            including operation details for each entry.
    """

    # Prepare user data, as a list of one dictionary
    formatted_user_data = [user_data]

    # Prepare children data with operation details
    formatted_children_data = []
    for child in children_data:
        # Remove the 'delete' key from each child data
        child = {k: v for k, v in child.items() if k.lower() != 'delete'}
        # Find the corresponding operation for the child based on user_id
        child_operation = next((op['operation'] for op in children_operations if op['user_id'] == child.get('user_id')), None)
        
        # Add the operation to the child data
        child['operation'] = child_operation
        
        # Append the child data to the final children data list
        formatted_children_data.append(child)

    formatted_partner_data = []
    if partner_data:
        # Remove the 'partner_delete' key from the partner data
        partner_data.pop('partner_delete', None)

        # Add the operation to the partner data (None if no operation was recorded)
        partner_data['operation'] = partner_operation['operation'] if partner_operation else None

        # Append the partner data to the final partner data list
        formatted_partner_data.append(partner_data)

    formatted_file_list = []
    if isinstance(file_list, str):
        file_list = ast.literal_eval(file_list)
    if file_list:
        for file in file_list:
            formatted_file_list.append({'id': file['id'], 'name': file['name'], 'operation': file.get('operation', None)})

    # Combine into the final structure
    final_data = {
        'user_data': formatted_user_data,  # List containing user data
        'children_data': formatted_children_data,  # List of children data with operations
        'partner_data': formatted_partner_data,
        'files_data': formatted_file_list
    }

    return final_data
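
# Illustrative shape of the returned log structure (values abbreviated):
#
#     {
#         'user_data': [{'user_id': 'UID...', 'first_name': 'Max'}],
#         'children_data': [{'user_id': 'UID...', 'first_name': 'Mia', 'operation': 'update'}],
#         'partner_data': [{'partner_id': 'UID...', 'operation': 'insert'}],
#         'files_data': [{'id': '1AbC...', 'name': 'front_approval_20300620', 'operation': 'insert'}],
#     }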

def replace_empty_with_none(data):
    # Check if data is a list of dictionaries
    if isinstance(data, list):
        for entry in data:
            # Iterate over key-value pairs in each dictionary
            for key, value in entry.items():
                # Replace '' with None
                if value == '':
                    entry[key] = None
    # Check if data is a single dictionary
    elif isinstance(data, dict):
        for key, value in data.items():
            if value == '':
                data[key] = None
    return data
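
# Example: replace_empty_with_none({'note': '', 'city': 'Bern'})
# -> {'note': None, 'city': 'Bern'}   (the input is modified in place and returned)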

def find_folder_id(service, folder_name, parent_id):
    """
    Find the Google Drive folder ID based on folder name within a specified parent folder.

    Args:
        service: Authenticated Google Drive service instance.
        folder_name (str): Name of the folder.
        parent_id (str): ID of the parent folder.

    Returns:
        str: Folder ID if found, None otherwise.
    """
    # Note: a folder name containing a single quote would break this query string
    query = f"mimeType='application/vnd.google-apps.folder' and name='{folder_name}' and '{parent_id}' in parents"
    response = service.files().list(q=query, spaces='drive', fields='files(id, name)', includeItemsFromAllDrives=True, supportsAllDrives=True).execute()
    folders = response.get('files', [])

    if not folders:
        return None
    # If multiple folders have the same name, this returns the ID of the first one.
    return folders[0]['id']

def upload_files_to_google_drive(file_paths, folder_name, parent_id='1idjXBYaU6B5vcB5GiR3bKWScLvNowdfI'):
    """
    Uploads multiple files to Google Drive to a specified folder by folder name, within a specified parent folder.
    Automatically determines the MIME type of each file based on its extension.

    Args:
        file_paths (list of str): List of file paths.
        folder_name (str): The name of the Google Drive folder where the files will be uploaded.
        parent_id (str): ID of the parent folder where the target folder resides.

    Returns:
        list: A list of URLs or error messages for each file uploaded.
    """
    # Load credentials from a service account file
    cred_json = get_cred_google_service_acc()  # Ensure this function correctly fetches your credentials
    try:
        credentials = service_account.Credentials.from_service_account_info(
            cred_json, scopes=SCOPES)
    except Exception as e:
        logging.error("Failed to load service account credentials: %s", e)
        return ["Error loading service account credentials."]

    # Build the Google Drive service object
    try:
        service = build('drive', 'v3', credentials=credentials)
    except Exception as e:
        logging.error("Failed to build the Drive service: %s", e)
        return ["Error building the Drive service."]

    # Resolve folder name to ID within the specified parent
    folder_id = find_folder_id(service, folder_name, parent_id)
    if not folder_id:
        return ["Folder not found or inaccessible."]

    results = []

    # Iterate over each file path
    for file_path in file_paths:
        file_name = os.path.basename(file_path)  # Extract the file name from the path
        mime_type, _ = mimetypes.guess_type(file_path)  # Guess MIME type
        if not mime_type:
            mime_type = 'application/octet-stream'  # Default MIME type if unknown

        # File metadata for the upload
        file_metadata = {
            'name': file_name,
            'parents': [folder_id]
        }

        # Media file upload
        media = MediaFileUpload(file_path, mimetype=mime_type)

        # Attempt to upload the file and fetch its URL
        try:
            file = service.files().create(body=file_metadata, media_body=media, fields='id, webViewLink', supportsAllDrives=True).execute()
            results.append({'name': file_name, 'url': file['webViewLink'], 'id': file['id'], 'operation': 'insert'})
        except Exception as error:
            error_message = f"Error uploading the file {file_name}: {error}"
            logging.error(error_message)
            results.append(error_message)

    return results
    
def list_files_in_folder(folder_name, parent_id='1idjXBYaU6B5vcB5GiR3bKWScLvNowdfI'):
    """
    List all files in a specified Google Drive folder using the folder name.

    Args:
        folder_name (str): The name of the folder.
        parent_id (str): The ID of the parent folder (defaults to a preconfigured Drive folder ID).

    Returns:
        list: A list of dictionaries, each containing details about the files within the folder.
    """
    cred_json = get_cred_google_service_acc()
    try:
        credentials = service_account.Credentials.from_service_account_info(
            cred_json, scopes=SCOPES)
    except Exception as e:
        logging.error("Failed to load service account credentials: %s", e)
        return "Error loading service account credentials."

    # Build the Google Drive service object
    try:
        service = build('drive', 'v3', credentials=credentials)
    except Exception as e:
        logging.error("Failed to build the Drive service: %s", e)
        return "Error building the Drive service."
    
    folder_id = find_folder_id(service, folder_name, parent_id)
    if folder_id is None:
        logging.error("Folder not found or inaccessible")
        return []

    try:
        query = f"'{folder_id}' in parents"
        results = service.files().list(q=query, spaces='drive', fields='files(id, name, mimeType, size, webViewLink, createdTime, modifiedTime)', supportsAllDrives=True, includeItemsFromAllDrives=True).execute()
        files = results.get('files', [])
        filtered_files = [file for file in files if "DELETED" not in file['name'].upper()]
        return filtered_files
    except Exception as e:
        logging.error("Failed to retrieve files: %s", e)
        return []

def get_file_by_id_in_folder(folder_name, file_id, parent_id='1idjXBYaU6B5vcB5GiR3bKWScLvNowdfI'):
    """
    Get a specific file in a specified Google Drive folder using the folder name and file ID.

    Args:
        folder_name (str): The name of the folder.
        file_id (str): The ID of the file to retrieve.
        parent_id (str): The ID of the parent folder (defaults to a preconfigured Drive folder ID).

    Returns:
        dict: A dictionary containing details about the file if found, or None if the file is not found.
    """
    # Load Google Drive service account credentials
    cred_json = get_cred_google_service_acc()
    try:
        credentials = service_account.Credentials.from_service_account_info(
            cred_json, scopes=SCOPES)
    except Exception as e:
        logging.error("Failed to load service account credentials: %s", e)
        return "Error loading service account credentials."

    # Build the Google Drive service object
    try:
        service = build('drive', 'v3', credentials=credentials)
    except Exception as e:
        logging.error("Failed to build the Drive service: %s", e)
        return "Error building the Drive service."

    # Find the folder ID based on the folder name
    folder_id = find_folder_id(service, folder_name, parent_id)
    if folder_id is None:
        logging.error("Folder not found or inaccessible")
        return None

    try:
        # Retrieve the file by its ID directly and check its parent folder
        file = service.files().get(
            fileId=file_id,
            fields='id, name, mimeType, size, webViewLink, createdTime, modifiedTime, parents',
            supportsAllDrives=True
        ).execute()

        # Check if the file is in the desired folder
        if folder_id in file.get('parents', []):
            return file
        else:
            logging.info("File is not located in the specified folder.")
            return None
    except Exception as e:
        logging.error("Failed to retrieve file: %s", e)
        return None

def reset_planday_name(planday_id, first_name, last_name):
    """
    Resets the first and last name of a Planday employee via the Planday HR API.

    Args:
        planday_id (str): The Planday employee ID.
        first_name (str): The first name to set.
        last_name (str): The last name to set.

    Returns:
        bool: True if the API accepted the update, otherwise False.
    """
    url = f"https://openapi.planday.com/hr/v1.0/employees/{planday_id}"
    planday_headers = get_planday_tokan(PLANDAY_CLIENT_ID)

    payload = {
        "firstName": first_name,
        "lastName": last_name
    }

    try:
        response = requests.put(
            url,
            headers={**planday_headers, "Content-Type": "application/json"},
            json=payload
        )
        # Planday answers successful PUT requests with 200 or 204
        return response.status_code in (200, 204)

    except requests.RequestException as e:
        logging.error("Request failed: %s", e)
        raise

def save_files_tmp(files, form):
    """
    Saves uploaded files to a temporary directory, modifies filenames with date information if available, and 
    returns paths and statuses of saved files.

    Process:
        - Iterates over each uploaded file, filtering to include only files with allowed prefixes.
        - For valid files:
            - Extracts an identifier and prefix from the file key to construct a modified file name.
            - Checks if corresponding start and end dates are provided in `form` and appends them to the filename.
            - Saves the file to a temporary directory with the modified name.
        - Tracks saved file paths and filenames with statuses in dictionaries.

    Args:
        - files (dict): A dictionary of uploaded files, typically from `request.files`.
        - form (dict): A dictionary of form data, typically from `request.form`.

    Returns:
        - tuple: A list of saved file paths and a dictionary with filenames and their statuses.

    Example Usage:
        - After uploading multiple files, this function saves each to `/tmp` and modifies filenames with any
          provided start and end dates.

    Notes:
        - The function assumes `allowed_file` is defined to validate file extensions.
        - Dates are expected in 'YYYY-MM-DD' format and are reformatted to 'YYYYMMDD' for filenames.
        - Files are saved in `/tmp`, and the saved paths are returned for further processing.
    """

    file_paths = []
    file_name = {}

    # Iterate over each file key and filter for specific prefixes
    for file_key in files:
        # All relevant keys start with 'file_' ('file_front_…' and 'file_back_…' included)
        if not file_key.startswith('file_'):
            continue  # Skip keys that do not match the expected prefix

        # Retrieve the file object from `files`
        file = files[file_key]

        # Check if the file is allowed by extension
        if file and allowed_file(file.filename):
            # Extract identifier and prefix from file_key
            if file_key.startswith('file_front_'):
                identifier = file_key[len('file_front_'):]
                prefix = 'front_'
            elif file_key.startswith('file_back_'):
                identifier = file_key[len('file_back_'):]
                prefix = 'back_'
            else:
                identifier = file_key[len('file_'):]
                prefix = ''

            # Use `file_key` as the initial filename; it acts as the identifier
            # that later matching (e.g. in check_document_requirements) relies on,
            # so the uploaded client filename is intentionally discarded
            filename = file_key

            # Retrieve start and end dates from `form`, if provided
            start_date_key = f'startDate_{identifier}'
            end_date_key = f'endDate_{identifier}'

            # Format dates to 'YYYYMMDD'
            start_date = form.get(start_date_key, '').replace('-', '')
            end_date = form.get(end_date_key, '').replace('-', '')

            # Append date information to the filename if start or end dates are present
            if end_date:
                date_part = end_date
                if start_date:  # Include start and end dates if both are present
                    date_part = f"{start_date}_to_{end_date}"
                filename = f"{prefix}{identifier}_{date_part}"  # Modify filename with date info

            # Mark the file as 'Created' in `file_name`
            file_name[filename] = 'Created'

            # Define the file path in the /tmp directory and save the file
            file_path = os.path.join('/tmp', filename)
            file.save(file_path)
            file_paths.append(file_path)  # Add file path to the list

    # Return lists of file paths and file names with their statuses
    return file_paths, file_name
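
# Filename construction sketch (keys and dates are illustrative): an upload
# under the key 'file_front_approval' with form fields
# startDate_approval='2024-01-01' and endDate_approval='2030-06-20' is saved as
# '/tmp/front_approval_20240101_to_20300620'; without dates, the raw key
# 'file_front_approval' itself is used as the file name.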


def allowed_file(filename):
    """
    Checks if a file has an allowed extension based on predefined allowed file types.

    Process:
        - Defines a set of allowed extensions for files.
        - Checks if the file has an extension by looking for a period ('.') in the filename.
        - Extracts the file extension by splitting the filename on the last period.
        - Verifies if the extension (converted to lowercase) is in the set of allowed extensions.

    Args:
        - filename (str): The name of the file to check.

    Returns:
        - bool: True if the file has an allowed extension, otherwise False.

    Example Usage:
        - For a file "document.pdf", the function will return True if 'pdf' is in `ALLOWED_EXTENSIONS`.
        - For a file "image.bmp", the function will return False if 'bmp' is not in `ALLOWED_EXTENSIONS`.

    Notes:
        - This function assumes that only the last period in the filename indicates the extension.
        - File extensions are case-insensitive.
    """

    # Define a set of allowed file extensions
    ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg'}

    # Check if the filename contains a period and if its extension is allowed
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


def remove_files(file_paths):
    """
    Attempts to delete each file in a list of file paths and records the result of each operation.

    Process:
        - Iterates through each file path in `file_paths`.
        - Tries to delete the file at each path and records the status of each deletion attempt.
        - Handles different exceptions to capture specific errors:
            - `FileNotFoundError`: File does not exist at the specified path.
            - `PermissionError`: Insufficient permissions to delete the file.
            - Other exceptions are recorded with the specific error message.

    Args:
        - file_paths (list of str): List of file paths to attempt deletion.

    Returns:
        - list of dicts: Each dictionary contains:
            - 'file_path' (str): The path of the file attempted for deletion.
            - 'status' (str): Indicates the result ('deleted', 'not found', 'permission denied', or 'error').
            - 'error' (str, optional): Error message if an unspecified exception occurred.

    Example Usage:
        - Given a list of file paths, the function will attempt to delete each one, returning a list of status results.

    Notes:
        - This function provides detailed feedback for each file deletion attempt, useful for auditing and debugging.
    """

    results = []

    # Iterate over each file path and attempt deletion
    for file_path in file_paths:
        try:
            # Attempt to delete the file
            os.remove(file_path)
            results.append({'file_path': file_path, 'status': 'deleted'})
        
        # Handle specific exceptions for clearer feedback
        except FileNotFoundError:
            # File does not exist at the specified path
            results.append({'file_path': file_path, 'status': 'not found'})
        
        except PermissionError:
            # Insufficient permissions to delete the file
            results.append({'file_path': file_path, 'status': 'permission denied'})
        
        except Exception as e:
            # Other unspecified errors, captured with specific error message
            results.append({'file_path': file_path, 'status': 'error', 'error': str(e)})

    # Return a list of results indicating the status of each deletion
    return results


def extract_date_from_filename(filename):
    """
    Extracts a date from a file name based on an 8-digit pattern (YYYYMMDD format).

    Process:
        - Compiles a regular expression to match exactly 8 consecutive digits in the filename.
        - Searches the filename for occurrences of 8-digit sequences.
        - If any dates are found, returns the last one in the list.
        - If no dates are found, returns None.

    Args:
        - filename (str): The name of the file from which to extract a date.

    Returns:
        - str or None: The extracted date string in 'YYYYMMDD' format if found, otherwise None.

    Example Usage:
        - For a filename "report_20221130_final.pdf", the function will return "20221130".
        - If no 8-digit sequence is present, the function returns None.

    Notes:
        - This function assumes the date format is always 'YYYYMMDD' and looks for an exact 8-digit match.
    """

    # Define a regular expression pattern to match exactly 8 consecutive digits
    date_pattern = re.compile(r'\d{8}')  # Matches exactly 8 digits

    # Find all matches of 8-digit sequences in the filename
    dates = date_pattern.findall(filename)

    # Return the last match if dates were found, otherwise return None
    return dates[-1] if dates else None
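
# Example: extract_date_from_filename('front_approval_20240101_to_20300620')
# returns '20300620' (the last 8-digit run); use extract_dates() below when the
# full start/end range is needed.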

def check_if_docs_are_valid_and_update_planday_name(planday_id, url_name, first_name, last_name):
    """
    Resets the employee's name in Planday if both the front and the back of the
    approval document were uploaded with the same, still-valid expiry date.

    Args:
        planday_id (str): The Planday employee ID.
        url_name (list of dict): Uploaded file entries, e.g.
            {'name': 'front_approval_20300620', 'url': '...', 'id': '...', 'operation': 'insert'}
        first_name (str): First name to restore in Planday.
        last_name (str): Last name to restore in Planday.
    """

    pat = re.compile(r'^(front|back)_approval_(\d{8})$')
    found = {}

    for item in url_name or []:
        name = (item.get('name') or '').strip()
        m = pat.match(name)
        if not m:
            continue
        side, yyyymmdd = m.groups()
        try:
            d = datetime.strptime(yyyymmdd, "%Y%m%d").date()
        except ValueError:
            continue
        # store latest date if duplicates
        prev = found.get(side)
        if prev is None or d > prev:
            found[side] = d

    if 'front' in found and 'back' in found and found['front'] == found['back']:
        today_zurich = datetime.now(ZoneInfo("Europe/Zurich")).date()
        if found['front'] > today_zurich:
            reset_planday_name(planday_id, first_name, last_name) 

def check_document_requirements(document_specs, file_metadata):
    """
    Checks if required documents are present and meet expiration or validity criteria based on the given specifications.

    Process:
        - For each document specification in `document_specs`:
            - Normalizes the document name to create the expected file names based on the `sites` value.
            - Checks if required files exist in `file_metadata` and if they are expired or deleted.
            - Extracts dates from file names to validate document dates or ranges.
        - Determines document status:
            - Status 1 if all required files are present and meet date requirements.
            - Status 0 if any required file is missing, expired, or deleted.

    Args:
        - document_specs (list of dict): Specifications for each required document, containing 'name', 'sites', and 'needed_date' keys.
        - file_metadata (list of dict): Metadata for each available file, containing 'name' and optionally dates or status.

    Returns:
        - list of dicts: Each dictionary contains the document name and its status:
            - 'status' 1 if all requirements are met (file presence, date validity).
            - 'status' 0 if any requirement fails.

    Example Usage:
        - `document_specs` specifies documents to check, including number of required sides and date needs.
        - `file_metadata` contains files, and the function verifies if all required documents are present and valid.

    Notes:
        - The function uses the `extract_dates` helper to read single dates or 'YYYYMMDD_to_YYYYMMDD' ranges from file names.
        - Assumes a `today` variable is defined externally as a 'YYYYMMDD' string, used for date comparisons.
    """

    results = []

    # Process each document specification
    for doc in document_specs:
        # Normalize document name and prepare it for matching
        base_name = doc['name'].strip().replace(' ', '_')
        required_files = []

        # Determine required file names based on 'sites' specification
        if doc['sites'] == 1:
            required_files.append(f"file_front_{base_name}")
        elif doc['sites'] == 2:
            required_files.append(f"front_{base_name}")
            required_files.append(f"back_{base_name}")

        # Initialize dictionaries to track file existence and extracted dates
        file_exists = {name: False for name in required_files}
        file_dates = {name: None for name in required_files}
        file_expired = False  # Tracks if any file is marked as expired

        # Check each file in metadata against required files
        for file in file_metadata:
            for req_file in required_files:
                if req_file in file['name']:  # Match found in metadata
                    file_exists[req_file] = True
                    # Check if file is marked as expired or deleted
                    if 'EXPIRED' in file['name'] or 'DELETED' in file['name']:
                        file_expired = True
                    
                    # Extract the start/end dates from the file name if available;
                    # extract_dates returns (None, end) for single dates
                    start_date, end_date = extract_dates(file['name'])
                    if end_date:
                        file_dates[req_file] = (start_date, end_date)

        # Determine if all required files are present
        all_files_present = all(file_exists.values())
        
        # Initialize document status as 0 (invalid) by default
        document_status = 0

        # Evaluate document status based on presence and date requirements
        if all_files_present and not file_expired:
            if doc['needed_date'] == 0:
                document_status = 1  # No date requirement, only existence check
            
            elif doc['needed_date'] == 1:
                # Check if each file meets expiration date requirements
                document_status = 1  # Assume valid unless a date fails
                for dates in file_dates.values():
                    if dates and today > dates[1]:  # Expired file found
                        document_status = 0
                        break

            elif doc['needed_date'] == 2:
                # Check if each file is within a valid date range
                document_status = 1  # Assume valid unless a date range fails
                for dates in file_dates.values():
                    if dates:
                        start_date, end_date = dates
                        # Check whether today falls within the date range;
                        # a missing start date is treated as open-ended
                        if not ((start_date or '00000000') <= today <= end_date):
                            document_status = 0
                            break

        # Append the document status to the results (expired files always yield 0)
        results.append({'name': doc['name'], 'status': document_status})

    return results
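
# Hedged example (the spec fields follow the keys this function reads; names are illustrative):
#
#     specs = [{'name': 'approval', 'sites': 2, 'needed_date': 2}]
#     meta  = [{'name': 'front_approval_20240101_to_20300620'},
#              {'name': 'back_approval_20240101_to_20300620'}]
#     check_document_requirements(specs, meta)
#     # -> [{'name': 'approval', 'status': 1}]  while `today` lies inside the range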


def extract_dates(filename):
    # Regex to find dates in the format YYYYMMDD or range YYYYMMDD_to_YYYYMMDD
    date_pattern = re.compile(r'(\d{8})(_to_(\d{8}))?')
    match = date_pattern.search(filename)
    if match:
        if match.group(2):  # If '_to_' is found, indicating a range
            start_date = match.group(1)
            end_date = match.group(3)
        else:  # If only one date is found
            start_date = None
            end_date = match.group(1)
        return start_date, end_date
    return None, None
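
# Examples:
#     extract_dates('front_approval_20240101_to_20300620')  # -> ('20240101', '20300620')
#     extract_dates('file_contract_20251231')               # -> (None, '20251231')
#     extract_dates('no_dates_here')                        # -> (None, None)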

def insert_users_doc(url_name, user_id):
    """
    Inserts document details for a user into the database based on file information.

    Process:
        - Retrieves document foreign key mappings from the database to match file names with document types.
        - Loops through each file in `url_name` and matches the file name with a document type from `doc_dict`.
        - Extracts start and end dates from the file name, if available, and inserts these details into the database.
        - Calls `insert_docs_db` to perform the actual database insertion for each matched document.

    Args:
        - url_name (list of dicts): List of dictionaries, each containing 'name' (file name) and 'url' (file URL) keys.
        - user_id (str): Unique identifier for the user to whom the documents belong.

    Returns:
        - None

    Example Usage:
        - If `url_name` contains files with names matching known document types, those documents are processed and inserted
          into the database with associated metadata (e.g., start and end dates).

    Notes:
        - This function assumes access to `get_doc_fk_table` to fetch document types and `insert_docs_db` for database insertion.
        - It also assumes `extract_dates` is a helper function to extract date ranges from the file name.
        - Error handling is not included and could be added for robustness.
    """

    # Retrieve document foreign key mappings (associates document names with their IDs)
    docs = get_doc_fk_table()
    doc_dict = {doc['name']: doc['id'] for doc in docs}

    # Iterate through each file in `url_name`
    for files in url_name:
        filename = files['name']

        # Loop through known document types and match them with the current filename
        for doc_name, doc_id in doc_dict.items():
            if doc_name in filename:
                # Extract start and end dates from the file name
                start_date, end_date = extract_dates(filename)

                # Insert the document record into the database with user ID, doc type, dates, and file URL
                insert_docs_db(user_id, doc_id, start_date, end_date, files['url'])
                break  # Exit inner loop once a match is found for this file


def rename_file(file_id, name):
    cred_json = get_cred_google_service_acc()
    try:
        credentials = service_account.Credentials.from_service_account_info(
            cred_json, scopes=SCOPES)
    except Exception as e:
        logging.error("Failed to load service account credentials: %s", e)
        return "Error loading service account credentials."

    # Build the Google Drive service object
    try:
        service = build('drive', 'v3', credentials=credentials)
    except Exception as e:
        logging.error("Failed to build the Drive service: %s", e)
        return "Error building the Drive service."
    
    file_metadata = {'name': name}
    try:
        updated_file = service.files().update(fileId=file_id, body=file_metadata, supportsAllDrives=True).execute()
        return updated_file
    except Exception as e:
        logging.error("Failed to rename file: %s", e)
        return None
    
def convert_files(files_json):
    # Parse the Python-style string (single-quoted) into a list of dicts;
    # ast.literal_eval is safer than replacing quotes, which breaks on names
    # that contain apostrophes
    files = ast.literal_eval(files_json)
    
    # Create an empty dictionary to hold file names and their statuses
    files_dict = {}
    
    # Iterate over each file entry in the files data
    for file in files:
        # Check if the file name contains 'DELETED' or 'EXPIRED'
        if 'DELETED' in file['name'] or 'EXPIRED' in file['name']:
            continue  # Skip adding this file to the dictionary
        
        # Add the file name as a key to the dictionary with a status of 'Created'
        files_dict[file['name']] = 'Created'
    
    # Return the dictionary containing file names and their respective statuses
    return files_dict
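
# Example (input mirrors the stringified file lists stored elsewhere in the logs):
#     convert_files("[{'name': 'front_approval_20300620'}, {'name': 'DELETED_old_permit'}]")
#     # -> {'front_approval_20300620': 'Created'}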

def add_current_files(data):
    """
    Adds current Google Drive file information to the provided user data.

    Process:
        - Checks if the `data` parameter is a string and converts it to a dictionary if necessary.
        - Retrieves a list of files associated with the user's Google Drive folder.
        - Converts each file entry to a simplified dictionary with only the file ID and name.
        - Updates the `data` dictionary to include the current file list under the 'files_data' key.

    Args:
        - data (str or dict): The user data, which may include previous records of files or other information.
          If `data` is a string, it will be converted to a dictionary.

    Returns:
        - str: The updated user data as a string, now including a 'files_data' key containing the current file list.

    Example Usage:
        - If `data` is a string representation of a dictionary, the function first converts it, retrieves the list of
          current files, and adds them to `data`. The function then returns the modified data as a string.

    Notes:
        - This function assumes `list_files_in_folder` is a helper function that fetches files from a Google Drive folder
          using the user's unique `user_id`.
    """

    # Convert `data` from a string to a dictionary if necessary
    if isinstance(data, str):
        data = ast.literal_eval(data)

    # Retrieve the list of files in the user's Google Drive folder
    files_list = list_files_in_folder(data['user_data'][0]['user_id'])

    # Initialize a list to hold simplified file data entries
    final_file_list = []

    # Process each file and add its ID and name to the `final_file_list`
    for file in files_list:
        final_file_list.append({'id': file['id'], 'name': file['name']})

    # Add `files_data` to the dictionary `data`, containing the current list of files
    data['files_data'] = final_file_list

    # Return the updated data as a string representation
    return str(data)


def remove_files_by_file_id(data, ids_to_remove):
    # Convert the data string to a dictionary if needed
    if isinstance(data, str):
        data = ast.literal_eval(data)

    # Mark files as "deleted" in 'files_data' if their 'id' is in ids_to_remove
    for file in data.get('files_data', []):
        if file['id'] in ids_to_remove:
            file['operation'] = 'delete'
    
    return str(data)


def str_to_list(data):
    if isinstance(data, str):
        data = ast.literal_eval(data)

    return data

def parse_file_data(data_list):
    """
    Converts a list containing string representations of dictionaries into a list of actual dictionaries.
    
    Args:
    - data_list (list): List containing string representations of dictionaries.
    
    Returns:
    - list[dict]: A list of dictionaries parsed from the input list of strings.
    """
    converted_list = []
    for item in data_list:
        try:
            # Parse each string item as a dictionary
            parsed_item = ast.literal_eval(item)
            # Add the parsed dictionary to the new list
            converted_list.append(parsed_item)
        except (ValueError, SyntaxError):
            # Skip items that are not valid literal representations
            continue
    return converted_list

def put_driver_in_11KA_in_planday(planday_id):
    """
    Moves a Planday employee into employee group 272732 (the '11KA' group).

    Args:
        planday_id (str): The Planday employee ID.

    Returns:
        bool: True if the API accepted the update, otherwise False.
    """
    url = f"https://openapi.planday.com/hr/v1.0/employees/{planday_id}"
    planday_headers = get_planday_tokan(PLANDAY_CLIENT_ID)
    payload = {
        "employeeGroups": [272732]
    }
    response = requests.put(url, headers=planday_headers, json=payload)
    return response.status_code in (200, 204)


def append_date_suffix_to_files(file_paths, valid_from=None, valid_until=None):
    """
    Appends a date suffix (validFrom and/or validUntil) to each file name in the given file paths.

    Args:
        file_paths (list): List of file paths to rename.
        valid_from (str): Start date in the format 'YYYY-MM-DD'. Defaults to None.
        valid_until (str): End date in the format 'YYYY-MM-DD'. Defaults to None.

    Returns:
        list: List of updated file paths with the date suffix.
    """
    # Determine the suffix based on available dates
    if valid_from and valid_until:
        # Both dates are present
        formatted_from = valid_from.replace("-", "")
        formatted_until = valid_until.replace("-", "")
        file_name_suffix = f"_{formatted_from}_{formatted_until}"
    elif valid_until:
        # Only validUntil is present
        formatted_until = valid_until.replace("-", "")
        file_name_suffix = f"_{formatted_until}"
    elif valid_from:
        # Only validFrom is present
        formatted_from = valid_from.replace("-", "")
        file_name_suffix = f"_{formatted_from}"
    else:
        # No dates provided
        file_name_suffix = ""

    # Append the suffix to each file name
    file_paths_with_dates = []
    for path in file_paths:
        directory, original_file_name = os.path.split(path)
        name, ext = os.path.splitext(original_file_name)
        new_file_name = f"{name}{file_name_suffix}{ext}"
        new_path = os.path.join(directory, new_file_name)
        os.rename(path, new_path)  # Rename the file
        file_paths_with_dates.append(new_path)

    return file_paths_with_dates
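
# Hedged sketch (os.rename touches the filesystem, so the path is illustrative):
#     append_date_suffix_to_files(['/tmp/permit.pdf'],
#                                 valid_from='2024-01-01', valid_until='2030-06-20')
#     # renames the file to '/tmp/permit_20240101_20300620.pdf' and returns the new path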


def extract_google_drive_file_id(url):
    """
    Extracts the file ID from a Google Drive file URL.

    Args:
        url (str): The Google Drive file URL.

    Returns:
        str: The extracted file ID, or None if the URL does not contain a valid file ID.
    """
    match = re.search(r"/d/([a-zA-Z0-9_-]+)", url)
    return match.group(1) if match else None
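
# Example:
#     extract_google_drive_file_id('https://drive.google.com/file/d/1AbC-dEf_123/view')
#     # -> '1AbC-dEf_123'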

def create_list_with_specific(dict_in_list, key):
    """Return a list collecting the value stored under `key` from each dict in the list."""
    return [item[key] for item in dict_in_list]

def value_changed(old_data: dict, new_data: dict, *keys) -> bool:
    """
    Return True if any of the explicitly provided keys differ between old_data and new_data.
    Usage:
        address_changed(old, new, "address", "postal_code", "location", "country")
        # or
        address_changed(old, new, *my_keys_tuple)
    """
    # allow passing a single iterable as the third arg
    if len(keys) == 1 and isinstance(keys[0], (list, tuple, set)):
        keys = tuple(keys[0])

    if not keys:
        raise ValueError("Pass the keys to compare, e.g. address_changed(old, new, 'address', 'postal_code').")

    return any(_normalize(old_data.get(k)) != _normalize(new_data.get(k)) for k in keys)
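
# Examples (values are normalized via _normalize, so surrounding whitespace is ignored):
#     value_changed({'location': 'Bern'}, {'location': '  Bern '}, 'location')        # -> False
#     value_changed({'postal_code': '3000'}, {'postal_code': '3011'}, 'postal_code')  # -> True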

def update_planday_employee(
    planday_id: str,
    *,
    # Address pieces (will be combined into street1)
    address: Optional[str] = None,
    postal_code: Optional[str] = None,
    location: Optional[str] = None,   # city
    country: Optional[str] = None,
    # Name
    first_name: Optional[str] = None,
    last_name: Optional[str] = None,
    # Phone
    phone_prefix: Optional[str] = None,   # e.g. +41
    phone_number: Optional[str] = None,   # e.g. 791234567
    # Email
    email: Optional[str] = None,
    brith_date: Optional[str] = None,
    # Pass through any additional raw fields the API supports
    extra: Optional[Dict[str, object]] = None,
    # Networking
    timeout: float = 20.0,
    ) -> Tuple[bool, Optional[int], Optional[str]]:
    """
    Update a Planday employee in one call. Only provided args are sent.

    Returns:
        (ok, status_code, error_message)
        - ok=True if API returned 204 No Content
        - status_code is the HTTP status (or None if request failed before HTTP)
        - error_message is a concise description if not ok
    """
    if not planday_id:
        return (False, None, "Missing planday_id")

    # Get OAuth/Bearer headers from the auth helper
    planday_headers = get_planday_tokan(PLANDAY_CLIENT_ID)
    if not isinstance(planday_headers, dict):
        return (False, None, "Auth headers not available")

    payload: Dict[str, object] = {}

    # ----- Address -> street1 (build only from provided parts)
    if any(v is not None for v in (address, postal_code, location, country)):
        parts = []
        if address: parts.append(address.strip())
        town = " ".join(p for p in [postal_code or "", location or ""] if p).strip()
        if town:
            parts.append(town)
        if country: parts.append(country.strip())
        street1 = ", ".join(p for p in parts if p)
        if street1:
            payload["street1"] = street1

    # ----- Name
    if first_name is not None:
        payload["firstName"] = first_name
    if last_name is not None:
        payload["lastName"] = last_name

    # ----- Birth Date
    if brith_date is not None:
        payload["birthDate"] = brith_date

    # ----- Phone
    if (phone_prefix is not None) or (phone_number is not None):
        # build only from provided parts, avoid "None" strings
        number = f"{phone_prefix or ''}{phone_number or ''}".strip()
        if number:
            payload["cellPhone"] = number

    # ----- Email
    if email is not None:
        payload["email"] = email

    # ----- Extra direct fields (advanced / passthrough)
    if extra:
        # Don't let extra clobber the basics unless intended
        for k, v in extra.items():
            if v is not None:
                payload[k] = v

    if not payload:
        return (False, None, "No fields to update (empty payload)")

    logging.debug("Planday update payload: %s", payload)

    url = f"https://openapi.planday.com/hr/v1.0/employees/{planday_id}"

    try:
        resp = requests.put(
            url,
            headers={**planday_headers, "Content-Type": "application/json"},
            json=payload,
            timeout=timeout,
        )
    except Exception as e:
        return (False, None, f"Request error: {e}")

    if resp.status_code == 204:
        return (True, resp.status_code, None)

    # Try to extract a meaningful error message
    err_text = None
    try:
        data = resp.json()
        err_text = data.get("error_description") or data.get("message") or str(data)
    except Exception:
        err_text = resp.text or "Unknown error"

    return (False, resp.status_code, err_text)
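
# Hedged usage sketch (performs a live PUT against the Planday API; the ID is
# illustrative):
#
#     ok, status, err = update_planday_employee(
#         '123456',
#         first_name='Max',
#         phone_prefix='+41',
#         phone_number='791234567',
#     )
#     if not ok:
#         logging.error("Planday update failed (%s): %s", status, err)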

def _normalize(v, empty_to_none: bool = True):
    if isinstance(v, str):
        # strips whitespace ONLY at the start/end (not in the middle)
        s = re.sub(r'^\s+|\s+$', '', v, flags=re.UNICODE)
        if empty_to_none and (s == '' or s.lower() == 'none'):
            return None
        return s
    return v