dev/fix-70 #71

Merged
tom_trgr merged 5 commits from dev/fix-70 into main 2026-01-18 18:11:00 +01:00
6 changed files with 105 additions and 32 deletions
Showing only changes of commit cfd77ae28d

View File

@@ -25,13 +25,14 @@ func OpenDatabase() (models.IDatabase, error) {
 }
 func Migrate() error {
+    return nil
     dbHost := helper.GetEnv("POSTGRES_HOST", "localhost")
     dbName := helper.GetEnv("POSTGRES_DB", "arbeitszeitmessung")
     // dbUser := helper.GetEnv("POSTGRES_USER", "api_nutzer")
     dbPassword := helper.GetEnv("POSTGRES_PASSWORD", "password")
     dbTz := helper.GetEnv("TZ", "Europe/Berlin")
-    migrations := helper.GetEnv("MIGRATIONS_DIR", "../migrations")
+    migrations := helper.GetEnv("MIGRATIONS_PATH", "../migrations")
     connStr := fmt.Sprintf("postgres://%s:%s@%s:5432/%s?sslmode=disable&TimeZone=%s", "migrate", dbPassword, dbHost, dbName, dbTz)
     m, err := migrate.New(fmt.Sprintf("file://%s", migrations), connStr)
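
Note: helper.GetEnv itself is not part of this diff. A minimal sketch of what it presumably does (return the environment variable or the given fallback), included only for context; the repo's actual helper may differ:

package helper

import "os"

// GetEnv returns the value of key, or fallback when the variable is unset or empty.
// Assumed implementation, not taken from this PR.
func GetEnv(key, fallback string) string {
    if value, ok := os.LookupEnv(key); ok && value != "" {
        return value
    }
    return fallback
}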

View File

@@ -160,19 +160,13 @@ func updateBooking(w http.ResponseWriter, r *http.Request) {
             newBooking := (*models.Booking).New(nil, user.CardUID, 0, int16(check_in_out), 1)
             newBooking.Timestamp = timestamp
-            err = newBooking.InsertWithTimestamp()
-            if err != nil {
-                log.Printf("Error inserting booking %v -> %v\n", newBooking, err)
+            if newBooking.Verify() {
+                err = newBooking.InsertWithTimestamp()
+                if err != nil {
+                    log.Printf("Error inserting booking %v -> %v\n", newBooking, err)
+                }
             }
         case "change":
-            // absenceType, err := strconv.Atoi(r.FormValue("absence"))
-            // if err != nil {
-            //     log.Println("Error parsing absence type.", err)
-            //     absenceType = 0
-            // }
-            // if absenceType != 0 {
-            //     createAbsence(absenceType, user, loc, r)
-            // }
             for index, possibleBooking := range r.PostForm {
                 if len(index) > 7 && index[:7] == "booking" {
                     booking_id, err := strconv.Atoi(index[8:])
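
Note: newBooking.Verify() is referenced here but its implementation is not shown in this commit, so the following is guesswork only. It plausibly rejects bookings with an empty card UID or an unknown check_in_out code (the codes 1, 2, 3, 4 and 254 appear in the work-day query further down); field names and types are assumptions.

// Hypothetical sketch - not the repository's actual Verify().
func (b *Booking) Verify() bool {
    if b.CardUID == "" { // assumes CardUID is a string field
        return false
    }
    switch b.CheckInOut { // assumes an int16 field, as used in the handler above
    case 1, 2, 3, 4, 254:
        return true
    default:
        return false
    }
}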

View File

@@ -128,6 +128,9 @@ func (b *Booking) InsertWithTimestamp() error {
     if b.Timestamp.IsZero() {
         return b.Insert()
     }
+    if !checkLastBooking(*b) {
+        return SameBookingError{}
+    }
     stmt, err := DB.Prepare((`INSERT INTO anwesenheit (card_uid, geraet_id, check_in_out, anwesenheit_typ, timestamp) VALUES ($1, $2, $3, $4, $5) RETURNING counter_id`))
     if err != nil {
         return err
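
Note: checkLastBooking and SameBookingError are used above but not defined in this commit. The sketch below shows one way they could work (refuse an insert whose check_in_out repeats the most recent booking for the same card); signatures, field names and the query are assumptions.

// Hypothetical sketch - the real definitions live elsewhere in the models package.
type SameBookingError struct{}

func (SameBookingError) Error() string {
    return "booking repeats the previous check_in_out for this card"
}

func checkLastBooking(b Booking) bool {
    var lastCheck int16
    err := DB.QueryRow(
        `SELECT check_in_out FROM anwesenheit WHERE card_uid = $1 ORDER BY timestamp DESC LIMIT 1`,
        b.CardUID,
    ).Scan(&lastCheck)
    if err != nil {
        // no previous booking (sql.ErrNoRows) or a query failure: allow the insert
        return true
    }
    return lastCheck != b.CheckInOut
}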

View File

@@ -175,22 +175,39 @@ func GetWorkDays(user User, tsFrom, tsTo time.Time) []WorkDay {
     qStr, err := DB.Prepare(`
     WITH all_days AS (
         SELECT generate_series($2::DATE, $3::DATE - INTERVAL '1 day', INTERVAL '1 day')::DATE AS work_date),
-    ordered_bookings AS (
-        SELECT
-            a.timestamp::DATE AS work_date,
-            a.timestamp,
-            a.check_in_out,
-            a.counter_id,
-            a.anwesenheit_typ,
-            sat.anwesenheit_name AS anwesenheit_typ_name,
-            LAG(a.timestamp) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_timestamp,
-            LAG(a.check_in_out) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_check
-        FROM anwesenheit a
-        LEFT JOIN s_anwesenheit_typen sat ON a.anwesenheit_typ = sat.anwesenheit_id
-        WHERE a.card_uid = $1
-        AND a.timestamp::DATE >= $2
-        AND a.timestamp::DATE <= $3
-    )
+    normalized_bookings AS (
+        SELECT *
+        FROM (
+            SELECT
+                a.card_uid,
+                a.timestamp,
+                a.timestamp::DATE AS work_date,
+                a.check_in_out,
+                a.counter_id,
+                a.anwesenheit_typ,
+                sat.anwesenheit_name AS anwesenheit_typ_name,
+                LAG(a.check_in_out) OVER (
+                    PARTITION BY a.card_uid, a.timestamp::DATE
+                    ORDER BY a.timestamp
+                ) AS prev_check
+            FROM anwesenheit a
+            LEFT JOIN s_anwesenheit_typen sat
+                ON a.anwesenheit_typ = sat.anwesenheit_id
+            WHERE a.card_uid = $1
+            AND a.timestamp::DATE >= $2
+            AND a.timestamp::DATE <= $3
+        ) t
+        WHERE prev_check IS NULL OR prev_check <> check_in_out
+    ),
+    ordered_bookings AS (
+        SELECT
+            *,
+            LAG(timestamp) OVER (
+                PARTITION BY card_uid, work_date
+                ORDER BY timestamp
+            ) AS prev_timestamp
+        FROM normalized_bookings
+    )
     SELECT
         d.work_date,
         COALESCE(MIN(b.timestamp), NOW()) AS time_from,
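
The new normalized_bookings CTE drops any row whose check_in_out repeats the value of the immediately preceding row for the same card and day, so duplicate scans no longer distort the work/pause sums computed further down. A small self-contained Go sketch of the same filtering rule (type and field names invented for illustration, not taken from the repo):

package main

import (
    "fmt"
    "time"
)

// booking is a minimal stand-in for models.Booking; field names are assumptions.
type booking struct {
    CheckInOut int16
    Timestamp  time.Time
}

// dropRepeatedChecks mirrors the CTE's "WHERE prev_check IS NULL OR prev_check <> check_in_out":
// a row is kept only if its check_in_out differs from the previous row's.
func dropRepeatedChecks(in []booking) []booking {
    out := make([]booking, 0, len(in))
    for i, b := range in {
        if i == 0 || in[i-1].CheckInOut != b.CheckInOut {
            out = append(out, b)
        }
    }
    return out
}

func main() {
    now := time.Now()
    day := []booking{
        {CheckInOut: 1, Timestamp: now},                    // check-in
        {CheckInOut: 1, Timestamp: now.Add(time.Minute)},   // repeated check-in, dropped
        {CheckInOut: 2, Timestamp: now.Add(8 * time.Hour)}, // check-out, kept
    }
    fmt.Println(len(dropRepeatedChecks(day))) // prints 2
}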
@@ -228,6 +245,62 @@ func GetWorkDays(user User, tsFrom, tsTo time.Time) []WorkDay {
     GROUP BY d.work_date
     ORDER BY d.work_date ASC;`)
+    // qStr, err := DB.Prepare(`
+    // WITH all_days AS (
+    //     SELECT generate_series($2::DATE, $3::DATE - INTERVAL '1 day', INTERVAL '1 day')::DATE AS work_date),
+    // ordered_bookings AS (
+    //     SELECT
+    //         a.timestamp::DATE AS work_date,
+    //         a.timestamp,
+    //         a.check_in_out,
+    //         a.counter_id,
+    //         a.anwesenheit_typ,
+    //         sat.anwesenheit_name AS anwesenheit_typ_name,
+    //         LAG(a.timestamp) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_timestamp,
+    //         LAG(a.check_in_out) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_check
+    //     FROM anwesenheit a
+    //     LEFT JOIN s_anwesenheit_typen sat ON a.anwesenheit_typ = sat.anwesenheit_id
+    //     WHERE a.card_uid = $1
+    //     AND a.timestamp::DATE >= $2
+    //     AND a.timestamp::DATE <= $3
+    // )
+    // SELECT
+    //     d.work_date,
+    //     COALESCE(MIN(b.timestamp), NOW()) AS time_from,
+    //     COALESCE(MAX(b.timestamp), NOW()) AS time_to,
+    //     COALESCE(
+    //         EXTRACT(EPOCH FROM SUM(
+    //             CASE
+    //                 WHEN b.prev_check IN (1, 3) AND b.check_in_out IN (2, 4, 254)
+    //                 THEN b.timestamp - b.prev_timestamp
+    //                 ELSE INTERVAL '0'
+    //             END
+    //         )), 0
+    //     ) AS total_work_seconds,
+    //     COALESCE(
+    //         EXTRACT(EPOCH FROM SUM(
+    //             CASE
+    //                 WHEN b.prev_check IN (2, 4, 254) AND b.check_in_out IN (1, 3)
+    //                 THEN b.timestamp - b.prev_timestamp
+    //                 ELSE INTERVAL '0'
+    //             END
+    //         )), 0
+    //     ) AS total_pause_seconds,
+    //     COALESCE(jsonb_agg(jsonb_build_object(
+    //         'check_in_out', b.check_in_out,
+    //         'timestamp', b.timestamp,
+    //         'counter_id', b.counter_id,
+    //         'anwesenheit_typ', b.anwesenheit_typ,
+    //         'anwesenheit_typ', jsonb_build_object(
+    //             'anwesenheit_id', b.anwesenheit_typ,
+    //             'anwesenheit_name', b.anwesenheit_typ_name
+    //         )
+    //     ) ORDER BY b.timestamp), '[]'::jsonb) AS bookings
+    // FROM all_days d
+    // LEFT JOIN ordered_bookings b ON d.work_date = b.work_date
+    // GROUP BY d.work_date
+    // ORDER BY d.work_date ASC;`)
     if err != nil {
         log.Println("Error preparing SQL statement", err)
         return workDays

View File

@@ -2,7 +2,7 @@ name: arbeitszeitmessung-dev
 services:
   db:
     volumes:
-      - ../DDB:/var/lib/postgresql/data
+      - ${POSTGRES_PATH}:/var/lib/postgresql/data
       - ${POSTGRES_PATH}/initdb:/docker-entrypoint-initdb.d
     ports:
       - 5432:5432

View File

@@ -3,9 +3,11 @@ POSTGRES_PASSWORD=very_secure # Postgres ADMIN Passwort
 POSTGRES_API_USER=api_nutzer # Postgres API Nutzername (für Arbeitszeitmessung)
 POSTGRES_API_PASS=password # Postgres API Passwort (für Arbeitszeitmessung)
 POSTGRES_PATH=__ROOT__/DB # Datebank Pfad (relativ zu Docker Ordner oder absoluter pfad mit /...)
-LOG_PATH=__ROOT__/logs # Pfad für Audit Logs
 POSTGRES_DB=arbeitszeitmessung # Postgres Datenbank Name
-POSTGRES_PORT=127.0.0.1:5432 # Postgres Port will not be exposed by default. regex:^[0-9]{1,5}$
+POSTGRES_PORT=127.0.0.1:5432 # Postgres Port normalerweise nicht freigegeben. regex:^[0-9]{1,5}$
+MIGRATIONS_PATH=__ROOT__/migrations # Pfad zu DB migrations (wenn nicht verändert wurde, bei default bleiben)
 TZ=Europe/Berlin # Zeitzone
 API_TOKEN=dont_access # API Token für ESP Endpoints
-WEB_PORT=8000 # Port from which Arbeitszeitmessung should be accessable regex:^[0-9]{1,5}$
+WEB_PORT=8000 # Port unter welchem Webserver erreichbar ist. regex:^[0-9]{1,5}$
+LOG_PATH=__ROOT__/logs # Pfad für Audit Logs
+LOG_LEVEL=warn # Welche Log-Nachrichten werden in der Konsole erscheinen
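
Note: this hunk only adds LOG_LEVEL to the .env template; the code that consumes it is not shown in this view. One plausible way the backend could map it onto Go's standard log/slog levels (wiring is an assumption, not the project's actual code):

package main

import (
    "log/slog"
    "os"
)

func main() {
    // Default mirrors the template's LOG_LEVEL=warn.
    level := slog.LevelWarn
    switch os.Getenv("LOG_LEVEL") {
    case "debug":
        level = slog.LevelDebug
    case "info":
        level = slog.LevelInfo
    case "error":
        level = slog.LevelError
    }
    handler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: level})
    slog.SetDefault(slog.New(handler))

    slog.Info("suppressed when LOG_LEVEL=warn") // below the configured level
    slog.Warn("shown when LOG_LEVEL=warn")      // at or above the configured level
}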