diff --git a/Backend/database.go b/Backend/database.go
index 8394d1f..7f3abf5 100644
--- a/Backend/database.go
+++ b/Backend/database.go
@@ -5,7 +5,11 @@ import (
 	"arbeitszeitmessung/models"
 	"database/sql"
 	"fmt"
+	"log/slog"
 
+	"github.com/golang-migrate/migrate/v4"
+	_ "github.com/golang-migrate/migrate/v4/database/postgres"
+	_ "github.com/golang-migrate/migrate/v4/source/file"
 	_ "github.com/lib/pq"
 )
 
@@ -19,3 +23,29 @@ func OpenDatabase() (models.IDatabase, error) {
 	connStr := fmt.Sprintf("postgres://%s:%s@%s:5432/%s?sslmode=disable&TimeZone=%s", dbUser, dbPassword, dbHost, dbName, dbTz)
 	return sql.Open("postgres", connStr)
 }
+
+func Migrate() error {
+	dbHost := helper.GetEnv("POSTGRES_HOST", "localhost")
+	dbName := helper.GetEnv("POSTGRES_DB", "arbeitszeitmessung")
+	// dbUser := helper.GetEnv("POSTGRES_USER", "api_nutzer")
+	dbPassword := helper.GetEnv("POSTGRES_PASSWORD", "password")
+	dbTz := helper.GetEnv("TZ", "Europe/Berlin")
+
+	migrations := helper.GetEnv("MIGRATIONS_PATH", "../migrations")
+
+	connStr := fmt.Sprintf("postgres://%s:%s@%s:5432/%s?sslmode=disable&TimeZone=%s", "migrate", dbPassword, dbHost, dbName, dbTz)
+	m, err := migrate.New(fmt.Sprintf("file://%s", migrations), connStr)
+	if err != nil {
+		return err
+	}
+
+	slog.Info("Connected to database. Running migrations now.")
+
+	// Migrate all the way up ...
+	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
+		return err
+	}
+
+	slog.Info("Finished migrations, starting webserver.")
+	return nil
+}
diff --git a/Backend/endpoints/time-create.go b/Backend/endpoints/time-create.go
index 5f64103..f368b19 100644
--- a/Backend/endpoints/time-create.go
+++ b/Backend/endpoints/time-create.go
@@ -7,6 +7,7 @@ import (
 	"errors"
 	"log"
 	"net/http"
+	"time"
 )
 
 // Relevant for arduino inputs -> creates new Booking from get and put method
@@ -36,6 +37,7 @@ func createBooking(w http.ResponseWriter, r *http.Request) {
 	}
 
 	booking := (*models.Booking).FromUrlParams(nil, r.URL.Query())
+	booking.Timestamp = time.Now()
 	if booking.Verify() {
 		err := booking.Insert()
 		if errors.Is(models.SameBookingError{}, err) {
diff --git a/Backend/endpoints/time.go b/Backend/endpoints/time.go
index e3b0193..393044a 100644
--- a/Backend/endpoints/time.go
+++ b/Backend/endpoints/time.go
@@ -160,19 +160,13 @@ func updateBooking(w http.ResponseWriter, r *http.Request) {
 		newBooking := (*models.Booking).New(nil, user.CardUID, 0, int16(check_in_out), 1)
 		newBooking.Timestamp = timestamp
 
-		err = newBooking.InsertWithTimestamp()
-		if err != nil {
-			log.Printf("Error inserting booking %v -> %v\n", newBooking, err)
+		if newBooking.Verify() {
+			err = newBooking.InsertWithTimestamp()
+			if err != nil {
+				log.Printf("Error inserting booking %v -> %v\n", newBooking, err)
+			}
 		}
 	case "change":
-		// absenceType, err := strconv.Atoi(r.FormValue("absence"))
-		// if err != nil {
-		// 	log.Println("Error parsing absence type.", err)
-		// 	absenceType = 0
-		// }
-		// if absenceType != 0 {
-		// 	createAbsence(absenceType, user, loc, r)
-		// }
 		for index, possibleBooking := range r.PostForm {
 			if len(index) > 7 && index[:7] == "booking" {
 				booking_id, err := strconv.Atoi(index[8:])
diff --git a/Backend/go.sum b/Backend/go.sum
index 0c9f849..705f796 100644
--- a/Backend/go.sum
+++ b/Backend/go.sum
@@ -4,8 +4,6 @@ github.com/Dadido3/go-typst v0.8.0 h1:uTLYprhkrBjwsCXRRuyYUFL0fpYHa2kIYoOB/CGqVN
 github.com/Dadido3/go-typst v0.8.0/go.mod h1:QYis9sT70u65kn1SkFfyPRmHsPxgoxWbAixwfPReOZA=
 github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/a-h/templ v0.3.943 h1:o+mT/4yqhZ33F3ootBiHwaY4HM5EVaOJfIshvd5UNTY=
-github.com/a-h/templ v0.3.943/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo=
 github.com/a-h/templ v0.3.960 h1:trshEpGa8clF5cdI39iY4ZrZG8Z/QixyzEyUnA7feTM=
 github.com/a-h/templ v0.3.960/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo=
 github.com/alexedwards/scs/v2 v2.8.0 h1:h31yUYoycPuL0zt14c0gd+oqxfRwIj6SOjHdKRZxhEw=
@@ -39,6 +37,8 @@ github.com/golang-migrate/migrate/v4 v4.18.3 h1:EYGkoOsvgHHfm5U/naS1RP/6PL/Xv3S4
 github.com/golang-migrate/migrate/v4 v4.18.3/go.mod h1:99BKpIi6ruaaXRM1A77eqZ+FWPQ3cfRa+ZVy5bmWMaY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -78,7 +78,6 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8
 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
-github.com/wlbr/feiertage v1.10.0/go.mod h1:wJOHvMa6sI5L1FkrTOX/GSoO0hpK3S2YqGLPi8Q84I0=
 github.com/wlbr/feiertage v1.17.0 h1:AEck/iUQu19iU0xNEoSQTeSTGXF1Ju0tbAwEi/Lmwqk=
 github.com/wlbr/feiertage v1.17.0/go.mod h1:TVZgmSZgGW/jSxexZ56qdlR6cDj+F/FO8bkw8U6kYxM=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
diff --git a/Backend/main.go b/Backend/main.go
index 731173c..e0817dd 100644
--- a/Backend/main.go
+++ b/Backend/main.go
@@ -5,6 +5,7 @@ import (
 	"arbeitszeitmessung/helper"
 	"arbeitszeitmessung/models"
 	"context"
+	"database/sql"
 	"log/slog"
 	"net/http"
 	"os"
@@ -44,6 +45,15 @@ func main() {
 	models.DB, err = OpenDatabase()
 	if err != nil {
 		slog.Error("Error while opening the database", "Error", err)
+		return
+	}
+
+	defer models.DB.(*sql.DB).Close()
+
+	err = Migrate()
+	if err != nil {
+		slog.Error("Failed to migrate the database to newest version", "Error", err)
+		return
 	}
 
 	fs := http.FileServer(http.Dir("./static"))
@@ -70,6 +80,8 @@ func main() {
 
 	serverSessionMiddleware := endpoints.Session.LoadAndSave(server)
 
+	serverSessionMiddleware = loggingMiddleware(serverSessionMiddleware)
+
 	// starting the http server
 	slog.Info("Server is running at http://localhost:8080")
 	slog.Error("Error starting Server", "Error", http.ListenAndServe(":8080", serverSessionMiddleware))
@@ -79,7 +91,24 @@ func ParamsMiddleware(next http.HandlerFunc) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		queryParams := r.URL.Query()
 		ctx := context.WithValue(r.Context(), "urlParams", queryParams)
-		slog.Debug("ParamsMiddleware added urlParams", slog.Any("urlParams", queryParams))
+		if len(queryParams) > 0 {
+			slog.Debug("ParamsMiddleware added urlParams", slog.Any("urlParams", queryParams))
+		}
 		next.ServeHTTP(w, r.WithContext(ctx))
 	})
 }
+
+func loggingMiddleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		start := time.Now()
+
+		// Log the method and the requested URL
+		slog.Info("Started request", slog.String("Method", r.Method), slog.String("Path", r.URL.Path))
+
+		// Call the next handler in the chain
+		next.ServeHTTP(w, r)
+
+		// Log how long it took
+		slog.Info("Completed request", slog.String("Time", time.Since(start).String()))
+	})
+}
diff --git a/Backend/models/booking.go b/Backend/models/booking.go
index 28b638a..463da92 100644
--- a/Backend/models/booking.go
+++ b/Backend/models/booking.go
@@ -128,6 +128,9 @@ func (b *Booking) InsertWithTimestamp() error {
 	if b.Timestamp.IsZero() {
 		return b.Insert()
 	}
+	if !checkLastBooking(*b) {
+		return SameBookingError{}
+	}
 	stmt, err := DB.Prepare((`INSERT INTO anwesenheit (card_uid, geraet_id, check_in_out, anwesenheit_typ, timestamp) VALUES ($1, $2, $3, $4, $5) RETURNING counter_id`))
 	if err != nil {
 		return err
@@ -242,12 +245,13 @@ func (b *Booking) Update(nb Booking) {
 func checkLastBooking(b Booking) bool {
 	var check_in_out int
 
-	stmt, err := DB.Prepare((`SELECT check_in_out FROM "anwesenheit" WHERE "card_uid" = $1 ORDER BY "timestamp" DESC LIMIT 1;`))
+	slog.Info("Checking with timestamp:", "timestamp", b.Timestamp.String())
+	stmt, err := DB.Prepare((`SELECT check_in_out FROM "anwesenheit" WHERE "card_uid" = $1 AND "timestamp"::DATE <= $2::DATE ORDER BY "timestamp" DESC LIMIT 1;`))
 	if err != nil {
 		log.Fatalf("Error preparing query: %v", err)
 		return false
 	}
-	err = stmt.QueryRow(b.CardUID).Scan(&check_in_out)
+	err = stmt.QueryRow(b.CardUID, b.Timestamp).Scan(&check_in_out)
 	if err == sql.ErrNoRows {
 		return true
 	}
diff --git a/Backend/models/iworkday.go b/Backend/models/iworkday.go
index 6305842..1170aa1 100644
--- a/Backend/models/iworkday.go
+++ b/Backend/models/iworkday.go
@@ -49,19 +49,16 @@ func GetDays(user User, tsFrom, tsTo time.Time, orderedForward bool) []IWorkDay
 	}
 
 	for _, absentDay := range absences {
-		// Kurzarbeit should be integrated in workday
+
+		// Check if there is already a day
 		existingDay, ok := allDays[absentDay.Date().Format(time.DateOnly)]
-		if !ok {
-			allDays[absentDay.Date().Format(time.DateOnly)] = &absentDay
-			continue
-		}
 		switch {
 		case absentDay.AbwesenheitTyp.WorkTime < 0:
 			if workDay, ok := allDays[absentDay.Date().Format(time.DateOnly)].(*WorkDay); ok {
 				workDay.kurzArbeit = true
 				workDay.kurzArbeitAbsence = absentDay
 			}
-		case !existingDay.IsEmpty():
+		case ok && !existingDay.IsEmpty():
 			allDays[absentDay.Date().Format(time.DateOnly)] = NewCompondDay(absentDay.Date(), existingDay, &absentDay)
 		default:
 			allDays[absentDay.Date().Format(time.DateOnly)] = &absentDay
diff --git a/Backend/models/workDay.go b/Backend/models/workDay.go
index c995275..b69a388 100644
--- a/Backend/models/workDay.go
+++ b/Backend/models/workDay.go
@@ -175,22 +175,39 @@ func GetWorkDays(user User, tsFrom, tsTo time.Time) []WorkDay {
 	qStr, err := DB.Prepare(`
 	WITH all_days AS (
 		SELECT generate_series($2::DATE, $3::DATE - INTERVAL '1 day', INTERVAL '1 day')::DATE AS work_date),
-	ordered_bookings AS (
-		SELECT
-			a.timestamp::DATE AS work_date,
-			a.timestamp,
-			a.check_in_out,
-			a.counter_id,
-			a.anwesenheit_typ,
-			sat.anwesenheit_name AS anwesenheit_typ_name,
-			LAG(a.timestamp) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_timestamp,
-			LAG(a.check_in_out) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_check
-		FROM anwesenheit a
-		LEFT JOIN s_anwesenheit_typen sat ON a.anwesenheit_typ = sat.anwesenheit_id
-		WHERE a.card_uid = $1
-		AND a.timestamp::DATE >= $2
-		AND a.timestamp::DATE <= $3
-	)
+	normalized_bookings AS (
+		SELECT *
+		FROM (
+			SELECT
+				a.card_uid,
+				a.timestamp,
+				a.timestamp::DATE AS work_date,
+				a.check_in_out,
+				a.counter_id,
+				a.anwesenheit_typ,
+				sat.anwesenheit_name AS anwesenheit_typ_name,
+				LAG(a.check_in_out) OVER (
+					PARTITION BY a.card_uid, a.timestamp::DATE
+					ORDER BY a.timestamp
+				) AS prev_check
+			FROM anwesenheit a
+			LEFT JOIN s_anwesenheit_typen sat
+				ON a.anwesenheit_typ = sat.anwesenheit_id
+			WHERE a.card_uid = $1
+			AND a.timestamp::DATE >= $2
+			AND a.timestamp::DATE <= $3
+		) t
+		WHERE prev_check IS NULL OR prev_check <> check_in_out
+	),
+	ordered_bookings AS (
+		SELECT
+			*,
+			LAG(timestamp) OVER (
+				PARTITION BY card_uid, work_date
+				ORDER BY timestamp
+			) AS prev_timestamp
+		FROM normalized_bookings
+	)
 	SELECT
 		d.work_date,
 		COALESCE(MIN(b.timestamp), NOW()) AS time_from,
 		COALESCE(MAX(b.timestamp), NOW()) AS time_to,
@@ -228,6 +245,62 @@ func GetWorkDays(user User, tsFrom, tsTo time.Time) []WorkDay {
 	GROUP BY d.work_date
 	ORDER BY d.work_date ASC;`)
+	// qStr, err := DB.Prepare(`
+	// WITH all_days AS (
+	// 	SELECT generate_series($2::DATE, $3::DATE - INTERVAL '1 day', INTERVAL '1 day')::DATE AS work_date),
+	// ordered_bookings AS (
+	// 	SELECT
+	// 		a.timestamp::DATE AS work_date,
+	// 		a.timestamp,
+	// 		a.check_in_out,
+	// 		a.counter_id,
+	// 		a.anwesenheit_typ,
+	// 		sat.anwesenheit_name AS anwesenheit_typ_name,
+	// 		LAG(a.timestamp) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_timestamp,
+	// 		LAG(a.check_in_out) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_check
+	// 	FROM anwesenheit a
+	// 	LEFT JOIN s_anwesenheit_typen sat ON a.anwesenheit_typ = sat.anwesenheit_id
+	// 	WHERE a.card_uid = $1
+	// 	AND a.timestamp::DATE >= $2
+	// 	AND a.timestamp::DATE <= $3
+	// )
+	// SELECT
+	// 	d.work_date,
+	// 	COALESCE(MIN(b.timestamp), NOW()) AS time_from,
+	// 	COALESCE(MAX(b.timestamp), NOW()) AS time_to,
+	// 	COALESCE(
+	// 		EXTRACT(EPOCH FROM SUM(
+	// 			CASE
+	// 				WHEN b.prev_check IN (1, 3) AND b.check_in_out IN (2, 4, 254)
+	// 				THEN b.timestamp - b.prev_timestamp
+	// 				ELSE INTERVAL '0'
+	// 			END
+	// 		)), 0
+	// 	) AS total_work_seconds,
+	// 	COALESCE(
+	// 		EXTRACT(EPOCH FROM SUM(
+	// 			CASE
+	// 				WHEN b.prev_check IN (2, 4, 254) AND b.check_in_out IN (1, 3)
+	// 				THEN b.timestamp - b.prev_timestamp
+	// 				ELSE INTERVAL '0'
+	// 			END
+	// 		)), 0
+	// 	) AS total_pause_seconds,
+	// 	COALESCE(jsonb_agg(jsonb_build_object(
+	// 		'check_in_out', b.check_in_out,
+	// 		'timestamp', b.timestamp,
+	// 		'counter_id', b.counter_id,
+	// 		'anwesenheit_typ', b.anwesenheit_typ,
+	// 		'anwesenheit_typ', jsonb_build_object(
+	// 			'anwesenheit_id', b.anwesenheit_typ,
+	// 			'anwesenheit_name', b.anwesenheit_typ_name
+	// 		)
+	// 	) ORDER BY b.timestamp), '[]'::jsonb) AS bookings
+	// FROM all_days d
+	// LEFT JOIN ordered_bookings b ON d.work_date = b.work_date
+	// GROUP BY d.work_date
+	// ORDER BY d.work_date ASC;`)
+
 	if err != nil {
 		log.Println("Error preparing SQL statement", err)
 		return workDays
@@ -258,7 +331,7 @@ func GetWorkDays(user User, tsFrom, tsTo time.Time) []WorkDay {
 		if len(workDay.Bookings) == 1 && workDay.Bookings[0].CounterId == 0 {
 			workDay.Bookings = []Booking{}
 		}
-		if len(workDay.Bookings) > 1 || !helper.IsWeekend(workDay.Date()) {
+		if len(workDay.Bookings) >= 1 || !helper.IsWeekend(workDay.Date()) {
 			workDays = append(workDays, workDay)
 		}
 	}
diff --git a/Cron/autoBackup.sh b/Cron/autoBackup.sh
new file mode 100755
index 0000000..5430a1d
--- /dev/null
+++ b/Cron/autoBackup.sh
@@ -0,0 +1,6 @@
+# cron-timing: 05 01 * * 1
+container_name="arbeitszeitmessung-main-db-1"
+filename=backup-$(date '+%d%m%Y').sql
+database_name=__DATABASE__
+docker exec $container_name pg_dump $database_name > /home/pi/arbeitszeitmessung-backup/$filename
+echo "created backup file: "$filename
diff --git a/Cron/autoHolidays.sh b/Cron/autoHolidays.sh
new file mode 100755
index 0000000..50607b4
--- /dev/null
+++ b/Cron/autoHolidays.sh
@@ -0,0 +1,3 @@
+# Calls endpoint to write all public Holidays for the current year inside a database.
+port=__PORT__
+curl localhost:$port/auto/feiertage
diff --git a/Cron/autoLogout.sh b/Cron/autoLogout.sh
new file mode 100755
index 0000000..75acb96
--- /dev/null
+++ b/Cron/autoLogout.sh
@@ -0,0 +1,4 @@
+# cron-timing: 55 23 * * *
+# Calls endpoint to log out all users, still logged in for today
+port=__PORT__
+curl localhost:$port/auto/logout
diff --git a/DB/initdb/01_create_user.sh b/DB/initdb/01_create_user.sh
new file mode 100755
index 0000000..9536063
--- /dev/null
+++ b/DB/initdb/01_create_user.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+set -e # Exit on error
+
+echo "Creating PostgreSQL user and setting permissions... $POSTGRES_USER for API user $POSTGRES_API_USER"
+
+
+
+psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
+    CREATE ROLE migrate LOGIN ENCRYPTED PASSWORD '$POSTGRES_PASSWORD';
+    GRANT USAGE, CREATE ON SCHEMA public TO migrate;
+EOSQL
+
+# psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
+
+# GRANT SELECT, INSERT, UPDATE ON anwesenheit, abwesenheit, user_password, wochen_report, s_feiertage TO $POSTGRES_API_USER;
+# GRANT DELETE ON abwesenheit TO $POSTGRES_API_USER;
+# GRANT SELECT ON s_personal_daten, s_abwesenheit_typen, s_anwesenheit_typen, s_feiertage TO $POSTGRES_API_USER;
+# GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO $POSTGRES_API_USER;
+# EOSQL
+
+echo "User creation and permissions setup complete!"
+
+
+psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
+
+-- privilege roles
+DO \$\$
+BEGIN
+    IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'app_base') THEN
+        CREATE ROLE app_base NOLOGIN;
+    END IF;
+END
+\$\$;
+
+-- dynamic login role
+DO \$\$
+BEGIN
+    IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = '$POSTGRES_API_USER') THEN
+        CREATE ROLE $POSTGRES_API_USER
+        LOGIN
+        ENCRYPTED PASSWORD '$POSTGRES_API_PASS';
+    END IF;
+END
+\$\$;
+
+-- grant base privileges
+GRANT app_base TO $POSTGRES_API_USER;
+GRANT CONNECT ON DATABASE $POSTGRES_DB TO $POSTGRES_API_USER;
+GRANT USAGE ON SCHEMA public TO $POSTGRES_API_USER;
+
+CREATE EXTENSION IF NOT EXISTS pgcrypto;
+
+EOSQL
+
+# psql -v ON_ERROR_STOP=1 --username root --dbname arbeitszeitmessung
diff --git a/DB/initdb/02_sample_data.sql b/DB/initdb/02_sample_data.sql
deleted file mode 100755
index ea7fcf7..0000000
--- a/DB/initdb/02_sample_data.sql
+++ /dev/null
@@ -1,8 +0,0 @@
-INSERT INTO "s_personal_daten" ("personal_nummer", "aktiv_beschaeftigt", "vorname", "nachname", "geburtsdatum", "plz", "adresse", "geschlecht", "card_uid", "hauptbeschaeftigungs_ort", "arbeitszeit_per_tag", "arbeitszeit_per_woche", "arbeitszeit_min_start", "arbeitszeit_max_ende", "vorgesetzter_pers_nr") VALUES
-(123, 't', 'Kim', 'Mustermensch', '2003-02-01', '08963', 'Altenburger Str. 44A', 1, 'aaaa-aaaa', 1, 8, 40, '07:00:00', '20:00:00', 0);
-
-INSERT INTO "user_password" ("personal_nummer", "pass_hash") VALUES
-(123, crypt('max_pass', gen_salt('bf')));
-
-INSERT INTO "s_anwesenheit_typen" ("anwesenheit_id", "anwesenheit_name") VALUES (1, 'Büro');
-INSERT INTO "s_abwesenheit_typen" ("abwesenheit_id", "abwesenheit_name", "arbeitszeit_equivalent") VALUES (1, 'Urlaub', 100), (2, 'Krank', 100), (3, 'Kurzarbeit', -1), (4, 'Urlaub untertags', 50);
diff --git a/DB/initdb/03_create_user.sh b/DB/initdb/03_create_user.sh
deleted file mode 100755
index 42be086..0000000
--- a/DB/initdb/03_create_user.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-set -e # Exit on error
-
-echo "Creating PostgreSQL user and setting permissions... $POSTGRES_USER for API user $POSTGRES_API_USER"
-
-psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
-    CREATE USER $POSTGRES_API_USER WITH ENCRYPTED PASSWORD '$POSTGRES_API_PASS';
-EOSQL
-
-psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
-    GRANT CONNECT ON DATABASE $POSTGRES_DB TO $POSTGRES_API_USER;
-    GRANT USAGE ON SCHEMA public TO $POSTGRES_API_USER;
-    GRANT SELECT, INSERT, UPDATE ON anwesenheit, abwesenheit, user_password, wochen_report, s_feiertage TO $POSTGRES_API_USER;
-    GRANT DELETE ON abwesenheit TO $POSTGRES_API_USER;
-    GRANT SELECT ON s_personal_daten, s_abwesenheit_typen, s_anwesenheit_typen, s_feiertage TO $POSTGRES_API_USER;
-    GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO $POSTGRES_API_USER;
-EOSQL
-
-echo "User creation and permissions setup complete!"
-
-# psql -v ON_ERROR_STOP=1 --username root --dbname arbeitszeitmessung
diff --git a/Docker/docker-compose.dev.yml b/Docker/docker-compose.dev.yml
index b5a413b..2fc30dd 100644
--- a/Docker/docker-compose.dev.yml
+++ b/Docker/docker-compose.dev.yml
@@ -3,7 +3,7 @@ services:
   db:
     volumes:
       - ${POSTGRES_PATH}:/var/lib/postgresql/data
-      # - ${POSTGRES_PATH}/initdb:/docker-entrypoint-initdb.d
+      - ${POSTGRES_PATH}/initdb:/docker-entrypoint-initdb.d
     ports:
       - 5432:5432
diff --git a/Docker/env.example b/Docker/env.example
index ab2147f..64f40a6 100644
--- a/Docker/env.example
+++ b/Docker/env.example
@@ -2,11 +2,12 @@ POSTGRES_USER=root # Postgres ADMIN Nutzername
 POSTGRES_PASSWORD=very_secure # Postgres ADMIN Passwort
 POSTGRES_API_USER=api_nutzer # Postgres API Nutzername (für Arbeitszeitmessung)
 POSTGRES_API_PASS=password # Postgres API Passwort (für Arbeitszeitmessung)
-POSTGRES_PATH=../DB # Datebank Pfad (relativ zu Docker Ordner oder absoluter pfad mit /...)
-LOG_PATH=../logs # Pfad für Logdatein
+POSTGRES_PATH=__ROOT__/DB # Datenbank Pfad (relativ zu Docker Ordner oder absoluter Pfad mit /...)
 POSTGRES_DB=arbeitszeitmessung # Postgres Datenbank Name
-POSTGRES_PORT=127.0.0.1:5432 # Postgres Port will not be exposed by default. regex:^[0-9]{1,5}$
+POSTGRES_PORT=127.0.0.1:5432 # Postgres Port normalerweise nicht freigegeben. regex:^[0-9]{1,5}$
+MIGRATIONS_PATH=__ROOT__/migrations # Pfad zu DB migrations (wenn nicht verändert wurde, bei default bleiben)
 TZ=Europe/Berlin # Zeitzone
 API_TOKEN=dont_access # API Token für ESP Endpoints
-WEB_PORT=8000 # Port from which Arbeitszeitmessung should be accessable regex:^[0-9]{1,5}$
-TYPST_CONTAINER=arbeitszeitmessung-doc-creator # Name of the pdf compiler container
+WEB_PORT=8000 # Port unter welchem Webserver erreichbar ist. regex:^[0-9]{1,5}$
+LOG_PATH=__ROOT__/logs # Pfad für Audit Logs
+LOG_LEVEL=warn # Welche Log-Nachrichten werden in der Konsole erscheinen
diff --git a/DocumentCreator/Dockerfile b/DocumentCreator/Dockerfile
deleted file mode 100644
index e132f75..0000000
--- a/DocumentCreator/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM ghcr.io/typst/typst:0.14.0
-
-WORKDIR /app
-COPY ./templates /app/templates
-COPY ./static /app/static
-
-ENTRYPOINT ["sh", "-c", "while true; do sleep 3600; done"]
diff --git a/DocumentCreator/static/logo.png b/DocumentCreator/static/logo.png
deleted file mode 100644
index 53694f9..0000000
Binary files a/DocumentCreator/static/logo.png and /dev/null differ
diff --git a/DocumentCreator/templates/abrechnung.typ b/DocumentCreator/templates/abrechnung.typ
deleted file mode 100644
index 0735264..0000000
--- a/DocumentCreator/templates/abrechnung.typ
+++ /dev/null
@@ -1,97 +0,0 @@
-#let table-header(..headers) = {
-  table.header(
-    ..headers.pos().map(h => strong(h))
-  )
-}
-
-
-#let abrechnung(meta, days) = {
-  set page(paper: "a4", margin: (x:1.5cm, y:2.25cm),
-    footer:[#grid(
-      columns: (3fr, .65fr),
-      align: left + horizon,
-      inset: .5em,
-      [#meta.EmployeeName -- #meta.TimeRange], grid.cell(rowspan: 2)[#image("/static/logo.png")],
-      [Arbeitszeitrechnung maschinell erstellt am #meta.CurrentTimestamp],
-    )
-  ])
-  set text(font: "Noto Sans", size:10pt, fill: luma(10%))
-  set table(
-    stroke: 0.5pt + luma(10%),
-    inset: .5em,
-    align: center + horizon,
-  )
-  show text: it => {
-    if it.text == "0min"{
-      text(oklch(70.8%, 0, 0deg))[#it]
-    }else if it.text.starts-with("-"){
-      text(red)[#it]
-    }else{
-      it
-    }
-  }
-
-
-  [= Abrechnung Arbeitszeit -- #meta.EmployeeName]
-
-  [Zeitraum: #meta.TimeRange]
-
-  table(
-    columns: (1fr, 1fr, 1fr, 1fr, 1fr, 1fr, .875fr, 1.25fr),
-    fill: (x, y) =>
-      if y == 0 { oklch(87%, 0, 0deg) },
-    table-header(
-      [Datum], [Kommen], [Gehen], [Arbeitsart], [Stunden], [Kurzarbeit], [Pause], [Überstunden]
-    ),
-    .. for day in days {
-      (
-        [#day.Date],
-        if day.DayParts.len() == 0{
-          table.cell(colspan: 3)[Keine Buchungen]
-        }else if day.DayParts.len() == 1 and not day.DayParts.first().IsWorkDay{
-          table.cell(colspan: 3)[#day.DayParts.first().WorkType]
-        }
-        else {
-          table.cell(colspan: 3, inset: 0em)[
-            #table(
-              columns: (1fr, 1fr, 1fr),
-              .. for Zeit in day.DayParts {
-                (
-                  if Zeit.IsWorkDay{
-                    (
-                      table.cell()[#Zeit.BookingFrom],
-                      table.cell()[#Zeit.BookingTo],
-                      table.cell()[#Zeit.WorkType],
-                    )
-                  }else{
-                    (table.cell(colspan: 3)[#Zeit.WorkType],)
-                  }
-                )
-              },
-            )
-          ]
-        },
-        [#day.Worktime],
-        [#day.Kurzarbeit],
-        [#day.Pausetime],
-        [#day.Overtime],
-      )
-      if day.IsFriday {
-        ( table.cell(colspan: 8, fill: oklch(87%, 0, 0deg))[Wochenende], ) // note the trailing comma
-      }
-    }
-  )
-
-  table(
-    columns: (3fr, 1fr),
-    align: right,
-    inset: (x: .25em, y:.75em),
-    stroke: none,
-    table.hline(start: 0, end: 2, stroke: stroke(dash:"dashed", thickness:.5pt)),
-    [Arbeitszeit :], table.cell(align: left)[#meta.WorkTime],
-    [Kurzarbeit :], table.cell(align: left)[#meta.Kurzarbeit],
-    [Überstunden :], table.cell(align: left)[#meta.Overtime],
-    [Überstunden lfd. :],table.cell(align: left)[#meta.OvertimeTotal],
-    table.hline(start: 0, end: 2),
-)
-}
diff --git a/install.sh b/install.sh
index 114c7b5..c3c12e6 100755
--- a/install.sh
+++ b/install.sh
@@ -4,6 +4,10 @@ set -e
 envFile=Docker/.env
 envExample=Docker/env.example
 
+autoBackupScript=Cron/autoBackup.sh
+autoHolidaysScript=Cron/autoHolidays.sh
+autoLogoutScript=Cron/autoLogout.sh
+
 echo "Checking Docker installation..."
 if ! command -v docker >/dev/null 2>&1; then
   echo "Docker not found. Install Docker? [y/N]"
@@ -18,12 +22,16 @@ else
   echo "Docker is already installed."
 fi
 
+###########################################################################
+
 echo "Checking Docker Compose..."
 if ! docker compose version >/dev/null 2>&1; then
   echo "Docker Compose plugin missing. You may need to update Docker."
   exit 1
 fi
 
+###########################################################################
+
 echo "Preparing .env file..."
 if [ ! -f $envFile ]; then
   if [ -f $envExample ]; then
@@ -44,6 +52,9 @@ if [ ! -f $envFile ]; then
       raw_val=$(printf "%s" "$rest" | sed 's/ *#.*//')
      default_value=$(printf "%s" "$raw_val" | sed 's/"//g')
 
+      # Replace __ROOT__ with script pwd
+      default_value="${default_value/__ROOT__/$(pwd)}"
+
      regex=""
      if [[ "$comment" =~ regex:(.*)$ ]]; then
        regex="${BASH_REMATCH[1]}"
@@ -96,13 +107,80 @@ else
   echo "Using existing .env. (found at $envFile)"
 fi
 
+###########################################################################
+
+LOG_PATH=$(grep -E '^LOG_PATH=' $envFile | cut -d= -f2)
+if [ -z "$LOG_PATH" ]; then
+  echo "LOG_PATH not found in .env, using default $(pwd)/logs"
+  LOG_PATH=$(pwd)/logs
+else
+  LOG_PATH=Docker/$LOG_PATH
+fi
+mkdir -p $LOG_PATH
+echo "Created logs folder at $LOG_PATH"
+
+###########################################################################
+
+echo -e "\n\n"
 echo "Start containers with docker compose up -d? [y/N]"
 read -r start_containers
 if [[ "$start_containers" =~ ^[Yy]$ ]]; then
+
   cd Docker
-  mkdir ../logs
   docker compose up -d
   echo "Containers started."
 else
   echo "You can start them manually with: docker compose up -d"
 fi
+
+###########################################################################
+
+echo -e "\n\n"
+echo "Setup Crontab for automatic logout, backup and holiday creation? [y/N]"
+read -r setup_cron
+if [[ "$setup_cron" =~ ^[Yy]$ ]]; then
+  WEB_PORT=$(grep -E '^WEB_PORT=' $envFile | cut -d= -f2)
+  if [ -z "$WEB_PORT" ]; then
+    echo "WEB_PORT not found in .env, using default 8000"
+    WEB_PORT=8000
+  fi
+
+  POSTGRES_DB=$(grep -E '^POSTGRES_DB=' $envFile | cut -d= -f2)
+  if [ -z "$POSTGRES_DB" ]; then
+    echo "POSTGRES_DB not found in .env, using default arbeitszeitmessung"
+    POSTGRES_DB="arbeitszeitmessung"
+  fi
+
+  sed -i "s/__PORT__/$WEB_PORT/" $autoHolidaysScript
+  sed -i "s/__PORT__/$WEB_PORT/" $autoLogoutScript
+  sed -i "s/__DATABASE__/$POSTGRES_DB/" $autoBackupScript
+
+  chmod +x $autoBackupScript $autoHolidaysScript $autoLogoutScript
+
+  # echo "Scripts built with PORT=$WEB_PORT and DATABASE=$POSTGRES_DB!"
+  echo "Adding rules to crontab."
+
+  cron_commands=$(mktemp /tmp/arbeitszeitmessung-cron.XXX)
+
+  for file in Cron/*; do
+    cron_timing=$(grep -E '^# cron-timing:' "$file" | sed 's/^# cron-timing:[[:space:]]*//')
+
+    if [ -z "$cron_timing" ]; then
+      echo "No cron-timing found in $file, so it's not added to crontab."
+      continue
+    fi
+
+    ( crontab -l ; echo "$cron_timing $(pwd)/$file" )| awk '!x[$0]++' | crontab -
+    echo "Added entry to crontab: $cron_timing $(pwd)/$file."
+  done
+
+  if systemctl is-active --quiet cron.service ; then
+    echo "cron.service is running. Everything should be fine now."
+  else
+    echo "cron.service is not running. Please start and enable cron.service."
+    echo "For how to start a service, see: https://wiki.ubuntuusers.de/systemd/systemctl UNITNAME will be cron.service"
+  fi
+
+else
+  echo "Please set up cron manually by executing crontab -e and adding all files from inside the Cron directory!"
+fi
diff --git a/migrations/20250901201159_initial.up.sql b/migrations/20250901201159_initial.up.sql
index 5144756..23f4a3f 100644
--- a/migrations/20250901201159_initial.up.sql
+++ b/migrations/20250901201159_initial.up.sql
@@ -1,3 +1,11 @@
+ALTER DEFAULT PRIVILEGES FOR ROLE migrate
+IN SCHEMA public
+GRANT SELECT ON TABLES TO app_base;
+
+ALTER DEFAULT PRIVILEGES FOR ROLE migrate
+IN SCHEMA public
+GRANT USAGE, SELECT ON SEQUENCES TO app_base;
+
 -- create "abwesenheit" table
 CREATE TABLE "abwesenheit" (
   "counter_id" bigserial NOT NULL,
@@ -6,6 +14,7 @@ CREATE TABLE "abwesenheit" (
   "datum" timestamptz NULL DEFAULT (now())::date,
   PRIMARY KEY ("counter_id")
 );
+
 -- create "anwesenheit" table
 CREATE TABLE "anwesenheit" (
   "counter_id" bigserial NOT NULL,
@@ -55,3 +64,6 @@ CREATE TABLE "wochen_report" (
   PRIMARY KEY ("id"),
   CONSTRAINT "wochen_report_personal_nummer_woche_start_key" UNIQUE ("personal_nummer", "woche_start")
 );
+
+GRANT INSERT, UPDATE ON abwesenheit, anwesenheit, wochen_report, user_password TO app_base;
+GRANT DELETE ON abwesenheit to app_base;
diff --git a/migrations/20250901201710_triggers_extension.down.sql b/migrations/20250901201710_triggers_extension.down.sql
index 4260320..9bca733 100644
--- a/migrations/20250901201710_triggers_extension.down.sql
+++ b/migrations/20250901201710_triggers_extension.down.sql
@@ -3,8 +3,3 @@
 DROP FUNCTION update_zuletzt_geandert;
 
 DROP TRIGGER IF EXISTS pass_hash_update ON user_password;
-
-
--- revert: Adds crypto extension
-
-DROP EXTENSION IF EXISTS pgcrypto;
diff --git a/migrations/20250901201710_triggers_extension.up.sql b/migrations/20250901201710_triggers_extension.up.sql
index 5e8ad1f..0c9d936 100644
--- a/migrations/20250901201710_triggers_extension.up.sql
+++ b/migrations/20250901201710_triggers_extension.up.sql
@@ -17,5 +17,3 @@ FOR EACH ROW
 EXECUTE FUNCTION update_zuletzt_geandert();
 
 -- Adds crypto extension
-
-CREATE EXTENSION IF NOT EXISTS pgcrypto;
diff --git a/migrations/20251217215955_feiertage.up.sql b/migrations/20251217215955_feiertage.up.sql
index 69072a0..735d7f1 100644
--- a/migrations/20251217215955_feiertage.up.sql
+++ b/migrations/20251217215955_feiertage.up.sql
@@ -11,3 +11,5 @@ CREATE TABLE "s_feiertage" (
 );
 -- create index "feiertage_unique_pro_jahr" to table: "s_feiertage"
 CREATE UNIQUE INDEX "feiertage_unique_pro_jahr" ON "s_feiertage" ((EXTRACT(year FROM datum)), "name");
+
+GRANT INSERT, UPDATE ON s_feiertage TO app_base;
diff --git a/DB/initdb/01_schema.sql b/schema.sql
similarity index 100%
rename from DB/initdb/01_schema.sql
rename to schema.sql