14 Commits

Author SHA1 Message Date
18b2cbc074 Merge pull request 'dev/fix-70' (#71) from dev/fix-70 into main
Some checks failed
Tests / Run Go Tests (push) Failing after 48s
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 3m35s
Reviewed-on: #71
2026-01-18 18:11:00 +01:00
560c539b19 fixed minor bugs + added loggin middleware
All checks were successful
Tests / Run Go Tests (push) Successful in 1m26s
2026-01-18 00:07:54 +01:00
502955d32f added migrations back + removed distracting log message 2026-01-17 22:53:35 +01:00
cfd77ae28d fixed #70 + made db script ignore double bookings
Some checks failed
Tests / Run Go Tests (push) Failing after 2m0s
2026-01-17 22:25:25 +01:00
1daf4db167 fixed premission problem after making migrations executed by go
All checks were successful
Tests / Run Go Tests (push) Successful in 1m44s
2026-01-17 21:41:46 +01:00
3322f7e9bc updated install script with cron jobs
All checks were successful
Tests / Run Go Tests (push) Successful in 1m35s
2026-01-07 19:55:00 +01:00
4b9824c714 fixed sonarqube errors
All checks were successful
Tests / Run Go Tests (push) Successful in 2m28s
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 3m15s
2026-01-05 12:19:40 +01:00
7ac6c5f9b8 Merge pull request 'dev/pdf' (#69) from dev/pdf into main
Some checks failed
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 4m43s
Tests / Run Go Tests (push) Failing after 3m44s
Reviewed-on: #69
2026-01-05 12:14:46 +01:00
f9fc3d91d1 improved logging + fixed error from log folder
Some checks failed
Tests / Run Go Tests (push) Failing after 3m7s
2026-01-05 12:13:49 +01:00
f0de9961dc removed doc-creator 2026-01-05 12:13:24 +01:00
4ded8632e5 fixed overtime calc issue
Some checks failed
Arbeitszeitmessung Deploy / Build Document Creator (push) Successful in 2m30s
Tests / Run Go Tests (push) Failing after 2m35s
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 3m19s
2026-01-05 00:39:00 +01:00
b2af48463c updated git action to build right image
Some checks failed
Arbeitszeitmessung Deploy / Build Document Creator (push) Successful in 2m11s
Tests / Run Go Tests (push) Failing after 2m46s
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 3m5s
2026-01-05 00:02:33 +01:00
0b72147e02 Merge pull request 'version 2.0.0 rc' (#68) from dev/feiertage into main
Some checks failed
Tests / Run Go Tests (push) Failing after 2m29s
Arbeitszeitmessung Deploy / Build Document Creator (push) Successful in 4m1s
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 4m4s
Reviewed-on: #68
2026-01-04 23:56:04 +01:00
d1b46cf894 fixed sonarqube errors
Some checks failed
Tests / Run Go Tests (push) Failing after 1m33s
2026-01-04 23:53:16 +01:00
34 changed files with 409 additions and 147 deletions

View File

@@ -39,34 +39,34 @@ jobs:
push: true push: true
context: Backend context: Backend
tags: ${{ steps.meta.outputs.tags }} tags: ${{ steps.meta.outputs.tags }}
document-creator: # document-creator:
name: Build Document Creator # name: Build Document Creator
runs-on: ubuntu-latest # runs-on: ubuntu-latest
steps: # steps:
- name: Checkout # - name: Checkout
uses: actions/checkout@v4 # uses: actions/checkout@v4
- name: Login to GitHub Container Registry # - name: Login to GitHub Container Registry
uses: docker/login-action@v3 # uses: docker/login-action@v3
with: # with:
registry: git.letsstein.de # registry: git.letsstein.de
username: ${{ gitea.actor }} # username: ${{ gitea.actor }}
password: ${{ secrets.REGISTRY_TOKEN }} # password: ${{ secrets.REGISTRY_TOKEN }}
- name: Set up QEMU # - name: Set up QEMU
uses: docker/setup-qemu-action@v3 # uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx # - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3 # uses: docker/setup-buildx-action@v3
- name: Extract metadata (tags, labels) for Docker # - name: Extract metadata (tags, labels) for Docker
id: meta # id: meta
uses: docker/metadata-action@v5 # uses: docker/metadata-action@v5
with: # with:
images: git.letsstein.de/tom/arbeitszeitmessung-doc-creator # images: git.letsstein.de/tom/arbeitszeitmessung-doc-creator
tags: | # tags: |
type=raw,value=latest # type=raw,value=latest
type=pep440,pattern={{version}} # type=pep440,pattern={{version}}
- name: Build and push # - name: Build and push
uses: docker/build-push-action@v6 # uses: docker/build-push-action@v6
with: # with:
platforms: linux/amd64,linux/arm64 # platforms: linux/amd64,linux/arm64
push: true # push: true
context: Backend # context: DocumentCreator
tags: ${{ steps.meta.outputs.tags }} # tags: ${{ steps.meta.outputs.tags }}

View File

@@ -14,9 +14,11 @@ COPY . .
RUN go build -o server . RUN go build -o server .
FROM alpine:3.22 FROM alpine:3.22
RUN apk add --no-cache tzdata RUN apk add --no-cache tzdata typst
WORKDIR /app WORKDIR /app
COPY --from=build /app/server /app/server COPY --from=build /app/server /app/server
COPY ./doc/static /doc/static
COPY ./doc/templates /doc/templates
COPY /static /app/static COPY /static /app/static
ENTRYPOINT ["./server"] ENTRYPOINT ["./server"]

View File

@@ -5,7 +5,11 @@ import (
"arbeitszeitmessung/models" "arbeitszeitmessung/models"
"database/sql" "database/sql"
"fmt" "fmt"
"log/slog"
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
_ "github.com/lib/pq" _ "github.com/lib/pq"
) )
@@ -19,3 +23,29 @@ func OpenDatabase() (models.IDatabase, error) {
connStr := fmt.Sprintf("postgres://%s:%s@%s:5432/%s?sslmode=disable&TimeZone=%s", dbUser, dbPassword, dbHost, dbName, dbTz) connStr := fmt.Sprintf("postgres://%s:%s@%s:5432/%s?sslmode=disable&TimeZone=%s", dbUser, dbPassword, dbHost, dbName, dbTz)
return sql.Open("postgres", connStr) return sql.Open("postgres", connStr)
} }
func Migrate() error {
dbHost := helper.GetEnv("POSTGRES_HOST", "localhost")
dbName := helper.GetEnv("POSTGRES_DB", "arbeitszeitmessung")
// dbUser := helper.GetEnv("POSTGRES_USER", "api_nutzer")
dbPassword := helper.GetEnv("POSTGRES_PASSWORD", "password")
dbTz := helper.GetEnv("TZ", "Europe/Berlin")
migrations := helper.GetEnv("MIGRATIONS_PATH", "../migrations")
connStr := fmt.Sprintf("postgres://%s:%s@%s:5432/%s?sslmode=disable&TimeZone=%s", "migrate", dbPassword, dbHost, dbName, dbTz)
m, err := migrate.New(fmt.Sprintf("file://%s", migrations), connStr)
if err != nil {
return err
}
slog.Info("Connected to database. Running migrations now.")
// Migrate all the way up ...
if err := m.Up(); err != nil && err != migrate.ErrNoChange {
return err
}
slog.Info("Finished migrations starting webserver.")
return nil
}

View File

Before

Width:  |  Height:  |  Size: 17 KiB

After

Width:  |  Height:  |  Size: 17 KiB

View File

@@ -21,7 +21,7 @@ func LogoutHandler(w http.ResponseWriter, r *http.Request) {
func autoLogout(w http.ResponseWriter) { func autoLogout(w http.ResponseWriter) {
users, err := models.GetAllUsers() users, err := models.GetAllUsers()
var logged_out_users []models.User var loggedOutUsers []models.User
if err != nil { if err != nil {
fmt.Printf("Error getting user list %v\n", err) fmt.Printf("Error getting user list %v\n", err)
} }
@@ -31,7 +31,7 @@ func autoLogout(w http.ResponseWriter) {
if err != nil { if err != nil {
fmt.Printf("Error logging out user %v\n", err) fmt.Printf("Error logging out user %v\n", err)
} else { } else {
logged_out_users = append(logged_out_users, user) loggedOutUsers = append(loggedOutUsers, user)
log.Printf("Automaticaly logged out user %s, %s ", user.Name, user.Vorname) log.Printf("Automaticaly logged out user %s, %s ", user.Name, user.Vorname)
} }
} }
@@ -39,6 +39,6 @@ func autoLogout(w http.ResponseWriter) {
} }
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(logged_out_users) json.NewEncoder(w).Encode(loggedOutUsers)
} }

View File

@@ -198,8 +198,9 @@ func renderPDFSingle(data []typstData) (bytes.Buffer, error) {
var markup bytes.Buffer var markup bytes.Buffer
var output bytes.Buffer var output bytes.Buffer
typstCLI := typst.DockerExec{ typstCLI := typst.CLI{
ContainerName: helper.GetEnv("TYPST_CONTAINER", "arbeitszeitmessung-doc-creator"), WorkingDirectory: "/doc/",
// ContainerName: helper.GetEnv("TYPST_CONTAINER", "arbeitszeitmessung-doc-creator"),
} }
if err := typst.InjectValues(&markup, map[string]any{"data": data}); err != nil { if err := typst.InjectValues(&markup, map[string]any{"data": data}); err != nil {
@@ -225,8 +226,9 @@ func renderPDFSingle(data []typstData) (bytes.Buffer, error) {
func renderPDFMulti(data []typstData) ([]bytes.Buffer, error) { func renderPDFMulti(data []typstData) ([]bytes.Buffer, error) {
var outputMulti []bytes.Buffer var outputMulti []bytes.Buffer
typstRender := typst.DockerExec{ typstRender := typst.CLI{
ContainerName: helper.GetEnv("TYPST_CONTAINER", "arbeitszeitmessung-doc-creator"), WorkingDirectory: "/doc/",
// ContainerName: helper.GetEnv("TYPST_CONTAINER", "arbeitszeitmessung-doc-creator"),
} }
for _, d := range data { for _, d := range data {

View File

@@ -7,6 +7,7 @@ import (
"errors" "errors"
"log" "log"
"net/http" "net/http"
"time"
) )
// Relevant for arduino inputs -> creates new Booking from get and put method // Relevant for arduino inputs -> creates new Booking from get and put method
@@ -36,6 +37,7 @@ func createBooking(w http.ResponseWriter, r *http.Request) {
} }
booking := (*models.Booking).FromUrlParams(nil, r.URL.Query()) booking := (*models.Booking).FromUrlParams(nil, r.URL.Query())
booking.Timestamp = time.Now()
if booking.Verify() { if booking.Verify() {
err := booking.Insert() err := booking.Insert()
if errors.Is(models.SameBookingError{}, err) { if errors.Is(models.SameBookingError{}, err) {

View File

@@ -94,7 +94,7 @@ func getBookings(w http.ResponseWriter, r *http.Request) {
if day.Date().Before(lastSub) { if day.Date().Before(lastSub) {
continue continue
} }
aggregatedOvertime += day.GetOvertime(user, models.WorktimeBaseDay, false) aggregatedOvertime += day.GetOvertime(user, models.WorktimeBaseDay, true)
} }
if reportedOvertime, err := user.GetReportedOvertime(); err == nil { if reportedOvertime, err := user.GetReportedOvertime(); err == nil {
user.Overtime = (reportedOvertime + aggregatedOvertime).Round(time.Minute) user.Overtime = (reportedOvertime + aggregatedOvertime).Round(time.Minute)
@@ -160,19 +160,13 @@ func updateBooking(w http.ResponseWriter, r *http.Request) {
newBooking := (*models.Booking).New(nil, user.CardUID, 0, int16(check_in_out), 1) newBooking := (*models.Booking).New(nil, user.CardUID, 0, int16(check_in_out), 1)
newBooking.Timestamp = timestamp newBooking.Timestamp = timestamp
if newBooking.Verify() {
err = newBooking.InsertWithTimestamp() err = newBooking.InsertWithTimestamp()
if err != nil { if err != nil {
log.Printf("Error inserting booking %v -> %v\n", newBooking, err) log.Printf("Error inserting booking %v -> %v\n", newBooking, err)
} }
}
case "change": case "change":
// absenceType, err := strconv.Atoi(r.FormValue("absence"))
// if err != nil {
// log.Println("Error parsing absence type.", err)
// absenceType = 0
// }
// if absenceType != 0 {
// createAbsence(absenceType, user, loc, r)
// }
for index, possibleBooking := range r.PostForm { for index, possibleBooking := range r.PostForm {
if len(index) > 7 && index[:7] == "booking" { if len(index) > 7 && index[:7] == "booking" {
booking_id, err := strconv.Atoi(index[8:]) booking_id, err := strconv.Atoi(index[8:])

View File

@@ -4,8 +4,6 @@ github.com/Dadido3/go-typst v0.8.0 h1:uTLYprhkrBjwsCXRRuyYUFL0fpYHa2kIYoOB/CGqVN
github.com/Dadido3/go-typst v0.8.0/go.mod h1:QYis9sT70u65kn1SkFfyPRmHsPxgoxWbAixwfPReOZA= github.com/Dadido3/go-typst v0.8.0/go.mod h1:QYis9sT70u65kn1SkFfyPRmHsPxgoxWbAixwfPReOZA=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/a-h/templ v0.3.943 h1:o+mT/4yqhZ33F3ootBiHwaY4HM5EVaOJfIshvd5UNTY=
github.com/a-h/templ v0.3.943/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo=
github.com/a-h/templ v0.3.960 h1:trshEpGa8clF5cdI39iY4ZrZG8Z/QixyzEyUnA7feTM= github.com/a-h/templ v0.3.960 h1:trshEpGa8clF5cdI39iY4ZrZG8Z/QixyzEyUnA7feTM=
github.com/a-h/templ v0.3.960/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo= github.com/a-h/templ v0.3.960/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo=
github.com/alexedwards/scs/v2 v2.8.0 h1:h31yUYoycPuL0zt14c0gd+oqxfRwIj6SOjHdKRZxhEw= github.com/alexedwards/scs/v2 v2.8.0 h1:h31yUYoycPuL0zt14c0gd+oqxfRwIj6SOjHdKRZxhEw=
@@ -39,6 +37,8 @@ github.com/golang-migrate/migrate/v4 v4.18.3 h1:EYGkoOsvgHHfm5U/naS1RP/6PL/Xv3S4
github.com/golang-migrate/migrate/v4 v4.18.3/go.mod h1:99BKpIi6ruaaXRM1A77eqZ+FWPQ3cfRa+ZVy5bmWMaY= github.com/golang-migrate/migrate/v4 v4.18.3/go.mod h1:99BKpIi6ruaaXRM1A77eqZ+FWPQ3cfRa+ZVy5bmWMaY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -78,7 +78,6 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/wlbr/feiertage v1.10.0/go.mod h1:wJOHvMa6sI5L1FkrTOX/GSoO0hpK3S2YqGLPi8Q84I0=
github.com/wlbr/feiertage v1.17.0 h1:AEck/iUQu19iU0xNEoSQTeSTGXF1Ju0tbAwEi/Lmwqk= github.com/wlbr/feiertage v1.17.0 h1:AEck/iUQu19iU0xNEoSQTeSTGXF1Ju0tbAwEi/Lmwqk=
github.com/wlbr/feiertage v1.17.0/go.mod h1:TVZgmSZgGW/jSxexZ56qdlR6cDj+F/FO8bkw8U6kYxM= github.com/wlbr/feiertage v1.17.0/go.mod h1:TVZgmSZgGW/jSxexZ56qdlR6cDj+F/FO8bkw8U6kYxM=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=

View File

@@ -5,6 +5,7 @@ import (
"arbeitszeitmessung/helper" "arbeitszeitmessung/helper"
"arbeitszeitmessung/models" "arbeitszeitmessung/models"
"context" "context"
"database/sql"
"log/slog" "log/slog"
"net/http" "net/http"
"os" "os"
@@ -17,7 +18,16 @@ import (
func main() { func main() {
var err error var err error
var logLevel slog.LevelVar var logLevel slog.LevelVar
switch helper.GetEnv("LOG_LEVEL", "warn") {
case "debug":
logLevel.Set(slog.LevelDebug)
case "info":
logLevel.Set(slog.LevelInfo)
case "warn":
logLevel.Set(slog.LevelWarn) logLevel.Set(slog.LevelWarn)
case "error":
logLevel.Set(slog.LevelError)
}
logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: &logLevel})) logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: &logLevel}))
slog.SetDefault(logger) slog.SetDefault(logger)
@@ -35,6 +45,15 @@ func main() {
models.DB, err = OpenDatabase() models.DB, err = OpenDatabase()
if err != nil { if err != nil {
slog.Error("Error while opening the database", "Error", err) slog.Error("Error while opening the database", "Error", err)
return
}
defer models.DB.(*sql.DB).Close()
err = Migrate()
if err != nil {
slog.Error("Failed to migrate the database to newest version", "Error", err)
return
} }
fs := http.FileServer(http.Dir("./static")) fs := http.FileServer(http.Dir("./static"))
@@ -61,6 +80,8 @@ func main() {
serverSessionMiddleware := endpoints.Session.LoadAndSave(server) serverSessionMiddleware := endpoints.Session.LoadAndSave(server)
serverSessionMiddleware = loggingMiddleware(serverSessionMiddleware)
// starting the http server // starting the http server
slog.Info("Server is running at http://localhost:8080") slog.Info("Server is running at http://localhost:8080")
slog.Error("Error starting Server", "Error", http.ListenAndServe(":8080", serverSessionMiddleware)) slog.Error("Error starting Server", "Error", http.ListenAndServe(":8080", serverSessionMiddleware))
@@ -70,7 +91,24 @@ func ParamsMiddleware(next http.HandlerFunc) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
queryParams := r.URL.Query() queryParams := r.URL.Query()
ctx := context.WithValue(r.Context(), "urlParams", queryParams) ctx := context.WithValue(r.Context(), "urlParams", queryParams)
if len(queryParams) > 0 {
slog.Debug("ParamsMiddleware added urlParams", slog.Any("urlParams", queryParams)) slog.Debug("ParamsMiddleware added urlParams", slog.Any("urlParams", queryParams))
}
next.ServeHTTP(w, r.WithContext(ctx)) next.ServeHTTP(w, r.WithContext(ctx))
}) })
} }
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
start := time.Now()
// Log the method and the requested URL
slog.Info("Started request", slog.String("Method", r.Method), slog.String("Path", r.URL.Path))
// Call the next handler in the chain
next.ServeHTTP(w, r)
// Log how long it took
slog.Info("Completet Request", slog.String("Time", time.Since(start).String()))
})
}

View File

@@ -128,6 +128,9 @@ func (b *Booking) InsertWithTimestamp() error {
if b.Timestamp.IsZero() { if b.Timestamp.IsZero() {
return b.Insert() return b.Insert()
} }
if !checkLastBooking(*b) {
return SameBookingError{}
}
stmt, err := DB.Prepare((`INSERT INTO anwesenheit (card_uid, geraet_id, check_in_out, anwesenheit_typ, timestamp) VALUES ($1, $2, $3, $4, $5) RETURNING counter_id`)) stmt, err := DB.Prepare((`INSERT INTO anwesenheit (card_uid, geraet_id, check_in_out, anwesenheit_typ, timestamp) VALUES ($1, $2, $3, $4, $5) RETURNING counter_id`))
if err != nil { if err != nil {
return err return err
@@ -242,12 +245,13 @@ func (b *Booking) Update(nb Booking) {
func checkLastBooking(b Booking) bool { func checkLastBooking(b Booking) bool {
var check_in_out int var check_in_out int
stmt, err := DB.Prepare((`SELECT check_in_out FROM "anwesenheit" WHERE "card_uid" = $1 ORDER BY "timestamp" DESC LIMIT 1;`)) slog.Info("Checking with timestamp:", "timestamp", b.Timestamp.String())
stmt, err := DB.Prepare((`SELECT check_in_out FROM "anwesenheit" WHERE "card_uid" = $1 AND "timestamp"::DATE <= $2::DATE ORDER BY "timestamp" DESC LIMIT 1;`))
if err != nil { if err != nil {
log.Fatalf("Error preparing query: %v", err) log.Fatalf("Error preparing query: %v", err)
return false return false
} }
err = stmt.QueryRow(b.CardUID).Scan(&check_in_out) err = stmt.QueryRow(b.CardUID, b.Timestamp).Scan(&check_in_out)
if err == sql.ErrNoRows { if err == sql.ErrNoRows {
return true return true
} }

View File

@@ -28,7 +28,7 @@ func (c *CompoundDay) GetWorkDay() WorkDay {
// IsEmpty implements [IWorkDay]. // IsEmpty implements [IWorkDay].
func (c *CompoundDay) IsEmpty() bool { func (c *CompoundDay) IsEmpty() bool {
return len(c.DayParts) > 0 return len(c.DayParts) == 0
} }
// Date implements [IWorkDay]. // Date implements [IWorkDay].
@@ -47,12 +47,16 @@ func (c *CompoundDay) GetDayProgress(u User) int8 {
// GetOvertime implements [IWorkDay]. // GetOvertime implements [IWorkDay].
func (c *CompoundDay) GetOvertime(u User, base WorktimeBase, includeKurzarbeit bool) time.Duration { func (c *CompoundDay) GetOvertime(u User, base WorktimeBase, includeKurzarbeit bool) time.Duration {
work := c.GetWorktime(u, base, includeKurzarbeit)
var targetHours time.Duration
var overtime time.Duration switch base {
for _, day := range c.DayParts { case WorktimeBaseDay:
overtime += day.GetOvertime(u, base, includeKurzarbeit) targetHours = u.ArbeitszeitProTagFrac(1)
case WorktimeBaseWeek:
targetHours = u.ArbeitszeitProWocheFrac(.2)
} }
return overtime return (work - targetHours).Round(time.Minute)
} }
// GetPausetime implements [IWorkDay]. // GetPausetime implements [IWorkDay].

View File

@@ -49,19 +49,16 @@ func GetDays(user User, tsFrom, tsTo time.Time, orderedForward bool) []IWorkDay
} }
for _, absentDay := range absences { for _, absentDay := range absences {
// Kurzarbeit should be integrated in workday
// Check if there is already a day
existingDay, ok := allDays[absentDay.Date().Format(time.DateOnly)] existingDay, ok := allDays[absentDay.Date().Format(time.DateOnly)]
if !ok {
allDays[absentDay.Date().Format(time.DateOnly)] = &absentDay
continue
}
switch { switch {
case absentDay.AbwesenheitTyp.WorkTime < 0: case absentDay.AbwesenheitTyp.WorkTime < 0:
if workDay, ok := allDays[absentDay.Date().Format(time.DateOnly)].(*WorkDay); ok { if workDay, ok := allDays[absentDay.Date().Format(time.DateOnly)].(*WorkDay); ok {
workDay.kurzArbeit = true workDay.kurzArbeit = true
workDay.kurzArbeitAbsence = absentDay workDay.kurzArbeitAbsence = absentDay
} }
case !existingDay.IsEmpty(): case ok && !existingDay.IsEmpty():
allDays[absentDay.Date().Format(time.DateOnly)] = NewCompondDay(absentDay.Date(), existingDay, &absentDay) allDays[absentDay.Date().Format(time.DateOnly)] = NewCompondDay(absentDay.Date(), existingDay, &absentDay)
default: default:
allDays[absentDay.Date().Format(time.DateOnly)] = &absentDay allDays[absentDay.Date().Format(time.DateOnly)] = &absentDay

View File

@@ -175,21 +175,38 @@ func GetWorkDays(user User, tsFrom, tsTo time.Time) []WorkDay {
qStr, err := DB.Prepare(` qStr, err := DB.Prepare(`
WITH all_days AS ( WITH all_days AS (
SELECT generate_series($2::DATE, $3::DATE - INTERVAL '1 day', INTERVAL '1 day')::DATE AS work_date), SELECT generate_series($2::DATE, $3::DATE - INTERVAL '1 day', INTERVAL '1 day')::DATE AS work_date),
ordered_bookings AS ( normalized_bookings AS (
SELECT *
FROM (
SELECT SELECT
a.timestamp::DATE AS work_date, a.card_uid,
a.timestamp, a.timestamp,
a.timestamp::DATE AS work_date,
a.check_in_out, a.check_in_out,
a.counter_id, a.counter_id,
a.anwesenheit_typ, a.anwesenheit_typ,
sat.anwesenheit_name AS anwesenheit_typ_name, sat.anwesenheit_name AS anwesenheit_typ_name,
LAG(a.timestamp) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_timestamp, LAG(a.check_in_out) OVER (
LAG(a.check_in_out) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_check PARTITION BY a.card_uid, a.timestamp::DATE
ORDER BY a.timestamp
) AS prev_check
FROM anwesenheit a FROM anwesenheit a
LEFT JOIN s_anwesenheit_typen sat ON a.anwesenheit_typ = sat.anwesenheit_id LEFT JOIN s_anwesenheit_typen sat
ON a.anwesenheit_typ = sat.anwesenheit_id
WHERE a.card_uid = $1 WHERE a.card_uid = $1
AND a.timestamp::DATE >= $2 AND a.timestamp::DATE >= $2
AND a.timestamp::DATE <= $3 AND a.timestamp::DATE <= $3
) t
WHERE prev_check IS NULL OR prev_check <> check_in_out
),
ordered_bookings AS (
SELECT
*,
LAG(timestamp) OVER (
PARTITION BY card_uid, work_date
ORDER BY timestamp
) AS prev_timestamp
FROM normalized_bookings
) )
SELECT SELECT
d.work_date, d.work_date,
@@ -228,6 +245,62 @@ func GetWorkDays(user User, tsFrom, tsTo time.Time) []WorkDay {
GROUP BY d.work_date GROUP BY d.work_date
ORDER BY d.work_date ASC;`) ORDER BY d.work_date ASC;`)
// qStr, err := DB.Prepare(`
// WITH all_days AS (
// SELECT generate_series($2::DATE, $3::DATE - INTERVAL '1 day', INTERVAL '1 day')::DATE AS work_date),
// ordered_bookings AS (
// SELECT
// a.timestamp::DATE AS work_date,
// a.timestamp,
// a.check_in_out,
// a.counter_id,
// a.anwesenheit_typ,
// sat.anwesenheit_name AS anwesenheit_typ_name,
// LAG(a.timestamp) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_timestamp,
// LAG(a.check_in_out) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_check
// FROM anwesenheit a
// LEFT JOIN s_anwesenheit_typen sat ON a.anwesenheit_typ = sat.anwesenheit_id
// WHERE a.card_uid = $1
// AND a.timestamp::DATE >= $2
// AND a.timestamp::DATE <= $3
// )
// SELECT
// d.work_date,
// COALESCE(MIN(b.timestamp), NOW()) AS time_from,
// COALESCE(MAX(b.timestamp), NOW()) AS time_to,
// COALESCE(
// EXTRACT(EPOCH FROM SUM(
// CASE
// WHEN b.prev_check IN (1, 3) AND b.check_in_out IN (2, 4, 254)
// THEN b.timestamp - b.prev_timestamp
// ELSE INTERVAL '0'
// END
// )), 0
// ) AS total_work_seconds,
// COALESCE(
// EXTRACT(EPOCH FROM SUM(
// CASE
// WHEN b.prev_check IN (2, 4, 254) AND b.check_in_out IN (1, 3)
// THEN b.timestamp - b.prev_timestamp
// ELSE INTERVAL '0'
// END
// )), 0
// ) AS total_pause_seconds,
// COALESCE(jsonb_agg(jsonb_build_object(
// 'check_in_out', b.check_in_out,
// 'timestamp', b.timestamp,
// 'counter_id', b.counter_id,
// 'anwesenheit_typ', b.anwesenheit_typ,
// 'anwesenheit_typ', jsonb_build_object(
// 'anwesenheit_id', b.anwesenheit_typ,
// 'anwesenheit_name', b.anwesenheit_typ_name
// )
// ) ORDER BY b.timestamp), '[]'::jsonb) AS bookings
// FROM all_days d
// LEFT JOIN ordered_bookings b ON d.work_date = b.work_date
// GROUP BY d.work_date
// ORDER BY d.work_date ASC;`)
if err != nil { if err != nil {
log.Println("Error preparing SQL statement", err) log.Println("Error preparing SQL statement", err)
return workDays return workDays
@@ -258,7 +331,7 @@ func GetWorkDays(user User, tsFrom, tsTo time.Time) []WorkDay {
if len(workDay.Bookings) == 1 && workDay.Bookings[0].CounterId == 0 { if len(workDay.Bookings) == 1 && workDay.Bookings[0].CounterId == 0 {
workDay.Bookings = []Booking{} workDay.Bookings = []Booking{}
} }
if len(workDay.Bookings) > 1 || !helper.IsWeekend(workDay.Date()) { if len(workDay.Bookings) >= 1 || !helper.IsWeekend(workDay.Date()) {
workDays = append(workDays, workDay) workDays = append(workDays, workDay)
} }
} }

View File

@@ -94,8 +94,8 @@ function checkAll(pattern, state) {
} }
} }
bookingForms = document.querySelectorAll("form.bookings"); const bookingForms = document.querySelectorAll("form.bookings");
for (form of bookingForms) { for (let form of bookingForms) {
let selectKommenInput = form.querySelector("input[name='select_kommen']"); let selectKommenInput = form.querySelector("input[name='select_kommen']");
let kommenGehenSelector = form.querySelector("select"); let kommenGehenSelector = form.querySelector("select");
if (selectKommenInput) { if (selectKommenInput) {

View File

@@ -115,7 +115,7 @@ templ defaultDayComponent(day models.IWorkDay) {
if pause > 0 { if pause > 0 {
<p class="text-neutral-500 flex flex-row items-center"><span class="icon-[material-symbols-light--motion-photos-paused-outline]"></span>{ helper.FormatDuration(pause) }</p> <p class="text-neutral-500 flex flex-row items-center"><span class="icon-[material-symbols-light--motion-photos-paused-outline]"></span>{ helper.FormatDuration(pause) }</p>
} }
if overtime != 0 && day.IsEmpty() == false { if !day.IsEmpty() && overtime != 0 {
<p class="text-neutral-500 flex flex-row items-center"> <p class="text-neutral-500 flex flex-row items-center">
<span class="icon-[material-symbols-light--more-time]"></span> <span class="icon-[material-symbols-light--more-time]"></span>
{ helper.FormatDuration(overtime) } { helper.FormatDuration(overtime) }

View File

@@ -346,7 +346,7 @@ func defaultDayComponent(day models.IWorkDay) templ.Component {
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
if overtime != 0 && day.IsEmpty() == false { if !day.IsEmpty() && overtime != 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "<p class=\"text-neutral-500 flex flex-row items-center\"><span class=\"icon-[material-symbols-light--more-time]\"></span> ") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "<p class=\"text-neutral-500 flex flex-row items-center\"><span class=\"icon-[material-symbols-light--more-time]\"></span> ")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err

6
Cron/autoBackup.sh Executable file
View File

@@ -0,0 +1,6 @@
# cron-timing: 05 01 * * 1
container_name="arbeitszeitmessung-main-db-1"
filename=backup-$(date '+%d%m%Y').sql
database_name=__DATABASE__
docker exec $container_name pg_dump $database_name > /home/pi/arbeitszeitmessung-backup/$filename
echo "created backup file: "$filename

3
Cron/autoHolidays.sh Executable file
View File

@@ -0,0 +1,3 @@
# Calls endpoint to write all public Holidays for the current year inside a database.
port=__PORT__
curl localhost:$port/auto/feiertage

4
Cron/autoLogout.sh Executable file
View File

@@ -0,0 +1,4 @@
# cron-timing: 55 23 * * *
# Calls endpoint to log out all users, still logged in for today
port=__PORT__
curl localhost:$port/auto/logout

55
DB/initdb/01_create_user.sh Executable file
View File

@@ -0,0 +1,55 @@
#!/bin/bash
set -e # Exit on error
echo "Creating PostgreSQL user and setting permissions... $POSTGRES_USER for API user $POSTGRES_API_USER"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
CREATE ROLE migrate LOGIN ENCRYPTED PASSWORD '$POSTGRES_PASSWORD';
GRANT USAGE, CREATE ON SCHEMA public TO migrate;
EOSQL
# psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
# GRANT SELECT, INSERT, UPDATE ON anwesenheit, abwesenheit, user_password, wochen_report, s_feiertage TO $POSTGRES_API_USER;
# GRANT DELETE ON abwesenheit TO $POSTGRES_API_USER;
# GRANT SELECT ON s_personal_daten, s_abwesenheit_typen, s_anwesenheit_typen, s_feiertage TO $POSTGRES_API_USER;
# GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO $POSTGRES_API_USER;
# EOSQL
echo "User creation and permissions setup complete!"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
-- privilege roles
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'app_base') THEN
CREATE ROLE app_base NOLOGIN;
END IF;
END
\$\$;
-- dynamic login role
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = '$POSTGRES_API_USER') THEN
CREATE ROLE $POSTGRES_API_USER
LOGIN
ENCRYPTED PASSWORD '$POSTGRES_API_PASS';
END IF;
END
\$\$;
-- grant base privileges
GRANT app_base TO $POSTGRES_API_USER;
GRANT CONNECT ON DATABASE $POSTGRES_DB TO $POSTGRES_API_USER;
GRANT USAGE ON SCHEMA public TO $POSTGRES_API_USER;
CREATE EXTENSION IF NOT EXISTS pgcrypto;
EOSQL
# psql -v ON_ERROR_STOP=1 --username root --dbname arbeitszeitmessung

View File

@@ -1,8 +0,0 @@
-- Seed data: one sample employee (Personalnummer 123) with working-time
-- limits (8h/day, 40h/week, earliest start 07:00, latest end 20:00).
INSERT INTO "s_personal_daten" ("personal_nummer", "aktiv_beschaeftigt", "vorname", "nachname", "geburtsdatum", "plz", "adresse", "geschlecht", "card_uid", "hauptbeschaeftigungs_ort", "arbeitszeit_per_tag", "arbeitszeit_per_woche", "arbeitszeit_min_start", "arbeitszeit_max_ende", "vorgesetzter_pers_nr") VALUES
(123, 't', 'Kim', 'Mustermensch', '2003-02-01', '08963', 'Altenburger Str. 44A', 1, 'aaaa-aaaa', 1, 8, 40, '07:00:00', '20:00:00', 0);
-- Password for the sample employee, hashed with bcrypt via pgcrypto.
INSERT INTO "user_password" ("personal_nummer", "pass_hash") VALUES
(123, crypt('max_pass', gen_salt('bf')));
-- Lookup tables for attendance and absence types.
INSERT INTO "s_anwesenheit_typen" ("anwesenheit_id", "anwesenheit_name") VALUES (1, 'Büro');
-- NOTE(review): arbeitszeit_equivalent looks like a percentage of a workday
-- credited per absence (-1 apparently a special case for Kurzarbeit) — confirm.
INSERT INTO "s_abwesenheit_typen" ("abwesenheit_id", "abwesenheit_name", "arbeitszeit_equivalent") VALUES (1, 'Urlaub', 100), (2, 'Krank', 100), (3, 'Kurzarbeit', -1), (4, 'Urlaub untertags', 50);

View File

@@ -1,21 +0,0 @@
#!/bin/bash
# Legacy initdb script (deleted in this commit): creates the API login role
# and grants its privileges table-by-table; replaced by the migrate/app_base
# role setup in DB/initdb/01_create_user.sh.
set -e # Exit on error
echo "Creating PostgreSQL user and setting permissions... $POSTGRES_USER for API user $POSTGRES_API_USER"
# Create the API login role (fails if the role already exists).
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
CREATE USER $POSTGRES_API_USER WITH ENCRYPTED PASSWORD '$POSTGRES_API_PASS';
EOSQL
# Grant the API role the minimum table/sequence privileges it needs.
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
GRANT CONNECT ON DATABASE $POSTGRES_DB TO $POSTGRES_API_USER;
GRANT USAGE ON SCHEMA public TO $POSTGRES_API_USER;
GRANT SELECT, INSERT, UPDATE ON anwesenheit, abwesenheit, user_password, wochen_report, s_feiertage TO $POSTGRES_API_USER;
GRANT DELETE ON abwesenheit TO $POSTGRES_API_USER;
GRANT SELECT ON s_personal_daten, s_abwesenheit_typen, s_anwesenheit_typen, s_feiertage TO $POSTGRES_API_USER;
GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO $POSTGRES_API_USER;
EOSQL
echo "User creation and permissions setup complete!"
# psql -v ON_ERROR_STOP=1 --username root --dbname arbeitszeitmessung

View File

@@ -3,7 +3,7 @@ services:
db: db:
volumes: volumes:
- ${POSTGRES_PATH}:/var/lib/postgresql/data - ${POSTGRES_PATH}:/var/lib/postgresql/data
# - ${POSTGRES_PATH}/initdb:/docker-entrypoint-initdb.d - ${POSTGRES_PATH}/initdb:/docker-entrypoint-initdb.d
ports: ports:
- 5432:5432 - 5432:5432

View File

@@ -25,12 +25,11 @@ services:
- ${WEB_PORT}:8080 - ${WEB_PORT}:8080
depends_on: depends_on:
- db - db
- document-creator
volumes: volumes:
- ../logs:/app/Backend/logs - ${LOG_PATH}:/app/logs
restart: unless-stopped restart: unless-stopped
document-creator: # document-creator:
image: git.letsstein.de/tom/arbeitszeitmessung-doc-creator # image: git.letsstein.de/tom/arbeitszeitmessung-doc-creator
container_name: ${TYPST_CONTAINER} # container_name: ${TYPST_CONTAINER}
restart: unless-stopped # restart: unless-stopped

View File

@@ -2,11 +2,12 @@ POSTGRES_USER=root # Postgres ADMIN Nutzername
POSTGRES_PASSWORD=very_secure # Postgres ADMIN Passwort POSTGRES_PASSWORD=very_secure # Postgres ADMIN Passwort
POSTGRES_API_USER=api_nutzer # Postgres API Nutzername (für Arbeitszeitmessung) POSTGRES_API_USER=api_nutzer # Postgres API Nutzername (für Arbeitszeitmessung)
POSTGRES_API_PASS=password # Postgres API Passwort (für Arbeitszeitmessung) POSTGRES_API_PASS=password # Postgres API Passwort (für Arbeitszeitmessung)
POSTGRES_PATH=../DB # Datebank Pfad (relativ zu Docker Ordner oder absoluter pfad mit /...) POSTGRES_PATH=__ROOT__/DB # Datebank Pfad (relativ zu Docker Ordner oder absoluter pfad mit /...)
LOG_PATH=../logs # Pfad für Logdatein
POSTGRES_DB=arbeitszeitmessung # Postgres Datenbank Name POSTGRES_DB=arbeitszeitmessung # Postgres Datenbank Name
POSTGRES_PORT=127.0.0.1:5432 # Postgres Port will not be exposed by default. regex:^[0-9]{1,5}$ POSTGRES_PORT=127.0.0.1:5432 # Postgres Port normalerweise nicht freigegeben. regex:^[0-9]{1,5}$
MIGRATIONS_PATH=__ROOT__/migrations # Pfad zu DB migrations (wenn nicht verändert wurde, bei default bleiben)
TZ=Europe/Berlin # Zeitzone TZ=Europe/Berlin # Zeitzone
API_TOKEN=dont_access # API Token für ESP Endpoints API_TOKEN=dont_access # API Token für ESP Endpoints
WEB_PORT=8000 # Port from which Arbeitszeitmessung should be accessable regex:^[0-9]{1,5}$ WEB_PORT=8000 # Port unter welchem Webserver erreichbar ist. regex:^[0-9]{1,5}$
TYPST_CONTAINER=arbeitszeitmessung-doc-creator # Name of the pdf compiler container LOG_PATH=__ROOT__/logs # Pfad für Audit Logs
LOG_LEVEL=warn # Welche Log-Nachrichten werden in der Konsole erscheinen

View File

@@ -1,7 +0,0 @@
# Document-creator image (deleted in this commit): Typst 0.14 with the
# project's templates and static assets baked in.
FROM ghcr.io/typst/typst:0.14.0
WORKDIR /app
COPY ./templates /app/templates
COPY ./static /app/static
# Idle forever so the container stays alive; the image runs no server, so
# compilation is presumably triggered from outside (e.g. docker exec) — confirm.
ENTRYPOINT ["sh", "-c", "while true; do sleep 3600; done"]

View File

@@ -4,6 +4,10 @@ set -e
envFile=Docker/.env envFile=Docker/.env
envExample=Docker/env.example envExample=Docker/env.example
autoBackupScript=Cron/autoBackup.sh
autoHolidaysScript=Cron/autoHolidays.sh
autoLogoutScript=Cron/autoLogout.sh
echo "Checking Docker installation..." echo "Checking Docker installation..."
if ! command -v docker >/dev/null 2>&1; then if ! command -v docker >/dev/null 2>&1; then
echo "Docker not found. Install Docker? [y/N]" echo "Docker not found. Install Docker? [y/N]"
@@ -18,12 +22,16 @@ else
echo "Docker is already installed." echo "Docker is already installed."
fi fi
###########################################################################
echo "Checking Docker Compose..." echo "Checking Docker Compose..."
if ! docker compose version >/dev/null 2>&1; then if ! docker compose version >/dev/null 2>&1; then
echo "Docker Compose plugin missing. You may need to update Docker." echo "Docker Compose plugin missing. You may need to update Docker."
exit 1 exit 1
fi fi
###########################################################################
echo "Preparing .env file..." echo "Preparing .env file..."
if [ ! -f $envFile ]; then if [ ! -f $envFile ]; then
if [ -f $envExample ]; then if [ -f $envExample ]; then
@@ -44,6 +52,9 @@ if [ ! -f $envFile ]; then
raw_val=$(printf "%s" "$rest" | sed 's/ *#.*//') raw_val=$(printf "%s" "$rest" | sed 's/ *#.*//')
default_value=$(printf "%s" "$raw_val" | sed 's/"//g') default_value=$(printf "%s" "$raw_val" | sed 's/"//g')
# Replace __ROOT__ with script pwd
default_value="${default_value/__ROOT__/$(pwd)}"
regex="" regex=""
if [[ "$comment" =~ regex:(.*)$ ]]; then if [[ "$comment" =~ regex:(.*)$ ]]; then
regex="${BASH_REMATCH[1]}" regex="${BASH_REMATCH[1]}"
@@ -96,13 +107,80 @@ else
echo "Using existing .env. (found at $envFile)" echo "Using existing .env. (found at $envFile)"
fi fi
###########################################################################
# Resolve LOG_PATH from .env and pre-create the directory used by the
# docker-compose bind mount (${LOG_PATH}:/app/logs).
LOG_PATH=$(grep -E '^LOG_PATH=' "$envFile" | cut -d= -f2)
if [ -z "$LOG_PATH" ]; then
echo "LOG_PATH not found in .env using default $(pwd)/logs"
LOG_PATH=$(pwd)/logs
elif [[ "$LOG_PATH" != /* ]]; then
# Relative values are relative to the Docker directory, but this script
# runs from the repo root. Absolute paths (env.example: "absoluter pfad
# mit /...") must NOT be prefixed — the old code broke them.
LOG_PATH=Docker/$LOG_PATH
fi
mkdir -p "$LOG_PATH"
echo "Created logs folder at $LOG_PATH"
###########################################################################
echo -e "\n\n"
echo "Start containers with docker compose up -d? [y/N]"
# FIX: the variable was mistyped as "start_containersmkdi", so the answer was
# never stored in $start_containers and containers never auto-started.
read -r start_containers
if [[ "$start_containers" =~ ^[Yy]$ ]]; then
# Run compose in a subshell so the script stays in the repo root — the cron
# setup below relies on relative paths (Cron/*, Docker/.env).
(cd Docker && docker compose up -d)
echo "Containers started."
else
echo "You can start them manually with: docker compose up -d"
fi
###########################################################################
echo -e "\n\n"
echo "Setup Crontab for automatic logout, backup and holiday creation? [y/N]"
read -r setup_cron
if [[ "$setup_cron" =~ ^[Yy]$ ]]; then
# Read the values the Cron scripts need from .env, with sane fallbacks.
WEB_PORT=$(grep -E '^WEB_PORT=' "$envFile" | cut -d= -f2)
if [ -z "$WEB_PORT" ]; then
echo "WEB_PORT not found in .env using default 8000"
WEB_PORT=8000
fi
POSTGRES_DB=$(grep -E '^POSTGRES_DB=' "$envFile" | cut -d= -f2)
if [ -z "$POSTGRES_DB" ]; then
# FIX: the message previously said "arbeitszeitmessung not found" instead
# of naming the missing variable.
echo "POSTGRES_DB not found in .env using default arbeitszeitmessung"
POSTGRES_DB="arbeitszeitmessung"
fi
# Bake the configured values into the cron scripts and make them runnable.
sed -i "s/__PORT__/$WEB_PORT/" "$autoHolidaysScript"
sed -i "s/__PORT__/$WEB_PORT/" "$autoLogoutScript"
sed -i "s/__DATABASE__/$POSTGRES_DB/" "$autoBackupScript"
chmod +x "$autoBackupScript" "$autoHolidaysScript" "$autoLogoutScript"
echo "Adding rules to crontab."
# (removed: an unused "cron_commands=$(mktemp ...)" that leaked a temp file)
for file in Cron/*; do
# Each script declares its schedule in a "# cron-timing: ..." header line.
cron_timing=$(grep -E '^# cron-timing:' "$file" | sed 's/^# cron-timing:[[:space:]]*//')
if [ -z "$cron_timing" ]; then
echo "No cron-timing found in $file, so it's not added to crontab."
continue
fi
# "crontab -l" exits non-zero when no crontab exists yet; under "set -e"
# that would kill the subshell before the echo runs — fall back to empty.
# awk '!x[$0]++' deduplicates entries on repeated installer runs.
( crontab -l 2>/dev/null || true ; echo "$cron_timing $(pwd)/$file" ) | awk '!x[$0]++' | crontab -
echo "Added entry to crontab: $cron_timing $(pwd)/$file."
done
if systemctl is-active --quiet cron.service ; then
echo "cron.service is running. Everything should be fine now."
else
echo "cron.service is not running. Please start and enable cron.service."
echo "For how to start a service, see: https://wiki.ubuntuusers.de/systemd/systemctl UNITNAME will be cron.service"
fi
else
echo "Please setup cron manually by executing crontab -e and adding all files from inside the Cron directory!"
fi

View File

@@ -1,3 +1,11 @@
-- Tables created later by the "migrate" role become readable by the
-- app_base group role automatically, without per-table grants.
ALTER DEFAULT PRIVILEGES FOR ROLE migrate
IN SCHEMA public
GRANT SELECT ON TABLES TO app_base;
-- Same for sequences (needed for the bigserial counters below).
ALTER DEFAULT PRIVILEGES FOR ROLE migrate
IN SCHEMA public
GRANT USAGE, SELECT ON SEQUENCES TO app_base;
-- create "abwesenheit" table -- create "abwesenheit" table
CREATE TABLE "abwesenheit" ( CREATE TABLE "abwesenheit" (
"counter_id" bigserial NOT NULL, "counter_id" bigserial NOT NULL,
@@ -6,6 +14,7 @@ CREATE TABLE "abwesenheit" (
"datum" timestamptz NULL DEFAULT (now())::date, "datum" timestamptz NULL DEFAULT (now())::date,
PRIMARY KEY ("counter_id") PRIMARY KEY ("counter_id")
); );
-- create "anwesenheit" table -- create "anwesenheit" table
CREATE TABLE "anwesenheit" ( CREATE TABLE "anwesenheit" (
"counter_id" bigserial NOT NULL, "counter_id" bigserial NOT NULL,
@@ -55,3 +64,6 @@ CREATE TABLE "wochen_report" (
PRIMARY KEY ("id"), PRIMARY KEY ("id"),
CONSTRAINT "wochen_report_personal_nummer_woche_start_key" UNIQUE ("personal_nummer", "woche_start") CONSTRAINT "wochen_report_personal_nummer_woche_start_key" UNIQUE ("personal_nummer", "woche_start")
); );
-- Write access for the API group role; DELETE is deliberately limited to
-- abwesenheit, mirroring the old per-table grants.
GRANT INSERT, UPDATE ON abwesenheit, anwesenheit, wochen_report, user_password TO app_base;
GRANT DELETE ON abwesenheit to app_base;

View File

@@ -3,8 +3,3 @@
DROP FUNCTION update_zuletzt_geandert; DROP FUNCTION update_zuletzt_geandert;
DROP TRIGGER IF EXISTS pass_hash_update ON user_password; DROP TRIGGER IF EXISTS pass_hash_update ON user_password;
-- revert: Adds crypto extension
DROP EXTENSION IF EXISTS pgcrypto;

View File

@@ -17,5 +17,3 @@ FOR EACH ROW
EXECUTE FUNCTION update_zuletzt_geandert(); EXECUTE FUNCTION update_zuletzt_geandert();
-- Adds crypto extension -- Adds crypto extension
CREATE EXTENSION IF NOT EXISTS pgcrypto;

View File

@@ -11,3 +11,5 @@ CREATE TABLE "s_feiertage" (
); );
-- create index "feiertage_unique_pro_jahr" to table: "s_feiertage" -- create index "feiertage_unique_pro_jahr" to table: "s_feiertage"
CREATE UNIQUE INDEX "feiertage_unique_pro_jahr" ON "s_feiertage" ((EXTRACT(year FROM datum)), "name"); CREATE UNIQUE INDEX "feiertage_unique_pro_jahr" ON "s_feiertage" ((EXTRACT(year FROM datum)), "name");
-- Lets the API (member of app_base) insert/refresh public-holiday rows.
GRANT INSERT, UPDATE ON s_feiertage TO app_base;