18 Commits

Author SHA1 Message Date
fdda0ea669 moved migrations
All checks were successful
Tests / Run Go Tests (push) Successful in 2m7s
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 2m45s
2026-01-18 22:50:51 +01:00
c10ab98997 fixed problem where migrate could not connect to db
Some checks failed
Arbeitszeitmessung Deploy / Build Webserver (push) Failing after 1m14s
Tests / Run Go Tests (push) Successful in 1m48s
2026-01-18 22:42:07 +01:00
8dc8c4eed3 working to fix orangepi db connect
Some checks failed
Tests / Run Go Tests (push) Failing after 30s
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 2m11s
2026-01-18 21:34:11 +01:00
3f49da49b6 ad hoc fix
All checks were successful
Tests / Run Go Tests (push) Successful in 2m24s
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 2m52s
2026-01-18 20:43:38 +01:00
18b2cbc074 Merge pull request 'dev/fix-70' (#71) from dev/fix-70 into main
Some checks failed
Tests / Run Go Tests (push) Failing after 48s
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 3m35s
Reviewed-on: #71
2026-01-18 18:11:00 +01:00
560c539b19 fixed minor bugs + added logging middleware
All checks were successful
Tests / Run Go Tests (push) Successful in 1m26s
2026-01-18 00:07:54 +01:00
502955d32f added migrations back + removed distracting log message 2026-01-17 22:53:35 +01:00
cfd77ae28d fixed #70 + made db script ignore double bookings
Some checks failed
Tests / Run Go Tests (push) Failing after 2m0s
2026-01-17 22:25:25 +01:00
1daf4db167 fixed permission problem after making migrations executed by Go
All checks were successful
Tests / Run Go Tests (push) Successful in 1m44s
2026-01-17 21:41:46 +01:00
3322f7e9bc updated install script with cron jobs
All checks were successful
Tests / Run Go Tests (push) Successful in 1m35s
2026-01-07 19:55:00 +01:00
4b9824c714 fixed sonarqube errors
All checks were successful
Tests / Run Go Tests (push) Successful in 2m28s
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 3m15s
2026-01-05 12:19:40 +01:00
7ac6c5f9b8 Merge pull request 'dev/pdf' (#69) from dev/pdf into main
Some checks failed
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 4m43s
Tests / Run Go Tests (push) Failing after 3m44s
Reviewed-on: #69
2026-01-05 12:14:46 +01:00
f9fc3d91d1 improved logging + fixed error from log folder
Some checks failed
Tests / Run Go Tests (push) Failing after 3m7s
2026-01-05 12:13:49 +01:00
f0de9961dc removed doc-creator 2026-01-05 12:13:24 +01:00
4ded8632e5 fixed overtime calc issue
Some checks failed
Arbeitszeitmessung Deploy / Build Document Creator (push) Successful in 2m30s
Tests / Run Go Tests (push) Failing after 2m35s
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 3m19s
2026-01-05 00:39:00 +01:00
b2af48463c updated git action to build right image
Some checks failed
Arbeitszeitmessung Deploy / Build Document Creator (push) Successful in 2m11s
Tests / Run Go Tests (push) Failing after 2m46s
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 3m5s
2026-01-05 00:02:33 +01:00
0b72147e02 Merge pull request 'version 2.0.0 rc' (#68) from dev/feiertage into main
Some checks failed
Tests / Run Go Tests (push) Failing after 2m29s
Arbeitszeitmessung Deploy / Build Document Creator (push) Successful in 4m1s
Arbeitszeitmessung Deploy / Build Webserver (push) Successful in 4m4s
Reviewed-on: #68
2026-01-04 23:56:04 +01:00
d1b46cf894 fixed sonarqube errors
Some checks failed
Tests / Run Go Tests (push) Failing after 1m33s
2026-01-04 23:53:16 +01:00
51 changed files with 423 additions and 212 deletions


@@ -6,6 +6,7 @@ on:
- "*"
branches:
- main
- dev/main
jobs:
webserver:
@@ -39,34 +40,34 @@ jobs:
push: true
context: Backend
tags: ${{ steps.meta.outputs.tags }}
document-creator:
name: Build Document Creator
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: git.letsstein.de
username: ${{ gitea.actor }}
password: ${{ secrets.REGISTRY_TOKEN }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: git.letsstein.de/tom/arbeitszeitmessung-doc-creator
tags: |
type=raw,value=latest
type=pep440,pattern={{version}}
- name: Build and push
uses: docker/build-push-action@v6
with:
platforms: linux/amd64,linux/arm64
push: true
context: Backend
tags: ${{ steps.meta.outputs.tags }}
# document-creator:
# name: Build Document Creator
# runs-on: ubuntu-latest
# steps:
# - name: Checkout
# uses: actions/checkout@v4
# - name: Login to GitHub Container Registry
# uses: docker/login-action@v3
# with:
# registry: git.letsstein.de
# username: ${{ gitea.actor }}
# password: ${{ secrets.REGISTRY_TOKEN }}
# - name: Set up QEMU
# uses: docker/setup-qemu-action@v3
# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v3
# - name: Extract metadata (tags, labels) for Docker
# id: meta
# uses: docker/metadata-action@v5
# with:
# images: git.letsstein.de/tom/arbeitszeitmessung-doc-creator
# tags: |
# type=raw,value=latest
# type=pep440,pattern={{version}}
# - name: Build and push
# uses: docker/build-push-action@v6
# with:
# platforms: linux/amd64,linux/arm64
# push: true
# context: DocumentCreator
# tags: ${{ steps.meta.outputs.tags }}


@@ -14,9 +14,13 @@ COPY . .
RUN go build -o server .
FROM alpine:3.22
RUN apk add --no-cache tzdata
RUN apk add --no-cache tzdata typst
WORKDIR /app
COPY --from=build /app/server /app/server
COPY migrations /app/migrations
COPY doc /doc
COPY /static /app/static
ENTRYPOINT ["./server"]


@@ -5,7 +5,12 @@ import (
"arbeitszeitmessung/models"
"database/sql"
"fmt"
"log/slog"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
_ "github.com/lib/pq"
)
@@ -16,6 +21,45 @@ func OpenDatabase() (models.IDatabase, error) {
dbPassword := helper.GetEnv("POSTGRES_API_PASS", "password")
dbTz := helper.GetEnv("TZ", "Europe/Berlin")
connStr := fmt.Sprintf("postgres://%s:%s@%s:5432/%s?sslmode=disable&TimeZone=%s", dbUser, dbPassword, dbHost, dbName, dbTz)
connStr := fmt.Sprintf(
"host=%s user=%s dbname=%s password=%s sslmode=disable TimeZone=%s",
dbHost, dbUser, dbName, dbPassword, dbTz)
return sql.Open("postgres", connStr)
}
func Migrate() error {
dbHost := helper.GetEnv("POSTGRES_HOST", "localhost")
dbName := helper.GetEnv("POSTGRES_DB", "arbeitszeitmessung")
dbPassword := helper.GetEnv("POSTGRES_PASSWORD", "password")
dbTz := helper.GetEnv("TZ", "Europe/Berlin")
connStr := fmt.Sprintf(
"host=%s user=%s dbname=%s password=%s sslmode=disable TimeZone=%s",
dbHost, "migrate", dbName, dbPassword, dbTz)
db, err := sql.Open("postgres", connStr)
if err != nil {
return err
}
driver, err := postgres.WithInstance(db, &postgres.Config{})
if err != nil {
return err
}
m, err := migrate.NewWithDatabaseInstance("file:///app/migrations", "postgres", driver)
if err != nil {
return err
}
slog.Info("Connected to database. Running migrations now.")
// Migrate all the way up ...
if err := m.Up(); err != nil && err != migrate.ErrNoChange {
return err
}
slog.Info("Finished migrations starting webserver.")
return nil
}
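
For reference, the file:///app/migrations source above relies on golang-migrate's paired naming scheme, one .up.sql and one .down.sql per numeric version; the file names below are illustrative, not taken from this repo:

ls /app/migrations
# 000001_init.up.sql
# 000001_init.down.sql
# 000002_feiertage.up.sql
# 000002_feiertage.down.sql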

(binary image changed; 17 KiB before and after)


@@ -21,7 +21,7 @@ func LogoutHandler(w http.ResponseWriter, r *http.Request) {
func autoLogout(w http.ResponseWriter) {
users, err := models.GetAllUsers()
var logged_out_users []models.User
var loggedOutUsers []models.User
if err != nil {
fmt.Printf("Error getting user list %v\n", err)
}
@@ -31,7 +31,7 @@ func autoLogout(w http.ResponseWriter) {
if err != nil {
fmt.Printf("Error logging out user %v\n", err)
} else {
logged_out_users = append(logged_out_users, user)
loggedOutUsers = append(loggedOutUsers, user)
log.Printf("Automaticaly logged out user %s, %s ", user.Name, user.Vorname)
}
}
@@ -39,6 +39,6 @@ func autoLogout(w http.ResponseWriter) {
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(logged_out_users)
json.NewEncoder(w).Encode(loggedOutUsers)
}


@@ -198,8 +198,9 @@ func renderPDFSingle(data []typstData) (bytes.Buffer, error) {
var markup bytes.Buffer
var output bytes.Buffer
typstCLI := typst.DockerExec{
ContainerName: helper.GetEnv("TYPST_CONTAINER", "arbeitszeitmessung-doc-creator"),
typstCLI := typst.CLI{
WorkingDirectory: "/doc/",
// ContainerName: helper.GetEnv("TYPST_CONTAINER", "arbeitszeitmessung-doc-creator"),
}
if err := typst.InjectValues(&markup, map[string]any{"data": data}); err != nil {
@@ -225,8 +226,9 @@ func renderPDFSingle(data []typstData) (bytes.Buffer, error) {
func renderPDFMulti(data []typstData) ([]bytes.Buffer, error) {
var outputMulti []bytes.Buffer
typstRender := typst.DockerExec{
ContainerName: helper.GetEnv("TYPST_CONTAINER", "arbeitszeitmessung-doc-creator"),
typstRender := typst.CLI{
WorkingDirectory: "/doc/",
// ContainerName: helper.GetEnv("TYPST_CONTAINER", "arbeitszeitmessung-doc-creator"),
}
for _, d := range data {


@@ -7,6 +7,7 @@ import (
"errors"
"log"
"net/http"
"time"
)
// Relevant for Arduino inputs -> creates a new Booking from the GET and PUT methods
@@ -36,6 +37,7 @@ func createBooking(w http.ResponseWriter, r *http.Request) {
}
booking := (*models.Booking).FromUrlParams(nil, r.URL.Query())
booking.Timestamp = time.Now()
if booking.Verify() {
err := booking.Insert()
if errors.Is(err, models.SameBookingError{}) {


@@ -94,7 +94,7 @@ func getBookings(w http.ResponseWriter, r *http.Request) {
if day.Date().Before(lastSub) {
continue
}
aggregatedOvertime += day.GetOvertime(user, models.WorktimeBaseDay, false)
aggregatedOvertime += day.GetOvertime(user, models.WorktimeBaseDay, true)
}
if reportedOvertime, err := user.GetReportedOvertime(); err == nil {
user.Overtime = (reportedOvertime + aggregatedOvertime).Round(time.Minute)
@@ -160,19 +160,13 @@ func updateBooking(w http.ResponseWriter, r *http.Request) {
newBooking := (*models.Booking).New(nil, user.CardUID, 0, int16(check_in_out), 1)
newBooking.Timestamp = timestamp
if newBooking.Verify() {
err = newBooking.InsertWithTimestamp()
if err != nil {
log.Printf("Error inserting booking %v -> %v\n", newBooking, err)
}
}
case "change":
// absenceType, err := strconv.Atoi(r.FormValue("absence"))
// if err != nil {
// log.Println("Error parsing absence type.", err)
// absenceType = 0
// }
// if absenceType != 0 {
// createAbsence(absenceType, user, loc, r)
// }
for index, possibleBooking := range r.PostForm {
if len(index) > 7 && index[:7] == "booking" {
booking_id, err := strconv.Atoi(index[8:])


@@ -4,8 +4,6 @@ github.com/Dadido3/go-typst v0.8.0 h1:uTLYprhkrBjwsCXRRuyYUFL0fpYHa2kIYoOB/CGqVN
github.com/Dadido3/go-typst v0.8.0/go.mod h1:QYis9sT70u65kn1SkFfyPRmHsPxgoxWbAixwfPReOZA=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/a-h/templ v0.3.943 h1:o+mT/4yqhZ33F3ootBiHwaY4HM5EVaOJfIshvd5UNTY=
github.com/a-h/templ v0.3.943/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo=
github.com/a-h/templ v0.3.960 h1:trshEpGa8clF5cdI39iY4ZrZG8Z/QixyzEyUnA7feTM=
github.com/a-h/templ v0.3.960/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo=
github.com/alexedwards/scs/v2 v2.8.0 h1:h31yUYoycPuL0zt14c0gd+oqxfRwIj6SOjHdKRZxhEw=
@@ -39,6 +37,8 @@ github.com/golang-migrate/migrate/v4 v4.18.3 h1:EYGkoOsvgHHfm5U/naS1RP/6PL/Xv3S4
github.com/golang-migrate/migrate/v4 v4.18.3/go.mod h1:99BKpIi6ruaaXRM1A77eqZ+FWPQ3cfRa+ZVy5bmWMaY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -78,7 +78,6 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/wlbr/feiertage v1.10.0/go.mod h1:wJOHvMa6sI5L1FkrTOX/GSoO0hpK3S2YqGLPi8Q84I0=
github.com/wlbr/feiertage v1.17.0 h1:AEck/iUQu19iU0xNEoSQTeSTGXF1Ju0tbAwEi/Lmwqk=
github.com/wlbr/feiertage v1.17.0/go.mod h1:TVZgmSZgGW/jSxexZ56qdlR6cDj+F/FO8bkw8U6kYxM=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=


@@ -5,6 +5,7 @@ import (
"arbeitszeitmessung/helper"
"arbeitszeitmessung/models"
"context"
"database/sql"
"log/slog"
"net/http"
"os"
@@ -17,7 +18,16 @@ import (
func main() {
var err error
var logLevel slog.LevelVar
switch helper.GetEnv("LOG_LEVEL", "warn") {
case "debug":
logLevel.Set(slog.LevelDebug)
case "info":
logLevel.Set(slog.LevelInfo)
case "warn":
logLevel.Set(slog.LevelWarn)
case "error":
logLevel.Set(slog.LevelError)
}
logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: &logLevel}))
slog.SetDefault(logger)
@@ -35,6 +45,15 @@ func main() {
models.DB, err = OpenDatabase()
if err != nil {
slog.Error("Error while opening the database", "Error", err)
return
}
defer models.DB.(*sql.DB).Close()
err = Migrate()
if err != nil {
slog.Error("Failed to migrate the database to newest version", "Error", err)
return
}
fs := http.FileServer(http.Dir("./static"))
@@ -61,6 +80,8 @@ func main() {
serverSessionMiddleware := endpoints.Session.LoadAndSave(server)
serverSessionMiddleware = loggingMiddleware(serverSessionMiddleware)
// starting the http server
slog.Info("Server is running at http://localhost:8080")
slog.Error("Error starting Server", "Error", http.ListenAndServe(":8080", serverSessionMiddleware))
@@ -70,7 +91,24 @@ func ParamsMiddleware(next http.HandlerFunc) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
queryParams := r.URL.Query()
ctx := context.WithValue(r.Context(), "urlParams", queryParams)
if len(queryParams) > 0 {
slog.Debug("ParamsMiddleware added urlParams", slog.Any("urlParams", queryParams))
}
next.ServeHTTP(w, r.WithContext(ctx))
})
}
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
start := time.Now()
// Log the method and the requested URL
slog.Info("Started request", slog.String("Method", r.Method), slog.String("Path", r.URL.Path))
// Call the next handler in the chain
next.ServeHTTP(w, r)
// Log how long it took
slog.Info("Completet Request", slog.String("Time", time.Since(start).String()))
})
}
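
A minimal way to exercise the new LOG_LEVEL switch and logging middleware locally (a sketch; the JSON lines mirror the slog calls above, but the exact output shape is illustrative):

LOG_LEVEL=info ./server &
curl -s localhost:8080/ > /dev/null
# illustrative log output:
# {"level":"INFO","msg":"Started request","Method":"GET","Path":"/"}
# {"level":"INFO","msg":"Completed Request","Time":"1.8ms"}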


@@ -1,3 +1,11 @@
ALTER DEFAULT PRIVILEGES FOR ROLE migrate
IN SCHEMA public
GRANT SELECT ON TABLES TO app_base;
ALTER DEFAULT PRIVILEGES FOR ROLE migrate
IN SCHEMA public
GRANT USAGE, SELECT ON SEQUENCES TO app_base;
-- create "abwesenheit" table
CREATE TABLE "abwesenheit" (
"counter_id" bigserial NOT NULL,
@@ -6,6 +14,7 @@ CREATE TABLE "abwesenheit" (
"datum" timestamptz NULL DEFAULT (now())::date,
PRIMARY KEY ("counter_id")
);
-- create "anwesenheit" table
CREATE TABLE "anwesenheit" (
"counter_id" bigserial NOT NULL,
@@ -55,3 +64,6 @@ CREATE TABLE "wochen_report" (
PRIMARY KEY ("id"),
CONSTRAINT "wochen_report_personal_nummer_woche_start_key" UNIQUE ("personal_nummer", "woche_start")
);
GRANT INSERT, UPDATE ON abwesenheit, anwesenheit, wochen_report, user_password TO app_base;
GRANT DELETE ON abwesenheit to app_base;
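
To confirm the grants landed, one option is psql's \dp meta-command (a sketch; the container name comes from Cron/autoBackup.sh and the admin role from env.example, both elsewhere in this diff):

docker exec -it arbeitszeitmessung-main-db-1 psql -U root -d arbeitszeitmessung -c '\dp abwesenheit'
# app_base should appear in the access privileges column with INSERT, UPDATE and DELETE.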


@@ -3,8 +3,3 @@
DROP FUNCTION update_zuletzt_geandert;
DROP TRIGGER IF EXISTS pass_hash_update ON user_password;
-- revert: Adds crypto extension
DROP EXTENSION IF EXISTS pgcrypto;


@@ -17,5 +17,3 @@ FOR EACH ROW
EXECUTE FUNCTION update_zuletzt_geandert();
-- Adds crypto extension
CREATE EXTENSION IF NOT EXISTS pgcrypto;


@@ -11,3 +11,5 @@ CREATE TABLE "s_feiertage" (
);
-- create index "feiertage_unique_pro_jahr" to table: "s_feiertage"
CREATE UNIQUE INDEX "feiertage_unique_pro_jahr" ON "s_feiertage" ((EXTRACT(year FROM datum)), "name");
GRANT INSERT, UPDATE ON s_feiertage TO app_base;


@@ -128,6 +128,9 @@ func (b *Booking) InsertWithTimestamp() error {
if b.Timestamp.IsZero() {
return b.Insert()
}
if !checkLastBooking(*b) {
return SameBookingError{}
}
stmt, err := DB.Prepare((`INSERT INTO anwesenheit (card_uid, geraet_id, check_in_out, anwesenheit_typ, timestamp) VALUES ($1, $2, $3, $4, $5) RETURNING counter_id`))
if err != nil {
return err
@@ -242,12 +245,13 @@ func (b *Booking) Update(nb Booking) {
func checkLastBooking(b Booking) bool {
var check_in_out int
stmt, err := DB.Prepare((`SELECT check_in_out FROM "anwesenheit" WHERE "card_uid" = $1 ORDER BY "timestamp" DESC LIMIT 1;`))
slog.Info("Checking with timestamp:", "timestamp", b.Timestamp.String())
stmt, err := DB.Prepare((`SELECT check_in_out FROM "anwesenheit" WHERE "card_uid" = $1 AND "timestamp"::DATE <= $2::DATE ORDER BY "timestamp" DESC LIMIT 1;`))
if err != nil {
log.Fatalf("Error preparing query: %v", err)
return false
}
err = stmt.QueryRow(b.CardUID).Scan(&check_in_out)
err = stmt.QueryRow(b.CardUID, b.Timestamp).Scan(&check_in_out)
if err == sql.ErrNoRows {
return true
}


@@ -28,7 +28,7 @@ func (c *CompoundDay) GetWorkDay() WorkDay {
// IsEmpty implements [IWorkDay].
func (c *CompoundDay) IsEmpty() bool {
return len(c.DayParts) > 0
return len(c.DayParts) == 0
}
// Date implements [IWorkDay].
@@ -47,12 +47,16 @@ func (c *CompoundDay) GetDayProgress(u User) int8 {
// GetOvertime implements [IWorkDay].
func (c *CompoundDay) GetOvertime(u User, base WorktimeBase, includeKurzarbeit bool) time.Duration {
work := c.GetWorktime(u, base, includeKurzarbeit)
var targetHours time.Duration
var overtime time.Duration
for _, day := range c.DayParts {
overtime += day.GetOvertime(u, base, includeKurzarbeit)
switch base {
case WorktimeBaseDay:
targetHours = u.ArbeitszeitProTagFrac(1)
case WorktimeBaseWeek:
targetHours = u.ArbeitszeitProWocheFrac(.2)
}
return overtime
return (work - targetHours).Round(time.Minute)
}
// GetPausetime implements [IWorkDay].


@@ -49,19 +49,16 @@ func GetDays(user User, tsFrom, tsTo time.Time, orderedForward bool) []IWorkDay
}
for _, absentDay := range absences {
// Kurzarbeit should be integrated in workday
// Check if there is already a day
existingDay, ok := allDays[absentDay.Date().Format(time.DateOnly)]
if !ok {
allDays[absentDay.Date().Format(time.DateOnly)] = &absentDay
continue
}
switch {
case absentDay.AbwesenheitTyp.WorkTime < 0:
if workDay, ok := allDays[absentDay.Date().Format(time.DateOnly)].(*WorkDay); ok {
workDay.kurzArbeit = true
workDay.kurzArbeitAbsence = absentDay
}
case !existingDay.IsEmpty():
case ok && !existingDay.IsEmpty():
allDays[absentDay.Date().Format(time.DateOnly)] = NewCompondDay(absentDay.Date(), existingDay, &absentDay)
default:
allDays[absentDay.Date().Format(time.DateOnly)] = &absentDay


@@ -175,21 +175,38 @@ func GetWorkDays(user User, tsFrom, tsTo time.Time) []WorkDay {
qStr, err := DB.Prepare(`
WITH all_days AS (
SELECT generate_series($2::DATE, $3::DATE - INTERVAL '1 day', INTERVAL '1 day')::DATE AS work_date),
ordered_bookings AS (
normalized_bookings AS (
SELECT *
FROM (
SELECT
a.timestamp::DATE AS work_date,
a.card_uid,
a.timestamp,
a.timestamp::DATE AS work_date,
a.check_in_out,
a.counter_id,
a.anwesenheit_typ,
sat.anwesenheit_name AS anwesenheit_typ_name,
LAG(a.timestamp) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_timestamp,
LAG(a.check_in_out) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_check
LAG(a.check_in_out) OVER (
PARTITION BY a.card_uid, a.timestamp::DATE
ORDER BY a.timestamp
) AS prev_check
FROM anwesenheit a
LEFT JOIN s_anwesenheit_typen sat ON a.anwesenheit_typ = sat.anwesenheit_id
LEFT JOIN s_anwesenheit_typen sat
ON a.anwesenheit_typ = sat.anwesenheit_id
WHERE a.card_uid = $1
AND a.timestamp::DATE >= $2
AND a.timestamp::DATE <= $3
) t
WHERE prev_check IS NULL OR prev_check <> check_in_out
),
ordered_bookings AS (
SELECT
*,
LAG(timestamp) OVER (
PARTITION BY card_uid, work_date
ORDER BY timestamp
) AS prev_timestamp
FROM normalized_bookings
)
SELECT
d.work_date,
@@ -228,6 +245,62 @@ func GetWorkDays(user User, tsFrom, tsTo time.Time) []WorkDay {
GROUP BY d.work_date
ORDER BY d.work_date ASC;`)
// qStr, err := DB.Prepare(`
// WITH all_days AS (
// SELECT generate_series($2::DATE, $3::DATE - INTERVAL '1 day', INTERVAL '1 day')::DATE AS work_date),
// ordered_bookings AS (
// SELECT
// a.timestamp::DATE AS work_date,
// a.timestamp,
// a.check_in_out,
// a.counter_id,
// a.anwesenheit_typ,
// sat.anwesenheit_name AS anwesenheit_typ_name,
// LAG(a.timestamp) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_timestamp,
// LAG(a.check_in_out) OVER (PARTITION BY a.card_uid, a.timestamp::DATE ORDER BY a.timestamp) AS prev_check
// FROM anwesenheit a
// LEFT JOIN s_anwesenheit_typen sat ON a.anwesenheit_typ = sat.anwesenheit_id
// WHERE a.card_uid = $1
// AND a.timestamp::DATE >= $2
// AND a.timestamp::DATE <= $3
// )
// SELECT
// d.work_date,
// COALESCE(MIN(b.timestamp), NOW()) AS time_from,
// COALESCE(MAX(b.timestamp), NOW()) AS time_to,
// COALESCE(
// EXTRACT(EPOCH FROM SUM(
// CASE
// WHEN b.prev_check IN (1, 3) AND b.check_in_out IN (2, 4, 254)
// THEN b.timestamp - b.prev_timestamp
// ELSE INTERVAL '0'
// END
// )), 0
// ) AS total_work_seconds,
// COALESCE(
// EXTRACT(EPOCH FROM SUM(
// CASE
// WHEN b.prev_check IN (2, 4, 254) AND b.check_in_out IN (1, 3)
// THEN b.timestamp - b.prev_timestamp
// ELSE INTERVAL '0'
// END
// )), 0
// ) AS total_pause_seconds,
// COALESCE(jsonb_agg(jsonb_build_object(
// 'check_in_out', b.check_in_out,
// 'timestamp', b.timestamp,
// 'counter_id', b.counter_id,
// 'anwesenheit_typ', b.anwesenheit_typ,
// 'anwesenheit_typ', jsonb_build_object(
// 'anwesenheit_id', b.anwesenheit_typ,
// 'anwesenheit_name', b.anwesenheit_typ_name
// )
// ) ORDER BY b.timestamp), '[]'::jsonb) AS bookings
// FROM all_days d
// LEFT JOIN ordered_bookings b ON d.work_date = b.work_date
// GROUP BY d.work_date
// ORDER BY d.work_date ASC;`)
if err != nil {
log.Println("Error preparing SQL statement", err)
return workDays
@@ -258,7 +331,7 @@ func GetWorkDays(user User, tsFrom, tsTo time.Time) []WorkDay {
if len(workDay.Bookings) == 1 && workDay.Bookings[0].CounterId == 0 {
workDay.Bookings = []Booking{}
}
if len(workDay.Bookings) > 1 || !helper.IsWeekend(workDay.Date()) {
if len(workDay.Bookings) >= 1 || !helper.IsWeekend(workDay.Date()) {
workDays = append(workDays, workDay)
}
}


@@ -205,45 +205,24 @@
.top-0 {
top: calc(var(--spacing) * 0);
}
.top-1 {
top: calc(var(--spacing) * 1);
}
.top-1\/2 {
top: calc(1/2 * 100%);
}
.top-2 {
top: calc(var(--spacing) * 2);
}
.top-2\.5 {
top: calc(var(--spacing) * 2.5);
}
.top-25 {
top: calc(var(--spacing) * 25);
}
.top-26 {
top: calc(var(--spacing) * 26);
}
.top-\[0\.125rem\] {
top: 0.125rem;
}
.right-1 {
right: calc(var(--spacing) * 1);
}
.right-2 {
right: calc(var(--spacing) * 2);
}
.right-2\.5 {
right: calc(var(--spacing) * 2.5);
}
.left-1 {
left: calc(var(--spacing) * 1);
}
.left-1\/2 {
left: calc(1/2 * 100%);
}
.z-10 {
z-index: 10;
}
.z-100 {
z-index: 100;
}
@@ -404,9 +383,6 @@
.h-2 {
height: calc(var(--spacing) * 2);
}
.h-3 {
height: calc(var(--spacing) * 3);
}
.h-3\.5 {
height: calc(var(--spacing) * 3.5);
}
@@ -431,9 +407,6 @@
.w-2 {
width: calc(var(--spacing) * 2);
}
.w-3 {
width: calc(var(--spacing) * 3);
}
.w-3\.5 {
width: calc(var(--spacing) * 3.5);
}
@@ -443,9 +416,6 @@
.w-5 {
width: calc(var(--spacing) * 5);
}
.w-9 {
width: calc(var(--spacing) * 9);
}
.w-9\/10 {
width: calc(9/10 * 100%);
}
@@ -458,9 +428,6 @@
.w-full {
width: 100%;
}
.flex-shrink {
flex-shrink: 1;
}
.flex-shrink-0 {
flex-shrink: 0;
}
@@ -476,21 +443,10 @@
.basis-\[content\] {
flex-basis: content;
}
.border-collapse {
border-collapse: collapse;
}
.-translate-x-1 {
--tw-translate-x: calc(var(--spacing) * -1);
translate: var(--tw-translate-x) var(--tw-translate-y);
}
.-translate-x-1\/2 {
--tw-translate-x: calc(calc(1/2 * 100%) * -1);
translate: var(--tw-translate-x) var(--tw-translate-y);
}
.-translate-y-1 {
--tw-translate-y: calc(var(--spacing) * -1);
translate: var(--tw-translate-x) var(--tw-translate-y);
}
.-translate-y-1\/2 {
--tw-translate-y: calc(calc(1/2 * 100%) * -1);
translate: var(--tw-translate-x) var(--tw-translate-y);
@@ -501,9 +457,6 @@
.cursor-pointer {
cursor: pointer;
}
.resize {
resize: both;
}
.scroll-m-2 {
scroll-margin: calc(var(--spacing) * 2);
}
@@ -661,9 +614,6 @@
.bg-red-600 {
background-color: var(--color-red-600);
}
.mask-repeat {
mask-repeat: repeat;
}
.p-1 {
padding: calc(var(--spacing) * 1);
}
@@ -740,16 +690,9 @@
.uppercase {
text-transform: uppercase;
}
.underline {
text-decoration-line: underline;
}
.opacity-0 {
opacity: 0%;
}
.outline {
outline-style: var(--tw-outline-style);
outline-width: 1px;
}
.filter {
filter: var(--tw-blur,) var(--tw-brightness,) var(--tw-contrast,) var(--tw-grayscale,) var(--tw-hue-rotate,) var(--tw-invert,) var(--tw-saturate,) var(--tw-sepia,) var(--tw-drop-shadow,);
}
@@ -1195,11 +1138,6 @@
syntax: "*";
inherits: false;
}
@property --tw-outline-style {
syntax: "*";
inherits: false;
initial-value: solid;
}
@property --tw-blur {
syntax: "*";
inherits: false;
@@ -1272,7 +1210,6 @@
--tw-border-style: solid;
--tw-divide-y-reverse: 0;
--tw-font-weight: initial;
--tw-outline-style: solid;
--tw-blur: initial;
--tw-brightness: initial;
--tw-contrast: initial;


@@ -94,8 +94,8 @@ function checkAll(pattern, state) {
}
}
bookingForms = document.querySelectorAll("form.bookings");
for (form of bookingForms) {
const bookingForms = document.querySelectorAll("form.bookings");
for (let form of bookingForms) {
let selectKommenInput = form.querySelector("input[name='select_kommen']");
let kommenGehenSelector = form.querySelector("select");
if (selectKommenInput) {


@@ -115,7 +115,7 @@ templ defaultDayComponent(day models.IWorkDay) {
if pause > 0 {
<p class="text-neutral-500 flex flex-row items-center"><span class="icon-[material-symbols-light--motion-photos-paused-outline]"></span>{ helper.FormatDuration(pause) }</p>
}
if overtime != 0 && day.IsEmpty() == false {
if !day.IsEmpty() && overtime != 0 {
<p class="text-neutral-500 flex flex-row items-center">
<span class="icon-[material-symbols-light--more-time]"></span>
{ helper.FormatDuration(overtime) }


@@ -346,7 +346,7 @@ func defaultDayComponent(day models.IWorkDay) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if overtime != 0 && day.IsEmpty() == false {
if !day.IsEmpty() && overtime != 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "<p class=\"text-neutral-500 flex flex-row items-center\"><span class=\"icon-[material-symbols-light--more-time]\"></span> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err

Cron/autoBackup.sh (new executable file, 6 lines)

@@ -0,0 +1,6 @@
# cron-timing: 05 01 * * 1
container_name="arbeitszeitmessung-main-db-1"
filename=backup-$(date '+%d%m%Y').sql
database_name=__DATABASE__
docker exec $container_name pg_dump $database_name > /home/pi/arbeitszeitmessung-backup/$filename
echo "created backup file: "$filename

Cron/autoHolidays.sh (new executable file, 3 lines)

@@ -0,0 +1,3 @@
# Calls endpoint to write all public holidays for the current year into the database.
port=__PORT__
curl localhost:$port/auto/feiertage

Cron/autoLogout.sh (new executable file, 4 lines)

@@ -0,0 +1,4 @@
# cron-timing: 55 23 * * *
# Calls endpoint to log out all users still logged in today
port=__PORT__
curl localhost:$port/auto/logout

DB/initdb/01_create_user.sh (new executable file, 56 lines)

@@ -0,0 +1,56 @@
#!/bin/bash
set -e # Exit on error
echo "Creating PostgreSQL user and setting permissions... $POSTGRES_USER for API user $POSTGRES_API_USER"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
CREATE ROLE migrate LOGIN ENCRYPTED PASSWORD '$POSTGRES_PASSWORD';
GRANT USAGE, CREATE ON SCHEMA public TO migrate;
GRANT CONNECT ON DATABASE arbeitszeitmessung TO migrate;
EOSQL
# psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
# GRANT SELECT, INSERT, UPDATE ON anwesenheit, abwesenheit, user_password, wochen_report, s_feiertage TO $POSTGRES_API_USER;
# GRANT DELETE ON abwesenheit TO $POSTGRES_API_USER;
# GRANT SELECT ON s_personal_daten, s_abwesenheit_typen, s_anwesenheit_typen, s_feiertage TO $POSTGRES_API_USER;
# GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO $POSTGRES_API_USER;
# EOSQL
echo "User creation and permissions setup complete!"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
-- privilege roles
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'app_base') THEN
CREATE ROLE app_base NOLOGIN;
END IF;
END
\$\$;
-- dynamic login role
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = '$POSTGRES_API_USER') THEN
CREATE ROLE $POSTGRES_API_USER
LOGIN
ENCRYPTED PASSWORD '$POSTGRES_API_PASS';
END IF;
END
\$\$;
-- grant base privileges
GRANT app_base TO $POSTGRES_API_USER;
GRANT CONNECT ON DATABASE $POSTGRES_DB TO $POSTGRES_API_USER;
GRANT USAGE ON SCHEMA public TO $POSTGRES_API_USER;
CREATE EXTENSION IF NOT EXISTS pgcrypto;
EOSQL
# psql -v ON_ERROR_STOP=1 --username root --dbname arbeitszeitmessung
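
A login smoke test for the role split introduced here (a sketch; host port and credentials are the env.example defaults from this diff):

psql "host=localhost port=5432 user=api_nutzer dbname=arbeitszeitmessung password=password" -c 'SELECT 1;'
# api_nutzer should connect through its app_base grants, while DDL remains with the migrate role.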


@@ -1,8 +0,0 @@
INSERT INTO "s_personal_daten" ("personal_nummer", "aktiv_beschaeftigt", "vorname", "nachname", "geburtsdatum", "plz", "adresse", "geschlecht", "card_uid", "hauptbeschaeftigungs_ort", "arbeitszeit_per_tag", "arbeitszeit_per_woche", "arbeitszeit_min_start", "arbeitszeit_max_ende", "vorgesetzter_pers_nr") VALUES
(123, 't', 'Kim', 'Mustermensch', '2003-02-01', '08963', 'Altenburger Str. 44A', 1, 'aaaa-aaaa', 1, 8, 40, '07:00:00', '20:00:00', 0);
INSERT INTO "user_password" ("personal_nummer", "pass_hash") VALUES
(123, crypt('max_pass', gen_salt('bf')));
INSERT INTO "s_anwesenheit_typen" ("anwesenheit_id", "anwesenheit_name") VALUES (1, 'Büro');
INSERT INTO "s_abwesenheit_typen" ("abwesenheit_id", "abwesenheit_name", "arbeitszeit_equivalent") VALUES (1, 'Urlaub', 100), (2, 'Krank', 100), (3, 'Kurzarbeit', -1), (4, 'Urlaub untertags', 50);


@@ -1,21 +0,0 @@
#!/bin/bash
set -e # Exit on error
echo "Creating PostgreSQL user and setting permissions... $POSTGRES_USER for API user $POSTGRES_API_USER"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
CREATE USER $POSTGRES_API_USER WITH ENCRYPTED PASSWORD '$POSTGRES_API_PASS';
EOSQL
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
GRANT CONNECT ON DATABASE $POSTGRES_DB TO $POSTGRES_API_USER;
GRANT USAGE ON SCHEMA public TO $POSTGRES_API_USER;
GRANT SELECT, INSERT, UPDATE ON anwesenheit, abwesenheit, user_password, wochen_report, s_feiertage TO $POSTGRES_API_USER;
GRANT DELETE ON abwesenheit TO $POSTGRES_API_USER;
GRANT SELECT ON s_personal_daten, s_abwesenheit_typen, s_anwesenheit_typen, s_feiertage TO $POSTGRES_API_USER;
GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO $POSTGRES_API_USER;
EOSQL
echo "User creation and permissions setup complete!"
# psql -v ON_ERROR_STOP=1 --username root --dbname arbeitszeitmessung


@@ -3,7 +3,7 @@ services:
db:
volumes:
- ${POSTGRES_PATH}:/var/lib/postgresql/data
# - ${POSTGRES_PATH}/initdb:/docker-entrypoint-initdb.d
- ${POSTGRES_PATH}/initdb:/docker-entrypoint-initdb.d
ports:
- 5432:5432


@@ -25,12 +25,6 @@ services:
- ${WEB_PORT}:8080
depends_on:
- db
- document-creator
volumes:
- ../logs:/app/Backend/logs
restart: unless-stopped
document-creator:
image: git.letsstein.de/tom/arbeitszeitmessung-doc-creator
container_name: ${TYPST_CONTAINER}
- ${LOG_PATH}:/app/logs
restart: unless-stopped


@@ -2,11 +2,11 @@ POSTGRES_USER=root # Postgres ADMIN username
POSTGRES_PASSWORD=very_secure # Postgres ADMIN password
POSTGRES_API_USER=api_nutzer # Postgres API username (for Arbeitszeitmessung)
POSTGRES_API_PASS=password # Postgres API password (for Arbeitszeitmessung)
POSTGRES_PATH=../DB # Database path (relative to the Docker folder, or an absolute path starting with /...)
LOG_PATH=../logs # Path for log files
POSTGRES_PATH=__ROOT__/DB # Database path (relative to the Docker folder, or an absolute path starting with /...)
POSTGRES_DB=arbeitszeitmessung # Postgres database name
POSTGRES_PORT=127.0.0.1:5432 # Postgres port will not be exposed by default. regex:^[0-9]{1,5}$
POSTGRES_PORT=127.0.0.1:5432 # Postgres port, not exposed by default. regex:^[0-9]{1,5}$
TZ=Europe/Berlin # Time zone
API_TOKEN=dont_access # API token for ESP endpoints
WEB_PORT=8000 # Port on which Arbeitszeitmessung should be accessible. regex:^[0-9]{1,5}$
TYPST_CONTAINER=arbeitszeitmessung-doc-creator # Name of the PDF compiler container
WEB_PORT=8000 # Port on which the web server is reachable. regex:^[0-9]{1,5}$
LOG_PATH=__ROOT__/logs # Path for audit logs
LOG_LEVEL=warn # Which log messages appear on the console


@@ -1,7 +0,0 @@
FROM ghcr.io/typst/typst:0.14.0
WORKDIR /app
COPY ./templates /app/templates
COPY ./static /app/static
ENTRYPOINT ["sh", "-c", "while true; do sleep 3600; done"]


@@ -44,7 +44,7 @@ generateFrontend:
backend: generateFrontend login_registry
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE_REGISTRY}/${PACKAGE_OWNER}/arbeitszeitmessung:latest Backend --push
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE_REGISTRY}/${PACKAGE_OWNER}/arbeitszeitmessung-webserver:dev Backend --push
# docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE_REGISTRY}/${PACKAGE_OWNER}/arbeitszeitmessung:${GIT_COMMIT} Backend //--push
test:


@@ -4,6 +4,10 @@ set -e
envFile=Docker/.env
envExample=Docker/env.example
autoBackupScript=Cron/autoBackup.sh
autoHolidaysScript=Cron/autoHolidays.sh
autoLogoutScript=Cron/autoLogout.sh
echo "Checking Docker installation..."
if ! command -v docker >/dev/null 2>&1; then
echo "Docker not found. Install Docker? [y/N]"
@@ -18,12 +22,16 @@ else
echo "Docker is already installed."
fi
###########################################################################
echo "Checking Docker Compose..."
if ! docker compose version >/dev/null 2>&1; then
echo "Docker Compose plugin missing. You may need to update Docker."
exit 1
fi
###########################################################################
echo "Preparing .env file..."
if [ ! -f $envFile ]; then
if [ -f $envExample ]; then
@@ -44,6 +52,9 @@ if [ ! -f $envFile ]; then
raw_val=$(printf "%s" "$rest" | sed 's/ *#.*//')
default_value=$(printf "%s" "$raw_val" | sed 's/"//g')
# Replace __ROOT__ with script pwd
default_value="${default_value/__ROOT__/$(pwd)}"
regex=""
if [[ "$comment" =~ regex:(.*)$ ]]; then
regex="${BASH_REMATCH[1]}"
@@ -96,13 +107,80 @@ else
echo "Using existing .env. (found at $envFile)"
fi
###########################################################################
LOG_PATH=$(grep -E '^LOG_PATH=' $envFile | cut -d= -f2)
if [ -z "$LOG_PATH" ]; then
echo "LOG_PATH not found in .env using default $(pwd)/logs"
LOG_PATH=$(pwd)/logs
else
LOG_PATH=Docker/$LOG_PATH
fi
mkdir -p $LOG_PATH
echo "Created logs folder at $LOG_PATH"
###########################################################################
echo -e "\n\n"
echo "Start containers with docker compose up -d? [y/N]"
read -r start_containers
if [[ "$start_containers" =~ ^[Yy]$ ]]; then
cd Docker
mkdir -p ../logs
docker compose up -d
echo "Containers started."
else
echo "You can start them manually with: docker compose up -d"
fi
###########################################################################
echo -e "\n\n"
echo "Setup Crontab for automatic logout, backup and holiday creation? [y/N]"
read -r setup_cron
if [[ "$setup_cron" =~ ^[Yy]$ ]]; then
WEB_PORT=$(grep -E '^WEB_PORT=' $envFile | cut -d= -f2)
if [ -z "$WEB_PORT" ]; then
echo "WEB_PORT not found in .env using default 8000"
WEB_PORT=8000
fi
POSTGRES_DB=$(grep -E '^POSTGRES_DB=' $envFile | cut -d= -f2)
if [ -z "$POSTGRES_DB" ]; then
echo "arbeitszeitmessung not found in .env using default arbeitszeitmessung"
POSTGRES_DB="arbeitszeitmessung"
fi
sed -i "s/__PORT__/$WEB_PORT/" $autoHolidaysScript
sed -i "s/__PORT__/$WEB_PORT/" $autoLogoutScript
sed -i "s/__DATABASE__/$POSTGRES_DB/" $autoBackupScript
chmod +x $autoBackupScript $autoHolidaysScript $autoLogoutScript
# echo "Scripts build with PORT=$WEB_PORT and DATABSE=$POSTGRES_DB!"
echo "Adding rules to crontab."
cron_commands=$(mktemp /tmp/arbeitszeitmessung-cron.XXX)
for file in Cron/*; do
cron_timing=$(grep -E '^# cron-timing:' "$file" | sed 's/^# cron-timing:[[:space:]]*//')
if [ -z "$cron_timing" ]; then
echo "No cron-timing found in $file, so it's not added to crontab."
continue
fi
( crontab -l ; echo "$cron_timing $(pwd)/$file" )| awk '!x[$0]++' | crontab -
echo "Added entry to crontab: $cron_timing $(pwd)/$file."
sleep 2
done
if systemctl is-active --quiet cron.service ; then
echo "cron.service is running. Everything should be fine now."
else
echo "cron.service is not running. Please start and enable cron.service."
echo "For how to start a service, see: https://wiki.ubuntuusers.de/systemd/systemctl UNITNAME will be cron.service"
fi
else
echo "Please setup cron manually by executing crontab -e and adding all files from inside the Cron directory!"
fi