Compare commits

...

21 Commits

Author  SHA1  Message  Date

6a1b0c7b7f  Merge pull request 'fix' (#28) from master into prod (Reviewed-on: #28)  2025-06-21 12:09:01 +03:00
Egor Matveev  ea63cd794c  fix  [all checks successful]  2025-06-21 12:04:27 +03:00
028db059fe  Merge pull request 'fix' (#26) from master into prod (Reviewed-on: #26)  2025-06-18 16:35:21 +03:00
Egor Matveev  dce65faffd  fix  [all checks successful]  2025-06-18 16:34:50 +03:00
880b6313a9  Merge pull request 'fix' (#25) from master into prod (Reviewed-on: #25)  2025-06-18 16:18:53 +03:00
Egor Matveev  71d5fa2e25  fix  [all checks successful]  2025-06-18 15:22:50 +03:00
fbeb173df0  Merge pull request 'master' (#23) from master into prod (Reviewed-on: #23)  2025-06-15 22:39:28 +03:00
Egor Matveev  502c767a2a  fix  [all checks successful]  2025-06-15 22:35:04 +03:00
Egor Matveev  f2ab91cbce  fix  [all checks successful]  2025-06-15 22:24:06 +03:00
Egor Matveev  fad93012a1  fix  [all checks successful]  2025-06-14 03:39:20 +03:00
Egor Matveev  3d6c955240  fix  [all checks successful]  2025-06-14 03:31:23 +03:00
Egor Matveev  9c106501c8  fix  [all checks successful]  2025-06-14 03:26:54 +03:00
Egor Matveev  d55f63ae79  fix  2025-06-14 03:25:03 +03:00
Egor Matveev  5ba0be8e30  fix  [all checks successful]  2025-06-13 23:30:59 +03:00
Egor Matveev  d11ae23369  fix  [all checks successful]  2025-06-13 23:25:34 +03:00
Egor Matveev  3d73678be0  fix  [all checks successful]  2025-06-13 02:44:50 +03:00
Egor Matveev  16c24883ed  fix  [all checks successful]  2025-06-13 02:39:22 +03:00
Egor Matveev  4f11e508db  fix  [all checks successful]  2025-06-13 02:35:18 +03:00
Egor Matveev  7375a12d95  fix  [all checks successful]  2025-06-13 02:30:28 +03:00
Egor Matveev  97aefc3052  fix  [all checks successful]  2025-06-13 02:26:40 +03:00
Egor Matveev  1d5d753c39  fix  [all checks successful]  2025-06-13 02:23:48 +03:00
11 changed files with 377 additions and 50 deletions

View File

@@ -8,6 +8,7 @@ services:
       - clickhouse-development
     environment:
       CLICKHOUSE_PASSWORD: $CLICKHOUSE_PASSWORD_DEV
+      STAGE: "development"
     deploy:
       mode: replicated
       restart_policy:

View File

@@ -1,23 +1,24 @@
version: "3.4"
services:
queues:
monitoring:
image: mathwave/sprint-repo:monitoring
networks:
- clickhouse
- monitoring
environment:
MONGO_HOST: "mongo.sprinthub.ru"
MONGO_PASSWORD: $MONGO_PASSWORD_PROD
CLICKHOUSE_PASSWORD: $CLICKHOUSE_PASSWORD_PROD
STAGE: "production"
deploy:
mode: replicated
restart_policy:
condition: any
placement:
constraints: [node.labels.stage == production]
update_config:
parallelism: 1
order: start-first
networks:
queues:
external: true
clickhouse:
external: true
monitoring:
external: true
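
Both stack files now pass a STAGE variable into the container; main.go (at the bottom of this diff) reads it with os.Getenv("STAGE") to tag each metric's environment. A minimal sketch of that lookup, where the fallback value is an assumption, since the diff itself passes the raw value through:

package main

import (
	"fmt"
	"os"
)

// stage mirrors how main.go consumes the variable; the "unknown"
// fallback is hypothetical, the real code uses the raw value.
func stage() string {
	if s := os.Getenv("STAGE"); s != "" {
		return s
	}
	return "unknown"
}

func main() {
	fmt.Println(stage())
}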

View File

@@ -0,0 +1,43 @@
name: Deploy Prod
on:
  pull_request:
    branches:
      - prod
    types: [closed]
jobs:
  build:
    name: Build
    runs-on: [dev]
    steps:
      - name: login
        run: docker login -u mathwave -p ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: checkout
        uses: actions/checkout@v4
        with:
          ref: prod
      - name: build
        run: docker build -t mathwave/sprint-repo:monitoring .
  push:
    name: Push
    runs-on: [dev]
    needs: build
    steps:
      - name: push
        run: docker push mathwave/sprint-repo:monitoring
  deploy-prod:
    name: Deploy prod
    runs-on: [prod]
    needs: push
    steps:
      - name: login
        run: docker login -u mathwave -p ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: checkout
        uses: actions/checkout@v4
        with:
          ref: prod
      - name: deploy
        env:
          CLICKHOUSE_PASSWORD_PROD: ${{ secrets.CLICKHOUSE_PASSWORD_PROD }}
        run: docker stack deploy --with-registry-auth -c ./.deploy/deploy-prod.yaml infra

View File

@@ -3,23 +3,24 @@ package routers
 import (
 	"encoding/json"
 	"log"
+	client "monitoring/app/storage/clickhouse"
 	endpoints "monitoring/app/storage/clickhouse/tables"
 	"net/http"
 )
 
-func AddEndpointMetric (r *http.Request) (interface{}, int) {
+func AddEndpointMetric(r *http.Request) (interface{}, int) {
 	d := json.NewDecoder(r.Body)
-	body := endpoints.EndpointMetric{}
+	body := client.EndpointMetric{}
 	err := d.Decode(&body)
 	if err != nil {
 		return nil, http.StatusBadRequest
 	}
-	err = endpoints.AddEndpointMetric(body)
+	endpoints.AddEndpointMetric(body)
 	if err != nil {
-		log.Printf(err.Error())
+		log.Print(err.Error())
 		return nil, http.StatusInternalServerError
 	}
 	return nil, http.StatusAccepted
 }
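
For reference, the route wired to this handler in main.go (bottom of this diff) is /api/v1/metrics/endpoint on port 1237, and the increment and task routers below accept requests of the same shape. A minimal client sketch, assuming a locally running instance; the service name and endpoint values are hypothetical:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// Mirrors client.EndpointMetric from the ClickHouse client in this diff.
type EndpointMetric struct {
	Timestamp    time.Time `json:"timestamp"`
	Service      string    `json:"service"`
	Environment  string    `json:"environment"`
	Endpoint     string    `json:"endpoint"`
	StatusCode   int       `json:"status_code"`
	ResponseTime int       `json:"response_time"`
	Method       string    `json:"method"`
}

func main() {
	m := EndpointMetric{
		Timestamp:    time.Now(),
		Service:      "example",      // hypothetical service name
		Environment:  "development",
		Endpoint:     "/api/v1/ping", // hypothetical endpoint
		StatusCode:   200,
		ResponseTime: 12,
		Method:       "GET",
	}
	body, _ := json.Marshal(m)
	resp, err := http.Post("http://localhost:1237/api/v1/metrics/endpoint",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "202 Accepted" per the handler
}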

View File

@@ -0,0 +1,26 @@
package routers

import (
	"encoding/json"
	"log"
	client "monitoring/app/storage/clickhouse"
	increments "monitoring/app/storage/clickhouse/tables"
	"net/http"
)

func AddIncrementMetric(r *http.Request) (interface{}, int) {
	d := json.NewDecoder(r.Body)
	body := client.IncrementMetric{}
	err := d.Decode(&body)
	if err != nil {
		return nil, http.StatusBadRequest
	}
	increments.AddIncrementMetric(body)
	// NOTE: AddIncrementMetric returns nothing, so err still holds the
	// (nil) Decode result and this branch can never fire.
	if err != nil {
		log.Print(err.Error())
		return nil, http.StatusInternalServerError
	}
	return nil, http.StatusAccepted
}

View File

@@ -0,0 +1,26 @@
package routers

import (
	"encoding/json"
	"log"
	client "monitoring/app/storage/clickhouse"
	tasks "monitoring/app/storage/clickhouse/tables"
	"net/http"
)

func AddTaskMetric(r *http.Request) (interface{}, int) {
	d := json.NewDecoder(r.Body)
	body := client.TaskMetric{}
	err := d.Decode(&body)
	if err != nil {
		return nil, http.StatusBadRequest
	}
	tasks.AddTaskMetric(body)
	// NOTE: as above, err is still the (nil) Decode result here,
	// so this branch is dead code.
	if err != nil {
		log.Print(err.Error())
		return nil, http.StatusInternalServerError
	}
	return nil, http.StatusAccepted
}

View File

@@ -3,13 +3,49 @@ package storage
import (
	"context"
	"fmt"
	"log"
	"os"
	"sync"
	"time"

	"github.com/ClickHouse/clickhouse-go/v2"
	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

type EndpointMetric struct {
	Timestamp    time.Time `json:"timestamp"`
	Service      string    `json:"service"`
	Environment  string    `json:"environment"`
	Endpoint     string    `json:"endpoint"`
	StatusCode   int       `json:"status_code"`
	ResponseTime int       `json:"response_time"`
	Method       string    `json:"method"`
}

type IncrementMetric struct {
	Timestamp   time.Time `json:"timestamp"`
	Service     string    `json:"service"`
	Environment string    `json:"environment"`
	Name        string    `json:"name"`
	Count       int       `json:"count"`
}

type TaskMetric struct {
	Timestamp       time.Time `json:"timestamp"`
	Service         string    `json:"service"`
	Environment     string    `json:"environment"`
	Queue           string    `json:"queue"`
	Success         bool      `json:"success"`
	ExecutionTimeMs int       `json:"execution_time_ms"`
}

var Connection driver.Conn

var EndpointsCol []EndpointMetric
var EndpointsMutex sync.Mutex
var IncrementsCol []IncrementMetric
var IncrementsMutex sync.Mutex
var TasksCol []TaskMetric
var TasksMutex sync.Mutex

func Connect() error {
	conn, err := connect()
@@ -17,12 +53,23 @@ func Connect() error {
		return err
	}
	Connection = *conn
	EndpointsCol = make([]EndpointMetric, 0)
	IncrementsCol = make([]IncrementMetric, 0)
	TasksCol = make([]TaskMetric, 0)
	EndpointsMutex = sync.Mutex{}
	IncrementsMutex = sync.Mutex{}
	TasksMutex = sync.Mutex{}
	go pushMetrics()
	return nil
}

func Close() {
	Connection.Close()
}

func connect() (*driver.Conn, error) {
	var (
		ctx       = context.Background()
		conn, err = clickhouse.Open(&clickhouse.Options{
			Addr: []string{"clickhouse:9000"},
			Auth: clickhouse.Auth{
@@ -45,4 +92,170 @@ func connect() (*driver.Conn, error) {
		return nil, err
	}
	return &conn, nil
}
func pushEndpoints() error {
	if len(EndpointsCol) == 0 {
		return nil
	}
	EndpointsMutex.Lock()
	newCollection := EndpointsCol
	EndpointsCol = make([]EndpointMetric, 0)
	EndpointsMutex.Unlock()
	batch, err := Connection.PrepareBatch(context.Background(), "INSERT INTO endpoints")
	if err != nil {
		return err
	}
	for _, metric := range newCollection {
		err := batch.Append(
			metric.Timestamp,
			metric.Service,
			metric.Environment,
			metric.Endpoint,
			metric.StatusCode,
			metric.ResponseTime,
			metric.Method,
		)
		if err != nil {
			return err
		}
	}
	return batch.Send()
}

func pushIncrements() error {
	if len(IncrementsCol) == 0 {
		return nil
	}
	IncrementsMutex.Lock()
	newCollection := IncrementsCol
	IncrementsCol = make([]IncrementMetric, 0)
	IncrementsMutex.Unlock()
	batch, err := Connection.PrepareBatch(context.Background(), "INSERT INTO increments")
	if err != nil {
		return err
	}
	for _, metric := range newCollection {
		err := batch.Append(
			metric.Timestamp,
			metric.Service,
			metric.Environment,
			metric.Name,
			metric.Count,
		)
		if err != nil {
			return err
		}
	}
	return batch.Send()
}

func pushTasks() error {
	if len(TasksCol) == 0 {
		return nil
	}
	TasksMutex.Lock()
	newCollection := TasksCol
	TasksCol = make([]TaskMetric, 0)
	TasksMutex.Unlock()
	batch, err := Connection.PrepareBatch(context.Background(), "INSERT INTO tasks")
	if err != nil {
		return err
	}
	for _, metric := range newCollection {
		err := batch.Append(
			metric.Timestamp,
			metric.Service,
			metric.Environment,
			metric.Queue,
			metric.Success,
			metric.ExecutionTimeMs,
		)
		if err != nil {
			return err
		}
	}
	return batch.Send()
}

func pushMetrics() {
	for {
		pushEndpoints()
		pushIncrements()
		pushTasks()
		time.Sleep(time.Second)
	}
}

func Migrate() error {
	err := Connection.Exec(
		context.TODO(),
		`CREATE TABLE IF NOT EXISTS endpoints (
			timestamp DateTime,
			service LowCardinality(String),
			environment LowCardinality(String),
			endpoint LowCardinality(String),
			status_code UInt16,
			response_time_ms UInt32,
			method LowCardinality(String)
		)
		ENGINE = MergeTree
		PARTITION BY toYYYYMM(timestamp)
		ORDER BY (service, environment, endpoint, method, timestamp);`,
	)
	if err != nil {
		log.Fatal(err)
		return err
	}
	err = Connection.Exec(
		context.TODO(),
		`CREATE TABLE IF NOT EXISTS tasks (
			timestamp DateTime,
			service LowCardinality(String),
			environment LowCardinality(String),
			queue LowCardinality(String),
			success Bool,
			execution_time_ms UInt32
		)
		ENGINE = MergeTree
		PARTITION BY toYYYYMM(timestamp)
		ORDER BY (service, environment, queue, timestamp);`,
	)
	if err != nil {
		log.Fatal(err)
		return err
	}
	err = Connection.Exec(
		context.TODO(),
		`CREATE TABLE IF NOT EXISTS increments (
			timestamp DateTime,
			service LowCardinality(String),
			environment LowCardinality(String),
			name LowCardinality(String),
			count UInt16
		)
		ENGINE = MergeTree
		PARTITION BY toYYYYMM(timestamp)
		ORDER BY (service, environment, name, timestamp);`,
	)
	if err != nil {
		log.Fatal(err)
		return err
	}
	return nil
}
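
The client batches writes: handlers append to package-level slices, and pushMetrics flushes once per second. Each flush swaps the slice out while holding the mutex and only then performs the ClickHouse round trip, so producers are blocked for the swap alone. A stripped-down sketch of that swap pattern, with illustrative names rather than the repo's:

package main

import (
	"fmt"
	"sync"
	"time"
)

var (
	buf []int
	mu  sync.Mutex
)

func add(v int) {
	mu.Lock()
	defer mu.Unlock()
	buf = append(buf, v)
}

// flush detaches the buffer under the lock, then processes the
// batch without holding the mutex.
func flush() {
	mu.Lock()
	batch := buf
	buf = nil
	mu.Unlock()

	if len(batch) == 0 {
		return
	}
	fmt.Println("sending batch of", len(batch)) // stand-in for batch.Send()
}

func main() {
	for i := 0; i < 5; i++ {
		add(i)
	}
	flush()
	time.Sleep(time.Second)
}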

View File

@@ -1,42 +1,11 @@
 package storage
 
 import (
-	"context"
 	client "monitoring/app/storage/clickhouse"
-	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
 )
 
-type EndpointMetric struct {
-	Timestamp    int    `json:"timestamp"`
-	Service      string `json:"service"`
-	Endpoint     string `json:"endpoint"`
-	StatusCode   int    `json:"status_code"`
-	ResponseTime int    `json:"response_time"`
-	Method       string `json:"method"`
-}
-
-func AddEndpointMetric(metric EndpointMetric) error {
-	batch, err := connection().PrepareBatch(context.Background(), "INSERT INTO endpoints")
-	if err != nil {
-		return err
-	}
-	err = batch.Append(
-		metric.Timestamp,
-		metric.Service,
-		metric.Endpoint,
-		metric.StatusCode,
-		metric.ResponseTime,
-		metric.Method,
-	)
-	if err != nil {
-		return err
-	}
-	return batch.Send()
-}
-
-func connection() driver.Conn {
-	return client.Connection
-}
+func AddEndpointMetric(metric client.EndpointMetric) {
+	client.EndpointsMutex.Lock()
+	defer client.EndpointsMutex.Unlock()
+	client.EndpointsCol = append(client.EndpointsCol, metric)
+}

View File

@@ -0,0 +1,11 @@
package storage

import (
	client "monitoring/app/storage/clickhouse"
)

func AddIncrementMetric(metric client.IncrementMetric) {
	client.IncrementsMutex.Lock()
	defer client.IncrementsMutex.Unlock()
	client.IncrementsCol = append(client.IncrementsCol, metric)
}

View File

@@ -0,0 +1,11 @@
package storage

import (
	client "monitoring/app/storage/clickhouse"
)

func AddTaskMetric(metric client.TaskMetric) {
	client.TasksMutex.Lock()
	defer client.TasksMutex.Unlock()
	client.TasksCol = append(client.TasksCol, metric)
}

main.go
View File

@@ -4,11 +4,27 @@ import (
"encoding/json"
"log"
endpoint "monitoring/app/routers/metrics"
increment "monitoring/app/routers/metrics"
task "monitoring/app/routers/metrics"
client "monitoring/app/storage/clickhouse"
endpoints "monitoring/app/storage/clickhouse/tables"
"net/http"
"os"
"time"
)
func writeMetric(timestamp time.Time, endpoint string, statusCode int, responseTime int, method string) {
endpoints.AddEndpointMetric(client.EndpointMetric{
Timestamp: timestamp.Add(time.Hour * 3),
Service: "monitoring",
Environment: os.Getenv("STAGE"),
Endpoint: endpoint,
StatusCode: statusCode,
ResponseTime: responseTime,
Method: method,
})
}
func handlerWrapper(f func(*http.Request) (interface{}, int)) func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
start := time.Now()
@@ -24,6 +40,7 @@ func handlerWrapper(f func(*http.Request) (interface{}, int)) func(http.Response
		} else {
			w.WriteHeader(status)
		}
		go writeMetric(start, r.URL.Path, status, int(time.Since(start).Milliseconds()), r.Method)
		log.Printf("%s %d %s", r.URL, status, time.Since(start))
	}
}
@@ -33,8 +50,16 @@ func main() {
	if err != nil {
		panic(err)
	}
	defer client.Close()
	err = client.Migrate()
	if err != nil {
		panic(err)
	}
	http.HandleFunc("/api/v1/metrics/endpoint", handlerWrapper(endpoint.AddEndpointMetric))
	http.HandleFunc("/api/v1/metrics/task", handlerWrapper(task.AddTaskMetric))
	http.HandleFunc("/api/v1/metrics/increment", handlerWrapper(increment.AddIncrementMetric))
	log.Printf("Server started")
	http.ListenAndServe(":1237", nil)
}
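
With the server above running, the two remaining routes can be exercised the same way as the endpoint route; a sketch assuming localhost:1237 and hypothetical payload values, with field names taken from the structs in the ClickHouse client:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// post sends one JSON payload to the given metrics route and
// prints the resulting status.
func post(path, payload string) {
	resp, err := http.Post("http://localhost:1237"+path,
		"application/json", bytes.NewBufferString(payload))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println(path, resp.Status)
}

func main() {
	post("/api/v1/metrics/increment",
		`{"timestamp":"2025-06-21T12:00:00Z","service":"example","environment":"development","name":"signups","count":1}`)
	post("/api/v1/metrics/task",
		`{"timestamp":"2025-06-21T12:00:00Z","service":"example","environment":"development","queue":"default","success":true,"execution_time_ms":42}`)
}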