Compare commits

Comparing 703549703b...dev (15 commits)

| Author | SHA1 | Date |
|---|---|---|
| | ff4df20d2a | |
| | ea63cd794c | |
| | dce65faffd | |
| | 5838806424 | |
| | 71d5fa2e25 | |
| | 2116b5f2a8 | |
| | 502c767a2a | |
| | 2a296a0838 | |
| | f2ab91cbce | |
| | fad93012a1 | |
| | d5ddb7a316 | |
| | 3d6c955240 | |
| | 50459bcb64 | |
| | 9c106501c8 | |
| | d55f63ae79 | |
@@ -8,6 +8,7 @@ services:
      - clickhouse-development
    environment:
      CLICKHOUSE_PASSWORD: $CLICKHOUSE_PASSWORD_DEV
+     STAGE: "development"
    deploy:
      mode: replicated
      restart_policy:
@@ -1,23 +1,24 @@
version: "3.4"

services:
  queues:
  monitoring:
    image: mathwave/sprint-repo:monitoring
    networks:
      - clickhouse
      - monitoring
    environment:
      MONGO_HOST: "mongo.sprinthub.ru"
      MONGO_PASSWORD: $MONGO_PASSWORD_PROD
      CLICKHOUSE_PASSWORD: $CLICKHOUSE_PASSWORD_PROD
      STAGE: "production"
    deploy:
      mode: replicated
      restart_policy:
        condition: any
      placement:
        constraints: [node.labels.stage == production]
      update_config:
        parallelism: 1
        order: start-first

networks:
  queues:
    external: true
  clickhouse:
    external: true
  monitoring:
    external: true
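For orientation, this is roughly how the environment block above surfaces inside the Go service. A minimal sketch: STAGE is read with os.Getenv in main.go further down this compare; reading CLICKHOUSE_PASSWORD the same way is an assumption for illustration.

```go
package main

import (
	"fmt"
	"os"
)

// Sketch: the variables set in the compose file above appear as plain
// environment variables inside the container. STAGE is read this way in
// main.go; reading CLICKHOUSE_PASSWORD here is illustrative only.
func main() {
	stage := os.Getenv("STAGE")                    // "development" or "production"
	chPassword := os.Getenv("CLICKHOUSE_PASSWORD") // substituted from $CLICKHOUSE_PASSWORD_* at deploy time
	fmt.Println("stage:", stage, "clickhouse password set:", chPassword != "")
}
```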
.gitea/workflows/deploy-prod.yaml (new file, 43 lines)
@@ -0,0 +1,43 @@
name: Deploy Prod

on:
  pull_request:
    branches:
      - prod
    types: [closed]

jobs:
  build:
    name: Build
    runs-on: [dev]
    steps:
      - name: login
        run: docker login -u mathwave -p ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: checkout
        uses: actions/checkout@v4
        with:
          ref: prod
      - name: build
        run: docker build -t mathwave/sprint-repo:monitoring .
  push:
    name: Push
    runs-on: [dev]
    needs: build
    steps:
      - name: push
        run: docker push mathwave/sprint-repo:monitoring
  deploy-prod:
    name: Deploy prod
    runs-on: [prod]
    needs: push
    steps:
      - name: login
        run: docker login -u mathwave -p ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: checkout
        uses: actions/checkout@v4
        with:
          ref: prod
      - name: deploy
        env:
          CLICKHOUSE_PASSWORD_PROD: ${{ secrets.CLICKHOUSE_PASSWORD_PROD }}
        run: docker stack deploy --with-registry-auth -c ./.deploy/deploy-prod.yaml infra
@@ -3,23 +3,24 @@ package routers
import (
    "encoding/json"
    "log"
+   client "monitoring/app/storage/clickhouse"
    endpoints "monitoring/app/storage/clickhouse/tables"
    "net/http"
)

-func AddEndpointMetric (r *http.Request) (interface{}, int) {
+func AddEndpointMetric(r *http.Request) (interface{}, int) {
    d := json.NewDecoder(r.Body)
-   body := endpoints.EndpointMetric{}
+   body := client.EndpointMetric{}
    err := d.Decode(&body)
    if err != nil {
        return nil, http.StatusBadRequest
    }

-   err = endpoints.AddEndpointMetric(body)
+   endpoints.AddEndpointMetric(body)
    if err != nil {
        log.Print(err.Error())
        return nil, http.StatusInternalServerError
    }

    return nil, http.StatusAccepted
}
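A sketch of what a caller of this handler could look like, assuming a locally running instance on port 1237 (the port opened in main.go below); the service name and metric values are made up.

```go
package main

import (
	"bytes"
	"encoding/json"
	"log"
	"net/http"
	"time"
)

// Illustrative client call against the endpoint-metric handler above.
// Field names follow the EndpointMetric JSON tags in the storage package.
func main() {
	payload := map[string]interface{}{
		"timestamp":     time.Now().UTC(),
		"service":       "example-service", // hypothetical caller
		"environment":   "development",
		"endpoint":      "/api/v1/orders",
		"status_code":   200,
		"response_time": 42,
		"method":        "GET",
	}
	body, _ := json.Marshal(payload)
	resp, err := http.Post("http://localhost:1237/api/v1/metrics/endpoint",
		"application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status) // the handler replies 202 Accepted on success
}
```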
app/routers/metrics/increment.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package routers

import (
    "encoding/json"
    "log"
    client "monitoring/app/storage/clickhouse"
    increments "monitoring/app/storage/clickhouse/tables"
    "net/http"
)

func AddIncrementMetric(r *http.Request) (interface{}, int) {
    d := json.NewDecoder(r.Body)
    body := client.IncrementMetric{}
    err := d.Decode(&body)
    if err != nil {
        return nil, http.StatusBadRequest
    }

    increments.AddIncrementMetric(body)
    if err != nil {
        log.Print(err.Error())
        return nil, http.StatusInternalServerError
    }

    return nil, http.StatusAccepted
}
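A hypothetical test sketch for this handler; it exercises only JSON decoding and the in-memory append, so no ClickHouse connection is required.

```go
package routers

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
)

// Sketch of a same-package test for AddIncrementMetric. The handler only
// decodes the body and buffers the metric under a mutex, so it can be
// exercised without connecting to ClickHouse. Values are illustrative.
func TestAddIncrementMetric(t *testing.T) {
	body := `{"timestamp":"2024-01-01T00:00:00Z","service":"monitoring","environment":"development","name":"example_counter","count":1}`
	r := httptest.NewRequest(http.MethodPost, "/api/v1/metrics/increment", strings.NewReader(body))

	_, status := AddIncrementMetric(r)
	if status != http.StatusAccepted {
		t.Fatalf("expected 202, got %d", status)
	}
}
```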
@@ -3,19 +3,20 @@ package routers
import (
    "encoding/json"
    "log"
+   client "monitoring/app/storage/clickhouse"
    tasks "monitoring/app/storage/clickhouse/tables"
    "net/http"
)

func AddTaskMetric(r *http.Request) (interface{}, int) {
    d := json.NewDecoder(r.Body)
-   body := tasks.TaskMetric{}
+   body := client.TaskMetric{}
    err := d.Decode(&body)
    if err != nil {
        return nil, http.StatusBadRequest
    }

-   err = tasks.AddTaskMetric(body)
+   tasks.AddTaskMetric(body)
    if err != nil {
        log.Print(err.Error())
        return nil, http.StatusInternalServerError
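The request body this handler decodes follows the client.TaskMetric JSON tags (defined in the storage package later in this compare). A small standalone sketch of the expected wire format, using a local copy of the struct with illustrative values:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Local copy of client.TaskMetric (same fields and tags as in the storage
// package below), used only to show the JSON shape the task handler expects.
type TaskMetric struct {
	Timestamp       time.Time `json:"timestamp"`
	Service         string    `json:"service"`
	Environment     string    `json:"environment"`
	Queue           string    `json:"queue"`
	Success         bool      `json:"success"`
	ExecutionTimeMs int       `json:"execution_time_ms"`
}

func main() {
	m := TaskMetric{
		Timestamp:       time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC),
		Service:         "monitoring",
		Environment:     "development",
		Queue:           "example-queue", // illustrative queue name
		Success:         true,
		ExecutionTimeMs: 120,
	}
	out, _ := json.Marshal(m)
	fmt.Println(string(out))
	// {"timestamp":"2024-01-01T00:00:00Z","service":"monitoring","environment":"development","queue":"example-queue","success":true,"execution_time_ms":120}
}
```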
@@ -5,12 +5,47 @@ import (
    "fmt"
    "log"
    "os"
+   "sync"
    "time"

    "github.com/ClickHouse/clickhouse-go/v2"
    "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

+type EndpointMetric struct {
+   Timestamp    time.Time `json:"timestamp"`
+   Service      string    `json:"service"`
+   Environment  string    `json:"environment"`
+   Endpoint     string    `json:"endpoint"`
+   StatusCode   int       `json:"status_code"`
+   ResponseTime int       `json:"response_time"`
+   Method       string    `json:"method"`
+}

+type IncrementMetric struct {
+   Timestamp   time.Time `json:"timestamp"`
+   Service     string    `json:"service"`
+   Environment string    `json:"environment"`
+   Name        string    `json:"name"`
+   Count       int       `json:"count"`
+}

+type TaskMetric struct {
+   Timestamp       time.Time `json:"timestamp"`
+   Service         string    `json:"service"`
+   Environment     string    `json:"environment"`
+   Queue           string    `json:"queue"`
+   Success         bool      `json:"success"`
+   ExecutionTimeMs int       `json:"execution_time_ms"`
+}

var Connection driver.Conn
+var EndpointsCol []EndpointMetric
+var EndpointsMutex sync.Mutex
+var IncrementsCol []IncrementMetric
+var IncrementsMutex sync.Mutex
+var TasksCol []TaskMetric
+var TasksMutex sync.Mutex

func Connect() error {
    conn, err := connect()
@@ -18,6 +53,13 @@ func Connect() error {
        return err
    }
    Connection = *conn
+   EndpointsCol = make([]EndpointMetric, 0)
+   IncrementsCol = make([]IncrementMetric, 0)
+   TasksCol = make([]TaskMetric, 0)
+   EndpointsMutex = sync.Mutex{}
+   IncrementsMutex = sync.Mutex{}
+   TasksMutex = sync.Mutex{}
+   go pushMetrics()
    return nil
}
@@ -52,20 +94,126 @@ func connect() (*driver.Conn, error) {
    return &conn, nil
}

+func pushEndpoints() error {
+   if len(EndpointsCol) == 0 {
+       return nil
+   }
+
+   EndpointsMutex.Lock()
+   newCollection := EndpointsCol
+   EndpointsCol = make([]EndpointMetric, 0)
+   EndpointsMutex.Unlock()
+
+   batch, err := Connection.PrepareBatch(context.Background(), "INSERT INTO endpoints")
+   if err != nil {
+       return err
+   }
+
+   for _, metric := range newCollection {
+       err := batch.Append(
+           metric.Timestamp,
+           metric.Service,
+           metric.Environment,
+           metric.Endpoint,
+           metric.StatusCode,
+           metric.ResponseTime,
+           metric.Method,
+       )
+       if err != nil {
+           return err
+       }
+   }
+
+   return batch.Send()
+}
+
+func pushIncrements() error {
+   if len(IncrementsCol) == 0 {
+       return nil
+   }
+
+   IncrementsMutex.Lock()
+   newCollection := IncrementsCol
+   IncrementsCol = make([]IncrementMetric, 0)
+   IncrementsMutex.Unlock()
+
+   batch, err := Connection.PrepareBatch(context.Background(), "INSERT INTO increments")
+   if err != nil {
+       return err
+   }
+
+   for _, metric := range newCollection {
+       err := batch.Append(
+           metric.Timestamp,
+           metric.Service,
+           metric.Environment,
+           metric.Name,
+           metric.Count,
+       )
+       if err != nil {
+           return err
+       }
+   }
+
+   return batch.Send()
+}
+
+func pushTasks() error {
+   if len(IncrementsCol) == 0 {
+       return nil
+   }
+
+   TasksMutex.Lock()
+   newCollection := TasksCol
+   TasksCol = make([]TaskMetric, 0)
+   TasksMutex.Unlock()
+
+   batch, err := Connection.PrepareBatch(context.Background(), "INSERT INTO tasks")
+   if err != nil {
+       return err
+   }
+
+   for _, metric := range newCollection {
+       err := batch.Append(
+           metric.Timestamp,
+           metric.Service,
+           metric.Environment,
+           metric.Queue,
+           metric.Success,
+           metric.ExecutionTimeMs,
+       )
+       if err != nil {
+           return err
+       }
+   }
+
+   return batch.Send()
+}
+
+func pushMetrics() {
+   for {
+       pushEndpoints()
+       pushIncrements()
+       pushTasks()
+       time.Sleep(time.Second)
+   }
+}

func Migrate() error {
    err := Connection.Exec(
        context.TODO(),
        `CREATE TABLE IF NOT EXISTS endpoints (
            timestamp DateTime,
-           service_name LowCardinality(String),
+           service LowCardinality(String),
+           environment LowCardinality(String),
            endpoint LowCardinality(String),
            status_code UInt16,
            response_time_ms UInt32,
-           request_method LowCardinality(String)
+           method LowCardinality(String)
        )
        ENGINE = MergeTree
        PARTITION BY toYYYYMM(timestamp)
-       ORDER BY (service_name, endpoint, request_method, timestamp);`,
+       ORDER BY (service, environment, endpoint, method, timestamp);`,
    )
    if err != nil {
        log.Fatal(err)
@@ -76,18 +224,38 @@ func Migrate() error {
        context.TODO(),
        `CREATE TABLE IF NOT EXISTS tasks (
            timestamp DateTime,
-           service_name LowCardinality(String),
-           queue_name LowCardinality(String),
+           service LowCardinality(String),
+           environment LowCardinality(String),
+           queue LowCardinality(String),
            success Bool,
            execution_time_ms UInt32
        )
        ENGINE = MergeTree
        PARTITION BY toYYYYMM(timestamp)
-       ORDER BY (service_name, queue_name, timestamp);`,
+       ORDER BY (service, environment, queue, timestamp);`,
    )
    if err != nil {
        log.Fatal(err)
        return err
    }

+   err = Connection.Exec(
+       context.TODO(),
+       `CREATE TABLE IF NOT EXISTS increments (
+           timestamp DateTime,
+           service LowCardinality(String),
+           environment LowCardinality(String),
+           name LowCardinality(String),
+           count UInt16
+       )
+       ENGINE = MergeTree
+       PARTITION BY toYYYYMM(timestamp)
+       ORDER BY (service, environment, name, timestamp);`,
+   )
+   if err != nil {
+       log.Fatal(err)
+       return err
+   }

    return nil
}
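The three push functions above share the same buffering shape: handlers append under a mutex, a background loop swaps the slice out, and the drained batch is flushed outside the lock roughly once per second. A standalone sketch of that pattern, with illustrative names:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// Sketch of the swap-under-lock batching pattern used by pushEndpoints,
// pushIncrements, and pushTasks. Names and the string payload are illustrative.
var (
	buf   []string
	bufMu sync.Mutex
)

// record is what request handlers would call: append under the mutex.
func record(item string) {
	bufMu.Lock()
	defer bufMu.Unlock()
	buf = append(buf, item)
}

// flush swaps the buffer for an empty one, then sends the drained batch
// outside the lock so producers are never blocked on the flush itself.
func flush() {
	bufMu.Lock()
	drained := buf
	buf = nil
	bufMu.Unlock()

	if len(drained) == 0 {
		return
	}
	fmt.Println("flushing", len(drained), "items") // stand-in for batch.Send()
}

func main() {
	go func() {
		for {
			flush()
			time.Sleep(time.Second)
		}
	}()
	record("a")
	record("b")
	time.Sleep(2 * time.Second)
}
```

Batching once per second trades a little delivery latency for far fewer ClickHouse inserts than the previous one-insert-per-request approach shown in the table files below.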
@@ -1,43 +1,11 @@
package storage

import (
-   "context"
    client "monitoring/app/storage/clickhouse"
-   "time"
-
-   "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

-type EndpointMetric struct {
-   Timestamp    time.Time `json:"timestamp"`
-   Service      string    `json:"service"`
-   Endpoint     string    `json:"endpoint"`
-   StatusCode   int       `json:"status_code"`
-   ResponseTime int       `json:"response_time"`
-   Method       string    `json:"method"`
-}
-
-func AddEndpointMetric(metric EndpointMetric) error {
-   batch, err := connection().PrepareBatch(context.Background(), "INSERT INTO endpoints")
-   if err != nil {
-       return err
-   }
-
-   err = batch.Append(
-       metric.Timestamp,
-       metric.Service,
-       metric.Endpoint,
-       metric.StatusCode,
-       metric.ResponseTime,
-       metric.Method,
-   )
-   if err != nil {
-       return err
-   }
-
-   return batch.Send()
-}
-
-func connection() driver.Conn {
-   return client.Connection
+func AddEndpointMetric(metric client.EndpointMetric) {
+   client.EndpointsMutex.Lock()
+   defer client.EndpointsMutex.Unlock()
+   client.EndpointsCol = append(client.EndpointsCol, metric)
}
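Once metrics buffered here are flushed into the endpoints table, they can be read back over the same driver connection. A query sketch using the driver.Conn held in client.Connection (clickhouse-go v2); the aggregate query itself is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	client "monitoring/app/storage/clickhouse"
)

// Illustrative read path against the endpoints table created by Migrate.
// Assumes the usual CLICKHOUSE_* environment is set so Connect succeeds.
func main() {
	if err := client.Connect(); err != nil {
		log.Fatal(err)
	}

	rows, err := client.Connection.Query(context.Background(),
		`SELECT endpoint, count() AS hits
		 FROM endpoints
		 WHERE environment = 'production'
		 GROUP BY endpoint
		 ORDER BY hits DESC
		 LIMIT 10`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var endpoint string
		var hits uint64
		if err := rows.Scan(&endpoint, &hits); err != nil {
			log.Fatal(err)
		}
		fmt.Println(endpoint, hits)
	}
}
```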
app/storage/clickhouse/tables/increments.go (new file, 11 lines)
@@ -0,0 +1,11 @@
package storage

import (
    client "monitoring/app/storage/clickhouse"
)

func AddIncrementMetric(metric client.IncrementMetric) {
    client.IncrementsMutex.Lock()
    defer client.IncrementsMutex.Unlock()
    client.IncrementsCol = append(client.IncrementsCol, metric)
}
@@ -1,34 +1,11 @@
package storage

import (
-   "context"
-   "time"
    client "monitoring/app/storage/clickhouse"
)

-type TaskMetric struct {
-   Timestamp       time.Time `json:"timestamp"`
-   Service         string    `json:"service"`
-   Queue           string    `json:"queue"`
-   Success         bool      `json:"success"`
-   ExecutionTimeMs int       `json:"execution_time_ms"`
-}
-
-func AddTaskMetric(metric TaskMetric) error {
-   batch, err := connection().PrepareBatch(context.Background(), "INSERT INTO tasks")
-   if err != nil {
-       return err
-   }
-
-   err = batch.Append(
-       metric.Timestamp,
-       metric.Service,
-       metric.Queue,
-       metric.Success,
-       metric.ExecutionTimeMs,
-   )
-   if err != nil {
-       return err
-   }
-
-   return batch.Send()
+func AddTaskMetric(metric client.TaskMetric) {
+   client.TasksMutex.Lock()
+   defer client.TasksMutex.Unlock()
+   client.TasksCol = append(client.TasksCol, metric)
}
main.go (17)
@@ -4,12 +4,27 @@ import (
    "encoding/json"
    "log"
    endpoint "monitoring/app/routers/metrics"
+   increment "monitoring/app/routers/metrics"
+   task "monitoring/app/routers/metrics"
+   client "monitoring/app/storage/clickhouse"
    endpoints "monitoring/app/storage/clickhouse/tables"
    "net/http"
+   "os"
    "time"
)

+func writeMetric(timestamp time.Time, endpoint string, statusCode int, responseTime int, method string) {
+   endpoints.AddEndpointMetric(client.EndpointMetric{
+       Timestamp:    timestamp.Add(time.Hour * 3),
+       Service:      "monitoring",
+       Environment:  os.Getenv("STAGE"),
+       Endpoint:     endpoint,
+       StatusCode:   statusCode,
+       ResponseTime: responseTime,
+       Method:       method,
+   })
+}

func handlerWrapper(f func(*http.Request) (interface{}, int)) func(http.ResponseWriter, *http.Request) {
    return func(w http.ResponseWriter, r *http.Request) {
        start := time.Now()

@@ -25,6 +40,7 @@ func handlerWrapper(f func(*http.Request) (interface{}, int)) func(http.Response
        } else {
            w.WriteHeader(status)
        }
+       go writeMetric(start, r.URL.Path, status, int(time.Since(start).Milliseconds()), r.Method)
        log.Printf("%s %d %s", r.URL, status, time.Since(start))
    }
}

@@ -43,6 +59,7 @@ func main() {

    http.HandleFunc("/api/v1/metrics/endpoint", handlerWrapper(endpoint.AddEndpointMetric))
    http.HandleFunc("/api/v1/metrics/task", handlerWrapper(task.AddTaskMetric))
+   http.HandleFunc("/api/v1/metrics/increment", handlerWrapper(increment.AddIncrementMetric))
    log.Printf("Server started")
    http.ListenAndServe(":1237", nil)
}
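For context, a hypothetical extra route in the same handler-returns-(value, status) style. The wrapper here is a trimmed stand-in so the sketch compiles on its own; the real handlerWrapper above also logs timing and self-reports an endpoint metric via writeMetric.

```go
package main

import (
	"log"
	"net/http"
)

// health is a hypothetical handler in the same shape the wrapper expects:
// it returns a response value (nil here) and an HTTP status code.
func health(r *http.Request) (interface{}, int) {
	return nil, http.StatusOK
}

// wrap is a trimmed stand-in for handlerWrapper, kept minimal so this
// sketch is self-contained.
func wrap(f func(*http.Request) (interface{}, int)) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		_, status := f(r)
		w.WriteHeader(status)
	}
}

func main() {
	http.HandleFunc("/api/v1/health", wrap(health)) // hypothetical route
	log.Print("listening on :1237")
	log.Fatal(http.ListenAndServe(":1237", nil))
}
```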