Commit e4a1e692 authored by Your Name

first version

parent 2fb5a659
......@@ -11,6 +11,7 @@ package main
import (
"context"
"fmt"
"sort"
"time"
"github.com/jaypipes/ghw"
......@@ -21,6 +22,193 @@ import (
"github.com/ricochet2200/go-disk-usage/du"
)
// type Disks struct {
// TotalSize int64 `json:"total_size"`
// FileSystem []FileSystem `json:"files_system"`
// }
type FileSystem struct {
Device string `json:"device"`
MountPoints []string `json:"mount_points"`
FreeBytes int64 `json:"free_bytes"`
SizeBytes int64 `json:"size_bytes"`
}
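// Disks merges the per-device size and free-space maps returned by the two
// queries below into a slice sorted by device name. Devices that appear in
// only one of the two results are skipped.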
func (c *ProApi) Disks() ([]FileSystem, error) {
size, err := c.FileSystemSizeBytes()
if err != nil {
return nil, err
}
free, err := c.FileSystemFreeBytes()
if err != nil {
return nil, err
}
res := make([]FileSystem, 0, len(size))
for k, v := range size {
if freeV, ok := free[k]; ok {
v.FreeBytes = freeV.FreeBytes
res = append(res, v)
}
}
// Define the sort comparison function.
sortByDevice := func(i, j int) bool {
return res[i].Device < res[j].Device
}
// Sort with sort.Slice so the output order is stable by device name.
sort.Slice(res, sortByDevice)
return res, nil
}
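// FileSystemSizeBytes queries node_filesystem_size_bytes and groups the result
// by the "device" label, collecting every mountpoint seen for each device.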
func (c *ProApi) FileSystemSizeBytes() (map[string]FileSystem, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryStr := `node_filesystem_size_bytes`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
// fmt.Printf("Result:\n%v \nstring %v \n", result.Type(), result.String())
res := make(map[string]FileSystem, 8)
//unique := make(map[string]FileSystem)
switch {
case result.Type() == model.ValScalar:
//scalarVal := result.(*model.Scalar)
// handle scalar stuff
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
// for k, v := range elem.Metric {
// fmt.Println("k", k, "v", v)
// }
if modelName, ok := elem.Metric["device"]; ok {
if oldV, subOk := res[string(modelName)]; subOk {
//continue
if v, ok := elem.Metric["mountpoint"]; ok {
oldV.MountPoints = append(oldV.MountPoints, string(v))
//initMountPoints = append(initMountPoints, string(v))
res[string(modelName)] = oldV
}
} else {
initMountPoints := make([]string, 0, 8)
if v, ok := elem.Metric["mountpoint"]; ok {
initMountPoints = append(initMountPoints, string(v))
}
f := FileSystem{
Device: string(modelName),
MountPoints: initMountPoints,
SizeBytes: int64(elem.Value),
}
res[string(modelName)] = f
}
} else {
continue
}
}
}
return res, nil
}
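// FileSystemFreeBytes queries node_filesystem_free_bytes and groups the result
// by the "device" label, mirroring FileSystemSizeBytes but filling FreeBytes.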
func (c *ProApi) FileSystemFreeBytes() (map[string]FileSystem, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryStr := `node_filesystem_free_bytes`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
// fmt.Printf("Result:\n%v \nstring %v \n", result.Type(), result.String())
res := make(map[string]FileSystem, 8)
//unique := make(map[string]FileSystem)
switch {
case result.Type() == model.ValScalar:
//scalarVal := result.(*model.Scalar)
// handle scalar stuff
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
// for k, v := range elem.Metric {
// fmt.Println("k", k, "v", v)
// }
if modelName, ok := elem.Metric["device"]; ok {
if oldV, subOk := res[string(modelName)]; subOk {
//continue
if v, ok := elem.Metric["mountpoint"]; ok {
oldV.MountPoints = append(oldV.MountPoints, string(v))
// Write the updated entry back so the appended mount point is kept.
res[string(modelName)] = oldV
}
} else {
initMountPoints := make([]string, 0, 8)
if v, ok := elem.Metric["mountpoint"]; ok {
initMountPoints = append(initMountPoints, string(v))
}
f := FileSystem{
Device: string(modelName),
MountPoints: initMountPoints,
FreeBytes: int64(elem.Value),
}
res[string(modelName)] = f
}
} else {
continue
}
}
}
return res, nil
}
//100 - ((node_filesystem_free_bytes * 100) / node_filesystem_size_bytes)
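// DiskUtil reports per-filesystem usage, presumably as the percentage given by
// the PromQL expression above (the body is collapsed in this diff).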
func (c *ProApi) DiskUtil() ([]DeviceInfo, error) {
......
......@@ -3,6 +3,7 @@ package main
import (
"context"
"fmt"
"strconv"
"syscall"
"time"
......@@ -14,6 +15,162 @@ import (
// "github.com/spf13/cobra"
)
type Cpus struct {
TotalUtil int64 `json:"total_util"`
List []Cpu `json:"list"`
}
type Cpu struct {
Seq int64 `json:"seq"`
Model string `json:"model"`
Thread int64 `json:"thread"`
Core int64 `json:"core"`
}
// Cpus returns the total CPU utilization together with the CPU model, core
// count and thread count.
func (c *ProApi) Cpus() (Cpus, error) {
totalUtil, err := c.CpuTotalUtil()
if err != nil {
return Cpus{}, err
}
// inCpus := make(map[string]Cpu)
modelname, core, thread, err := c.CpuItems()
if err != nil {
return Cpus{}, err
}
res := make([]Cpu, 0, 1)
res = append(res, Cpu{
Model: modelname,
Thread: int64(thread) + 1,
Core: int64(core) + 1,
})
result := Cpus{
TotalUtil: totalUtil,
List: res,
}
return result, nil
}
// CpuItems queries node_cpu_info and returns the CPU model name plus the
// highest core and cpu (thread) indexes seen; both are zero-based, hence the
// +1 in Cpus above.
func (c ProApi) CpuItems() (string, int, int, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryParam := `node_cpu_info`
result, warnings, err := c.API.Query(ctx, queryParam, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return "", 0, 0, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
switch {
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
//res := make([]Cpu, 0, len(vectorVal))
model_name := ""
core := 0
thread := 0
for k, elem := range vectorVal {
// int64(elem.Value), nil
_ = elem
if k == 0 {
if modelName, ok := elem.Metric["model_name"]; ok {
model_name = string(modelName)
} else {
continue
}
}
if coreStr, ok := elem.Metric["core"]; ok {
newCore, err := strconv.Atoi(string(coreStr))
if err != nil {
return "", 0, 0, err
}
if newCore > core {
core = newCore
}
} else {
continue
}
if cpuStr, ok := elem.Metric["cpu"]; ok {
newCpu, err := strconv.Atoi(string(cpuStr))
if err != nil {
return "", 0, 0, err
}
if newCpu > thread {
thread = newCpu
}
} else {
continue
}
// res = append(res, Cpu{
// Seq: int64(k),
// Model: model_name,
// })
}
return model_name, core, thread, nil
}
return "", 0, 0, errors.Errorf("CpuItems: no value found for query %s", queryParam)
}
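// CpuTotalUtil returns host-wide CPU utilization as a percentage: one minus
// the ratio of the idle-mode rate to the total rate over a 1m window, times 100.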
func (c ProApi) CpuTotalUtil() (int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryParam := `(1 - sum(rate(node_cpu_seconds_total{mode="idle"}[1m])) by (instance) / sum(rate(node_cpu_seconds_total[1m])) by (instance) ) * 100`
result, warnings, err := c.API.Query(ctx, queryParam, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return 0, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
// fmt.Printf("Result:\n%v \nstring %v \n", result.Type(), result.String())
switch {
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
return int64(elem.Value), nil
}
}
return 0, errors.Errorf("CpuTotalUtil: no value found for query %s", queryParam)
}
func (c *ProApi) CpuUtil() ([]DeviceInfo, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
......
......@@ -61,7 +61,7 @@ x-tagGroups:
- store_model
paths:
/hd:
/hw:
get:
summary: get host hardware info and usage
tags:
......@@ -125,9 +125,6 @@ components:
type: integer
format: int64
mem_free:
type: integer
format: int64
mem_used:
type: integer
format: int64
mem_util:
......@@ -145,13 +142,15 @@ components:
network:
type: object
properties:
device:
type: string
speed:
type: integer
format: int64
send:
send_rate:
type: integer
format: int64
receive:
recv_rate:
type: integer
format: int64
filesystem:
......@@ -178,17 +177,24 @@ components:
model:
type: string
mem_util:
type: integer
format: int64
gpu_util:
type: integer
format: int64
mem_total:
type: integer
format: int64
mem_free:
type: integer
format: int64
power:
type: integer
format: int64
gpu_tmp:
gpu_temp:
type: integer
format: int64
cpu:
type: object
properties:
......@@ -201,6 +207,6 @@ components:
thread:
type: integer
format: int64
physical:
core:
type: integer
format: int64
......@@ -3,15 +3,450 @@ package main
import (
"context"
"fmt"
"sort"
"strconv"
"time"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
)
type Gpu struct {
Seq int `json:"seq"`
Model string `json:"model"`
MemUtil int64 `json:"mem_util"`
GpuUtil int64 `json:"gpu_util"`
MemTotal int64 `json:"mem_total"`
MemFree int64 `json:"mem_free"`
Power int64 `json:"power"`
GpuTemp int64 `json:"gpu_temp"`
}
// Gpus collects per-GPU utilization, memory, power and temperature from the
// DCGM exporter metrics and returns them sorted by GPU index.
func (c *ProApi) Gpus() ([]Gpu, error) {
inGpus := make(map[string]Gpu)
gpuUtil, err := c.GpuUtil(inGpus)
if err != nil {
return nil, err
}
gpuMemUtil, err := c.GpuMemUtil(gpuUtil)
if err != nil {
return nil, err
}
powerUsage, err := c.GpuPowerUsage(gpuMemUtil)
if err != nil {
return nil, err
}
gpuTemp, err := c.GpuTemp(powerUsage)
if err != nil {
return nil, err
}
gpuMemTotal, err := c.GpuMemTotal(gpuTemp)
if err != nil {
return nil, err
}
gpuMemFree, err := c.GpuMemFree(gpuMemTotal)
if err != nil {
return nil, err
}
res := make([]Gpu, 0, 8)
for _, gpu := range gpuMemFree {
res = append(res, gpu)
}
// Define the sort comparison function.
sortByIdx := func(i, j int) bool {
return res[i].Seq < res[j].Seq
}
// Sort with sort.Slice so the output order is stable by GPU index.
sort.Slice(res, sortByIdx)
return res, nil
}
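// GpuUtil queries DCGM_FI_DEV_GPU_UTIL and records the utilization for each
// GPU, keyed by its UUID label; new entries also capture the GPU index and
// model name.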
func (c *ProApi) GpuUtil(inGpus map[string]Gpu) (map[string]Gpu, error) {
// outGpus := make(map[string]Gpu, 8)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
gpuResult, gpuWarnings, err := c.API.Query(ctx, "DCGM_FI_DEV_GPU_UTIL", time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(gpuWarnings) > 0 {
fmt.Printf("Warnings: %v\n", gpuWarnings)
}
switch {
case gpuResult.Type() == model.ValVector:
vectorVal := gpuResult.(model.Vector)
for _, elem := range vectorVal {
uuidStr := ""
if uuid, ok := elem.Metric["UUID"]; ok {
uuidStr = string(uuid)
} else {
continue
}
gpu := Gpu{}
if oldV, ok := inGpus[uuidStr]; ok {
gpu = oldV
} else {
if idxAsStr, ok := elem.Metric["gpu"]; ok {
seq, err := strconv.Atoi(string(idxAsStr))
if err != nil {
return nil, err
}
gpu.Seq = seq
} else {
continue
}
if modelName, ok := elem.Metric["modelName"]; ok {
gpu.Model = string(modelName)
} else {
continue
}
}
gpu.GpuUtil = int64(elem.Value)
inGpus[uuidStr] = gpu
}
}
return inGpus, nil
}
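// GpuMemUtil queries DCGM_FI_DEV_MEM_COPY_UTIL and records the memory-copy
// utilization for each GPU UUID, following the same pattern as GpuUtil.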
func (c *ProApi) GpuMemUtil(inGpus map[string]Gpu) (map[string]Gpu, error) {
// outGpus := make(map[string]Gpu, 8)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
gpuResult, gpuWarnings, err := c.API.Query(ctx, "DCGM_FI_DEV_MEM_COPY_UTIL", time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(gpuWarnings) > 0 {
fmt.Printf("Warnings: %v\n", gpuWarnings)
}
switch {
case gpuResult.Type() == model.ValVector:
vectorVal := gpuResult.(model.Vector)
for _, elem := range vectorVal {
uuidStr := ""
if uuid, ok := elem.Metric["UUID"]; ok {
uuidStr = string(uuid)
} else {
continue
}
gpu := Gpu{}
if oldV, ok := inGpus[uuidStr]; ok {
gpu = oldV
} else {
if idxAsStr, ok := elem.Metric["gpu"]; ok {
seq, err := strconv.Atoi(string(idxAsStr))
if err != nil {
return nil, err
}
gpu.Seq = seq
} else {
continue
}
if modelName, ok := elem.Metric["modelName"]; ok {
gpu.Model = string(modelName)
} else {
continue
}
}
gpu.MemUtil = int64(elem.Value)
inGpus[uuidStr] = gpu
}
}
return inGpus, nil
}
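// GpuPowerUsage queries DCGM_FI_DEV_POWER_USAGE and records the power draw
// (watts) for each GPU UUID.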
func (c *ProApi) GpuPowerUsage(inGpus map[string]Gpu) (map[string]Gpu, error) {
// outGpus := make(map[string]Gpu, 8)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
gpuResult, gpuWarnings, err := c.API.Query(ctx, "DCGM_FI_DEV_POWER_USAGE", time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(gpuWarnings) > 0 {
fmt.Printf("Warnings: %v\n", gpuWarnings)
}
switch {
case gpuResult.Type() == model.ValVector:
vectorVal := gpuResult.(model.Vector)
for _, elem := range vectorVal {
uuidStr := ""
if uuid, ok := elem.Metric["UUID"]; ok {
uuidStr = string(uuid)
} else {
continue
}
gpu := Gpu{}
if oldV, ok := inGpus[uuidStr]; ok {
gpu = oldV
} else {
if idxAsStr, ok := elem.Metric["gpu"]; ok {
seq, err := strconv.Atoi(string(idxAsStr))
if err != nil {
return nil, err
}
gpu.Seq = seq
} else {
continue
}
if modelName, ok := elem.Metric["modelName"]; ok {
gpu.Model = string(modelName)
} else {
continue
}
}
gpu.Power = int64(elem.Value)
inGpus[uuidStr] = gpu
}
}
return inGpus, nil
}
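// GpuMemFree queries DCGM_FI_DEV_FB_FREE (framebuffer free memory, reported
// in MiB) and stores the value converted to bytes.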
func (c *ProApi) GpuMemFree(inGpus map[string]Gpu) (map[string]Gpu, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
gpuResult, gpuWarnings, err := c.API.Query(ctx, "DCGM_FI_DEV_FB_FREE", time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(gpuWarnings) > 0 {
fmt.Printf("Warnings: %v\n", gpuWarnings)
}
switch {
case gpuResult.Type() == model.ValVector:
vectorVal := gpuResult.(model.Vector)
for _, elem := range vectorVal {
uuidStr := ""
if uuid, ok := elem.Metric["UUID"]; ok {
uuidStr = string(uuid)
} else {
continue
}
gpu := Gpu{}
if oldV, ok := inGpus[uuidStr]; ok {
gpu = oldV
} else {
if idxAsStr, ok := elem.Metric["gpu"]; ok {
seq, err := strconv.Atoi(string(idxAsStr))
if err != nil {
return nil, err
}
gpu.Seq = seq
} else {
continue
}
if modelName, ok := elem.Metric["modelName"]; ok {
gpu.Model = string(modelName)
} else {
continue
}
}
gpu.MemFree = (int64(elem.Value) * 1024 * 1024)
inGpus[uuidStr] = gpu
}
}
return inGpus, nil
}
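// GpuMemTotal sums DCGM_FI_DEV_FB_USED and DCGM_FI_DEV_FB_FREE (MiB) and
// stores the total framebuffer memory converted to bytes.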
func (c *ProApi) GpuMemTotal(inGpus map[string]Gpu) (map[string]Gpu, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
gpuResult, gpuWarnings, err := c.API.Query(ctx, "DCGM_FI_DEV_FB_USED+DCGM_FI_DEV_FB_FREE", time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(gpuWarnings) > 0 {
fmt.Printf("Warnings: %v\n", gpuWarnings)
}
switch {
case gpuResult.Type() == model.ValVector:
vectorVal := gpuResult.(model.Vector)
for _, elem := range vectorVal {
uuidStr := ""
if uuid, ok := elem.Metric["UUID"]; ok {
uuidStr = string(uuid)
} else {
continue
}
gpu := Gpu{}
if oldV, ok := inGpus[uuidStr]; ok {
gpu = oldV
} else {
if idxAsStr, ok := elem.Metric["gpu"]; ok {
seq, err := strconv.Atoi(string(idxAsStr))
if err != nil {
return nil, err
}
gpu.Seq = seq
} else {
continue
}
if modelName, ok := elem.Metric["modelName"]; ok {
gpu.Model = string(modelName)
} else {
continue
}
}
gpu.MemTotal = (int64(elem.Value) * 1024 * 1024)
inGpus[uuidStr] = gpu
}
}
return inGpus, nil
}
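// GpuTemp queries DCGM_FI_DEV_GPU_TEMP and records the GPU temperature
// (degrees Celsius) for each GPU UUID.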
func (c *ProApi) GpuTemp(inGpus map[string]Gpu) (map[string]Gpu, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
gpuResult, gpuWarnings, err := c.API.Query(ctx, "DCGM_FI_DEV_GPU_TEMP", time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(gpuWarnings) > 0 {
fmt.Printf("Warnings: %v\n", gpuWarnings)
}
switch {
case gpuResult.Type() == model.ValVector:
vectorVal := gpuResult.(model.Vector)
for _, elem := range vectorVal {
uuidStr := ""
if uuid, ok := elem.Metric["UUID"]; ok {
uuidStr = string(uuid)
} else {
continue
}
gpu := Gpu{}
if oldV, ok := inGpus[uuidStr]; ok {
gpu = oldV
} else {
if idxAsStr, ok := elem.Metric["gpu"]; ok {
seq, err := strconv.Atoi(string(idxAsStr))
if err != nil {
return nil, err
}
gpu.Seq = seq
} else {
continue
}
if modelName, ok := elem.Metric["modelName"]; ok {
gpu.Model = string(modelName)
} else {
continue
}
}
gpu.GpuTemp = int64(elem.Value)
inGpus[uuidStr] = gpu
}
}
return inGpus, nil
}
//DCGM_FI_DEV_POWER_USAGE
func (c *ProApi) GpuPowerUsage() ([]DeviceInfo, error) {
func (c *ProApi) GpuPowerUsageOld() ([]DeviceInfo, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
......@@ -113,7 +548,7 @@ func (c *ProApi) GpuMemTemp() ([]DeviceInfo, error) {
}
func (c *ProApi) GpuTemp() ([]DeviceInfo, error) {
func (c *ProApi) GpuTempOld() ([]DeviceInfo, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
......@@ -164,7 +599,7 @@ func (c *ProApi) GpuTemp() ([]DeviceInfo, error) {
}
func (c *ProApi) GpuUtil() ([]DeviceInfo, error) {
func (c *ProApi) GpuUtilOld() ([]DeviceInfo, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
......@@ -216,7 +651,7 @@ func (c *ProApi) GpuUtil() ([]DeviceInfo, error) {
}
// DCGM_FI_DEV_MEM_COPY_UTIL
func (c *ProApi) GpuMemUtil() ([]DeviceInfo, error) {
func (c *ProApi) GpuMemUtilOld() ([]DeviceInfo, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
......
package main
import "testing"
import (
"encoding/json"
"testing"
)
func TestGpuUtil(t *testing.T) {
//GpuInfo()
func TestGpuInfo(t *testing.T) {
cli, err := NewProCli("http://192.168.1.21:9090")
......@@ -10,7 +15,7 @@ func TestGpuUtil(t *testing.T) {
t.Fatal(err)
}
gpus, err := cli.GpuUtil()
gpus, err := cli.GpuInfo()
for k, v := range gpus {
......@@ -19,7 +24,9 @@ func TestGpuUtil(t *testing.T) {
}
func TestGpuMemUtil(t *testing.T) {
//GpuInit()
func TestGpuInit(t *testing.T) {
cli, err := NewProCli("http://192.168.1.21:9090")
......@@ -27,18 +34,21 @@ func TestGpuMemUtil(t *testing.T) {
t.Fatal(err)
}
gpus, err := cli.GpuMemUtil()
inGpus := make(map[string]Gpu)
for k, v := range gpus {
gpus, err := cli.GpuUtil(inGpus)
if err != nil {
t.Fatal(err)
}
for k, v := range gpus {
t.Log("k", k, "v", v)
}
}
//GpuInfo()
func TestGpuInfo(t *testing.T) {
func TestGpus(t *testing.T) {
cli, err := NewProCli("http://192.168.1.21:9090")
......@@ -46,11 +56,24 @@ func TestGpuInfo(t *testing.T) {
t.Fatal(err)
}
gpus, err := cli.GpuInfo()
// inGpus := make(map[string]Gpu)
for k, v := range gpus {
gpus, err := cli.Gpus()
if err != nil {
t.Fatal(err)
}
for k, v := range gpus {
t.Log("k", k, "v", v)
}
gpusAsJson, err := json.Marshal(gpus)
if err != nil {
t.Fatal(err)
}
t.Log(string(gpusAsJson))
}
......@@ -41,345 +41,263 @@ func main() {
}))
app.Get("/hw", func(c *fiber.Ctx) error {
return nil
})
app.Get("/hw/usage", func(c *fiber.Ctx) error {
res := NewResponse{}
res := make([]DeviceInfo, 0, 10)
gpus, err := cli.Gpus()
gpuUtils, err := cli.GpuUtil()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
fmt.Println("gpu error", err.Error())
res.Msg = err.Error()
res.Code = 2
}
res = append(res, gpuUtils...)
gpuMemUtils, err := cli.GpuMemUtil()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
}
res.Data.Gpus = gpus
res = append(res, gpuMemUtils...)
cpus, err := cli.Cpus()
diskUtil, err := cli.DiskUtil()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
fmt.Println("cpu error", err.Error())
res.Msg = err.Error()
res.Code = 2
}
res = append(res, diskUtil...)
networkSpeed, err := cli.NetworkSpeed()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
}
res.Data.Cpus = cpus
res = append(res, networkSpeed...)
networks, err := cli.Networks()
cpuUtils, err := cli.CpuUtil()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
fmt.Println("network error", err.Error())
res.Msg = err.Error()
res.Code = 2
}
res = append(res, cpuUtils...)
res.Data.Networks = networks
//MemUtil()
filesystems, err := cli.Disks()
memUtils, err := cli.MemUtil()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
fmt.Println("disks error", err.Error())
res.Msg = err.Error()
res.Code = 2
}
res = append(res, memUtils...)
res.Data.Disk = filesystems
netRece, err := cli.NetworkReceive()
mem, err := cli.Mem()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
fmt.Println("mem error", err.Error())
res.Msg = err.Error()
res.Code = 2
}
res = append(res, netRece...)
netSend, err := cli.NetworkTransmit()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
}
res = append(res, netSend...)
diskFreeSize, err := cli.DiskFreeSize()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
}
res.Data.Mem = mem
res = append(res, diskFreeSize...)
res.Code = 1
return c.JSON(&res)
power, err := cli.GpuPowerUsage()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
}
res = append(res, power...)
memTemp, err := cli.GpuMemTemp()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
}
res = append(res, memTemp...)
gpuTemp, err := cli.GpuTemp()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
}
res = append(res, gpuTemp...)
return c.JSON(Response{
Success: true,
Devices: res,
})
})
app.Get("/hw/info", func(c *fiber.Ctx) error {
// app.Get("/hw/usage", func(c *fiber.Ctx) error {
res := make([]DeviceInfo, 0, 10)
// res := make([]DeviceInfo, 0, 10)
diskTotalSize, err := cli.DiskTotalSize()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
}
res = append(res, diskTotalSize...)
memSize, err := cli.MemInfo()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
}
res = append(res, memSize...)
gpuInfo, err := cli.GpuInfo()
// // gpuUtils, err := cli.GpuUtil()
// // if err != nil {
// // return c.JSON(Response{
// // Success: false,
// // Error: err.Error(),
// // })
// // }
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
}
res = append(res, gpuInfo...)
return c.JSON(Response{
Success: true,
Devices: res,
})
})
// Start the server on port 3000
log.Fatal(app.Listen(":5000"))
}
// // res = append(res, gpuUtils...)
type Response struct {
Devices []DeviceInfo `json:"devices"`
Success bool `json:"success"`
Error string `json:"error"`
}
// // gpuMemUtils, err := cli.GpuMemUtil()
// // if err != nil {
// // return c.JSON(Response{
// // Success: false,
// // Error: err.Error(),
// // })
// // }
// func main123() {
// // Initialize a new Fiber app
// app := fiber.New()
// // res = append(res, gpuMemUtils...)
// //showMemory()
// diskUtil, err := cli.DiskUtil()
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// // Define a route for the GET method on the root path '/'
// app.Get("/hw/info", func(c fiber.Ctx) error {
// res = append(res, diskUtil...)
// res := make([]DeviceInfo, 0)
// networkSpeed, err := cli.NetworkSpeed()
// block, err := getBlock()
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// if err != nil {
// c.SendString("getBlock err: " + err.Error())
// }
// res = append(res, block...)
// res = append(res, networkSpeed...)
// mem, err := getMemory()
// cpuUtils, err := cli.CpuUtil()
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// if err != nil {
// c.SendString("getMemory err: " + err.Error())
// }
// res = append(res, cpuUtils...)
// res = append(res, mem...)
// //MemUtil()
// cpu, err := getCPU()
// memUtils, err := cli.MemUtil()
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// if err != nil {
// c.SendString("getMemory err: " + err.Error())
// }
// res = append(res, memUtils...)
// res = append(res, cpu...)
// netRece, err := cli.NetworkReceive()
// gpu, err := getGPU()
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// if err != nil {
// c.SendString("getGPU err: " + err.Error())
// }
// res = append(res, netRece...)
// res = append(res, gpu...)
// netSend, err := cli.NetworkTransmit()
// return c.JSON(res)
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// // Send a string response to the client
// //return c.SendString("Hello, World 👋!")
// })
// res = append(res, netSend...)
// app.Get("/hw/usage", func(c fiber.Ctx) error {
// diskFreeSize, err := cli.DiskFreeSize()
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// res := make([]DeviceInfo, 0)
// res = append(res, diskFreeSize...)
// gpu, err := getGpuUsage()
// // power, err := cli.GpuPowerUsage()
// if err != nil {
// c.SendString("getGpuUsage err: " + err.Error())
// }
// // if err != nil {
// // return c.JSON(Response{
// // Success: false,
// // Error: err.Error(),
// // })
// // }
// res = append(res, gpu...)
// // res = append(res, power...)
// cpu, err := getCPUUsage()
// memTemp, err := cli.GpuMemTemp()
// if err != nil {
// c.SendString("getCPUUsage err: " + err.Error())
// }
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// fmt.Println("cpu", cpu)
// res = append(res, memTemp...)
// res = append(res, cpu...)
// // gpuTemp, err := cli.GpuTemp()
// mem, err := getMemoryUsage()
// // if err != nil {
// // return c.JSON(Response{
// // Success: false,
// // Error: err.Error(),
// // })
// // }
// if err != nil {
// c.SendString("getMemoryUsage err: " + err.Error())
// }
// // res = append(res, gpuTemp...)
// res = append(res, mem...)
// return c.JSON(Response{
// Success: true,
// Devices: res,
// })
// })
// block, err := getBlockUsage()
// if err != nil {
// c.SendString("getBlockUsage err: " + err.Error())
// }
// app.Get("/hw/info", func(c *fiber.Ctx) error {
// res = append(res, block...)
// res := make([]DeviceInfo, 0, 10)
// return c.JSON(res)
// diskTotalSize, err := cli.DiskTotalSize()
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// //res := make([]DeviceInfo, 0)
// //return c.JSON(res)
// res = append(res, diskTotalSize...)
// // Send a string response to the client
// //return c.SendString("Hello, World 👋!")
// })
// memSize, err := cli.MemInfo()
// // Start the server on port 3000
// app.Listen(":3000")
// }
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// //
// // Use and distribution licensed under the Apache license version 2.
// //
// // See the COPYING file in the root project directory for full text.
// //
// res = append(res, memSize...)
// // package main
// gpuInfo, err := cli.GpuInfo()
// // import (
// // "fmt"
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// // "github.com/jaypipes/ghw"
// // "github.com/pkg/errors"
// // "github.com/spf13/cobra"
// // )
// res = append(res, gpuInfo...)
// // // blockCmd represents the install command
// // var blockCmd = &cobra.Command{
// // Use: "block",
// // Short: "Show block storage information for the host system",
// // RunE: showBlock,
// // }
// return c.JSON(Response{
// Success: true,
// Devices: res,
// })
// })
// // // showBlock show block storage information for the host system.
// // func showBlock(cmd *cobra.Command, args []string) error {
// // block, err := ghw.Block()
// // if err != nil {
// // return errors.Wrap(err, "error getting block device info")
// // }
// Start the server on port 3000
log.Fatal(app.Listen(":5000"))
}
// // switch outputFormat {
// // case outputFormatHuman:
// // fmt.Printf("%v\n", block)
type Response struct {
Devices []DeviceInfo `json:"devices"`
Success bool `json:"success"`
Error string `json:"error"`
}
// // for _, disk := range block.Disks {
// // fmt.Printf(" %v\n", disk)
// // for _, part := range disk.Partitions {
// // fmt.Printf(" %v\n", part)
// // }
// // }
// // case outputFormatJSON:
// // fmt.Printf("%s\n", block.JSONString(pretty))
// // case outputFormatYAML:
// // fmt.Printf("%s", block.YAMLString())
// // }
// // return nil
// // }
type NewResponse struct {
Data Data `json:"data"`
Code int `json:"code"`
Msg string `json:"msg"`
}
// // func init() {
// // rootCmd.AddCommand(blockCmd)
// // }
type Data struct {
Gpus []Gpu `json:"gpus"`
Cpus Cpus `json:"cpus"`
Disk []FileSystem `json:"disk"`
Networks []Network `json:"networks"`
Mem Mem `json:"mem"`
}
......@@ -13,7 +13,185 @@ import (
"github.com/shopspring/decimal"
)
func (c *ProApi) MemUtil() ([]DeviceInfo, error) {
type Mem struct {
Total int64 `json:"mem_total"`
Free int64 `json:"mem_free"`
Util int64 `json:"mem_util"`
}
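// Mem aggregates the total, free and utilization readings into a single struct.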
func (c *ProApi) Mem() (Mem, error) {
total, err := c.MemTotal()
if err != nil {
return Mem{}, err
}
free, err := c.MemFree()
if err != nil {
return Mem{}, err
}
util, err := c.MemUtil()
if err != nil {
return Mem{}, err
}
return Mem{
Total: total,
Free: free,
Util: util,
}, nil
}
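// MemUtil returns memory utilization as a percentage based on MemFree and
// MemTotal; buffers and caches are not subtracted (see the commented-out
// query inside).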
func (c *ProApi) MemUtil() (int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
//queryStr := `(1- (node_memory_Buffers_bytes + node_memory_Cached_bytes + node_memory_MemFree_bytes) / node_memory_MemTotal_bytes) * 100`
queryStr := `(1 - node_memory_MemFree_bytes / node_memory_MemTotal_bytes) * 100`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return 0, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
switch {
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
for k, v := range elem.Metric {
fmt.Println("k", k, "v", v)
}
return int64(elem.Value), nil
}
}
return 0, errors.Errorf("MemUtil: no value found for query %s", queryStr)
}
func (c *ProApi) MemTotal() (int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryStr := `node_memory_MemTotal_bytes`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return 0, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
switch {
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
for k, v := range elem.Metric {
fmt.Println("k", k, "v", v)
}
return int64(elem.Value), nil
}
}
return 0, errors.Errorf("MemTotal: no value found for query %s", queryStr)
}
func (c *ProApi) MemFree() (int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryStr := `node_memory_MemFree_bytes`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return 0, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
switch {
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
// for k, v := range elem.Metric {
// fmt.Println("k", k, "v", v)
// }
return int64(elem.Value), nil
}
}
return 0, errors.Errorf("MemFree: no value found for query %s", queryStr)
}
func (c *ProApi) MemUsed() (int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Used memory is derived as total minus free (buffers/caches are not excluded).
queryStr := `node_memory_MemTotal_bytes - node_memory_MemFree_bytes`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return 0, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
switch {
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
for k, v := range elem.Metric {
fmt.Println("k", k, "v", v)
}
return int64(elem.Value), nil
}
}
return 0, errors.Errorf("MemUsed: no value found for query %s", queryStr)
}
func (c *ProApi) MemUtilOld() ([]DeviceInfo, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
......
......@@ -3,20 +3,79 @@ package main
import (
"context"
"fmt"
"sort"
"time"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
)
type Network struct {
Device string `json:"device"`
Speed int64 `json:"speed"`
Send int64 `json:"send_rate"`
Receive int64 `json:"recv_rate"`
}
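// Networks combines link speed, transmit rate and receive rate for each device
// into a slice sorted by device name; only devices reported by NetworkSpeed
// are included.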
func (c *ProApi) Networks() ([]Network, error) {
speed, err := c.NetworkSpeed()
if err != nil {
return nil, err
}
send, err := c.NetworkTransmit()
if err != nil {
return nil, err
}
receive, err := c.NetworkReceive()
if err != nil {
return nil, err
}
res := make([]Network, 0, len(speed))
for k, v := range speed {
n := Network{
Device: k,
Speed: v,
}
if s, ok := send[k]; ok {
n.Send = s
}
if r, ok := receive[k]; ok {
n.Receive = r
}
res = append(res, n)
}
// Define the sort comparison function.
sortByDevice := func(i, j int) bool {
return res[i].Device < res[j].Device
}
// Sort with sort.Slice so the output order is stable by device name.
sort.Slice(res, sortByDevice)
return res, nil
}
//node_network_speed_bytes/1024/1024/1024
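// NetworkSpeed returns node_network_speed_bytes per device (configured link
// speed in bytes per second).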
func (c *ProApi) NetworkSpeed() ([]DeviceInfo, error) {
func (c *ProApi) NetworkSpeed() (map[string]int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryStr := `rate(node_network_speed_bytes[10s])`
queryStr := `node_network_speed_bytes`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
......@@ -27,11 +86,9 @@ func (c *ProApi) NetworkSpeed() ([]DeviceInfo, error) {
}
// fmt.Printf("Result:\n%v \nstring %v \n", result.Type(), result.String())
res := make([]DeviceInfo, 0, 8)
res := make(map[string]int64, 8)
switch {
case result.Type() == model.ValScalar:
//scalarVal := result.(*model.Scalar)
// handle scalar stuff
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
......@@ -42,21 +99,15 @@ func (c *ProApi) NetworkSpeed() ([]DeviceInfo, error) {
// fmt.Println("k", k, "v", v)
// }
r := DeviceInfo{}
if modelName, ok := elem.Metric["device"]; ok {
r.Model = string(modelName)
deviceNameStr := ""
if deviceName, ok := elem.Metric["device"]; ok {
deviceNameStr = string(deviceName)
} else {
continue
}
//if name, ok := elem.Metric["__name__"]; ok {
r.Param = queryStr
//}
r.Power = uint64(elem.Value)
r.Type = "NetworkSpeed"
res = append(res, r)
res[deviceNameStr] = int64(elem.Value)
}
}
......@@ -64,13 +115,14 @@ func (c *ProApi) NetworkSpeed() ([]DeviceInfo, error) {
}
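// NetworkReceive returns the per-device receive rate in bytes per second,
// using a 10s rate() over node_network_receive_bytes_total with bond* and lo
// excluded.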
func (c *ProApi) NetworkReceive() ([]DeviceInfo, error) {
func (c *ProApi) NetworkReceive() (map[string]int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryStr := `sum by(instance) (irate(node_network_receive_bytes_total{device!~"bond.*?|lo"}[5m])) `
//queryStr := `sum by(instance) (irate(node_network_receive_bytes_total{device!~"bond.*?|lo"}[5m])) `
queryStr := `rate(node_network_receive_bytes_total{device!~"bond.*?|lo"}[10s])`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
......@@ -80,36 +132,23 @@ func (c *ProApi) NetworkReceive() ([]DeviceInfo, error) {
}
// fmt.Printf("Result:\n%v \nstring %v \n", result.Type(), result.String())
res := make([]DeviceInfo, 0, 8)
res := make(map[string]int64, 8)
switch {
case result.Type() == model.ValScalar:
//scalarVal := result.(*model.Scalar)
// handle scalar stuff
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
// for k, v := range elem.Metric {
// fmt.Println("k", k, "v", v)
// }
r := DeviceInfo{}
// if modelName, ok := elem.Metric["modelName"]; ok {
// r.Model = string(modelName)
// } else {
// continue
// }
//if name, ok := elem.Metric["__name__"]; ok {
r.Param = queryStr
//}
deviceNameStr := ""
r.Power = uint64(elem.Value)
r.Type = "NetworkReceive"
if deviceName, ok := elem.Metric["device"]; ok {
deviceNameStr = string(deviceName)
} else {
continue
}
res = append(res, r)
res[deviceNameStr] = int64(elem.Value)
}
}
......@@ -120,12 +159,14 @@ func (c *ProApi) NetworkReceive() ([]DeviceInfo, error) {
//sum by(instance) (irate(node_network_receive_bytes_total{device!~"bond.*?|lo"}[5m]))
//sum by(instance) (irate(node_network_transmit_bytes{device!~"bond.*?|lo"}[5m]))
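// NetworkTransmit returns the per-device transmit rate in bytes per second,
// using a 10s rate() over node_network_transmit_bytes_total with bond* and lo
// excluded.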
func (c *ProApi) NetworkTransmit() ([]DeviceInfo, error) {
func (c *ProApi) NetworkTransmit() (map[string]int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryStr := `sum by(instance) (irate(node_network_transmit_bytes_total{device!~"bond.*?|lo"}[5m]))`
//queryStr := `sum by(instance) (irate(node_network_transmit_bytes_total{device!~"bond.*?|lo"}[5m]))`
queryStr := `(rate(node_network_transmit_bytes_total{device!~"bond.*?|lo"}[10s]))`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
......@@ -136,7 +177,7 @@ func (c *ProApi) NetworkTransmit() ([]DeviceInfo, error) {
}
// fmt.Printf("Result:\n%v \nstring %v \n", result.Type(), result.String())
res := make([]DeviceInfo, 0, 8)
res := make(map[string]int64, 8)
switch {
case result.Type() == model.ValScalar:
//scalarVal := result.(*model.Scalar)
......@@ -147,25 +188,15 @@ func (c *ProApi) NetworkTransmit() ([]DeviceInfo, error) {
for _, elem := range vectorVal {
for k, v := range elem.Metric {
fmt.Println("k", k, "v", v)
}
r := DeviceInfo{}
// if modelName, ok := elem.Metric["modelName"]; ok {
// r.Model = string(modelName)
// } else {
// continue
// }
//if name, ok := elem.Metric["__name__"]; ok {
r.Param = queryStr
//}
deviceNameStr := ""
r.Power = uint64(elem.Value)
r.Type = "NetworkTransmit"
if deviceName, ok := elem.Metric["device"]; ok {
deviceNameStr = string(deviceName)
} else {
continue
}
res = append(res, r)
res[deviceNameStr] = int64(elem.Value)
}
}
......