Commit e4a1e692 authored by Your Name

first version

parent 2fb5a659
...@@ -11,6 +11,7 @@ package main
import (
"context"
"fmt"
"sort"
"time"
"github.com/jaypipes/ghw"
...@@ -21,6 +22,193 @@ import (
"github.com/ricochet2200/go-disk-usage/du"
)
// type Disks struct {
// TotalSize int64 `json:"total_size"`
// FileSystem []FileSystem `json:"files_system"`
// }
type FileSystem struct {
Device string `json:"device"`
MountPoints []string `json:"mount_points"`
FreeBytes int64 `json:"free_bytes"`
SizeBytes int64 `json:"size_bytes"`
}
func (c *ProApi) Disks() ([]FileSystem, error) {
size, err := c.FileSystemSizeBytes()
if err != nil {
return nil, err
}
free, err := c.FileSystemFreeBytes()
if err != nil {
return nil, err
}
res := make([]FileSystem, 0, len(size))
for k, v := range size {
if freeV, ok := free[k]; ok {
v.FreeBytes = freeV.FreeBytes
res = append(res, v)
}
}
// comparison function: order filesystems by device name
sortByDevice := func(i, j int) bool {
return res[i].Device < res[j].Device
}
// sort the result with sort.Slice
sort.Slice(res, sortByDevice)
return res, nil
}
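// Usage sketch (illustrative only, not part of this commit): how a caller might
// consume Disks(). NewProCli and the endpoint address are taken from the tests;
// error handling is abbreviated.
func exampleDisksUsage() {
cli, err := NewProCli("http://192.168.1.21:9090")
if err != nil {
fmt.Println("connect error:", err)
return
}
fs, err := cli.Disks()
if err != nil {
fmt.Println("query error:", err)
return
}
for _, f := range fs {
// one entry per device: its mount points plus size/free in bytes
fmt.Printf("%s %v free=%d size=%d\n", f.Device, f.MountPoints, f.FreeBytes, f.SizeBytes)
}
}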
func (c *ProApi) FileSystemSizeBytes() (map[string]FileSystem, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryStr := `node_filesystem_size_bytes`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
// fmt.Printf("Result:\n%v \nstring %v \n", result.Type(), result.String())
res := make(map[string]FileSystem, 8)
//unique := make(map[string]FileSystem)
switch {
case result.Type() == model.ValScalar:
//scalarVal := result.(*model.Scalar)
// handle scalar stuff
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
// for k, v := range elem.Metric {
// fmt.Println("k", k, "v", v)
// }
if modelName, ok := elem.Metric["device"]; ok {
if oldV, subOk := res[string(modelName)]; subOk {
//continue
if v, ok := elem.Metric["mountpoint"]; ok {
oldV.MountPoints = append(oldV.MountPoints, string(v))
//initMountPoints = append(initMountPoints, string(v))
res[string(modelName)] = oldV
}
} else {
initMountPoints := make([]string, 0, 8)
if v, ok := elem.Metric["mountpoint"]; ok {
initMountPoints = append(initMountPoints, string(v))
}
f := FileSystem{
Device: string(modelName),
MountPoints: initMountPoints,
SizeBytes: int64(elem.Value),
}
res[string(modelName)] = f
}
} else {
continue
}
}
}
return res, nil
}
func (c *ProApi) FileSystemFreeBytes() (map[string]FileSystem, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryStr := `node_filesystem_free_bytes`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
// fmt.Printf("Result:\n%v \nstring %v \n", result.Type(), result.String())
res := make(map[string]FileSystem, 8)
//unique := make(map[string]FileSystem)
switch {
case result.Type() == model.ValScalar:
//scalarVal := result.(*model.Scalar)
// handle scalar stuff
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
// for k, v := range elem.Metric {
// fmt.Println("k", k, "v", v)
// }
if modelName, ok := elem.Metric["device"]; ok {
if oldV, subOk := res[string(modelName)]; subOk {
if v, ok := elem.Metric["mountpoint"]; ok {
oldV.MountPoints = append(oldV.MountPoints, string(v))
// write the updated entry back so the extra mount point is not lost
res[string(modelName)] = oldV
}
} else {
initMountPoints := make([]string, 0, 8)
if v, ok := elem.Metric["mountpoint"]; ok {
initMountPoints = append(initMountPoints, string(v))
}
f := FileSystem{
Device: string(modelName),
MountPoints: initMountPoints,
FreeBytes: int64(elem.Value),
}
res[string(modelName)] = f
}
} else {
continue
}
}
}
return res, nil
}
//100 - ((node_filesystem_free_bytes * 100) / node_filesystem_size_bytes)
func (c *ProApi) DiskUtil() ([]DeviceInfo, error) {
......
...@@ -3,6 +3,7 @@ package main
import (
"context"
"fmt"
"strconv"
"syscall"
"time"
...@@ -14,6 +15,162 @@ import (
// "github.com/spf13/cobra"
)
type Cpus struct {
TotalUtil int64 `json:"total_util"`
List []Cpu `json:"list"`
}
type Cpu struct {
Seq int64 `json:"seq"`
Model string `json:"model"`
Thread int64 `json:"thread"`
Core int64 `json:"core"`
}
// Cpus aggregates total CPU utilization with the CPU model, core and thread counts.
func (c *ProApi) Cpus() (Cpus, error) {
totalUtil, err := c.CpuTotalUtil()
if err != nil {
return Cpus{}, err
}
// inCpus := make(map[string]Cpu)
modelname, core, thread, err := c.CpuItems()
if err != nil {
return Cpus{}, err
}
res := make([]Cpu, 0, 1)
res = append(res, Cpu{
Model: modelname,
Thread: int64(thread) + 1,
Core: int64(core) + 1,
})
result := Cpus{
TotalUtil: totalUtil,
List: res,
}
return result, nil
}
// node_cpu_seconds_total{mode="idle"}
func (c ProApi) CpuItems() (string, int, int, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryParam := `node_cpu_info`
result, warnings, err := c.API.Query(ctx, queryParam, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return "", 0, 0, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
switch {
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
//res := make([]Cpu, 0, len(vectorVal))
model_name := ""
core := 0
thread := 0
for k, elem := range vectorVal {
// int64(elem.Value), nil
_ = elem
if k == 0 {
if modelName, ok := elem.Metric["model_name"]; ok {
model_name = string(modelName)
} else {
continue
}
}
if coreStr, ok := elem.Metric["core"]; ok {
newCore, err := strconv.Atoi(string(coreStr))
if err != nil {
return "", 0, 0, err
}
if newCore > core {
core = newCore
}
} else {
continue
}
if cpuStr, ok := elem.Metric["cpu"]; ok {
newCpu, err := strconv.Atoi(string(cpuStr))
if err != nil {
return "", 0, 0, err
}
if newCpu > thread {
thread = newCpu
}
} else {
continue
}
// res = append(res, Cpu{
// Seq: int64(k),
// Model: model_name,
// })
}
return model_name, core, thread, nil
}
return "", 0, 0, errors.Errorf("cpu can not find the query %s value", queryParam)
}
func (c ProApi) CpuTotalUtil() (int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryParam := `(1 - sum(rate(node_cpu_seconds_total{mode="idle"}[1m])) by (instance) / sum(rate(node_cpu_seconds_total[1m])) by (instance) ) * 100`
result, warnings, err := c.API.Query(ctx, queryParam, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return 0, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
// fmt.Printf("Result:\n%v \nstring %v \n", result.Type(), result.String())
switch {
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
return int64(elem.Value), nil
}
}
return 0, errors.Errorf("can not find the query %s value", queryParam)
}
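// The PromQL in CpuTotalUtil computes utilization as (1 - idle_rate/total_rate) * 100.
// Hypothetical sketch of the same arithmetic on plain numbers, to make the int64
// truncation explicit (the values are illustrative, not measured):
func exampleCpuUtilMath() {
idleRate := 6.0  // summed rate of idle CPU-seconds per second
totalRate := 8.0 // summed rate over all modes; roughly the logical core count
util := (1 - idleRate/totalRate) * 100
fmt.Println(int64(util)) // 25
}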
func (c *ProApi) CpuUtil() ([]DeviceInfo, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
......
...@@ -61,7 +61,7 @@ x-tagGroups:
- store_model
paths:
/hw:
get:
summary: get host hardware info and usage
tags:
...@@ -125,9 +125,6 @@ components:
type: integer
format: int64
mem_free:
type: integer
format: int64
mem_util:
...@@ -145,13 +142,15 @@ components:
network:
type: object
properties:
device:
type: string
speed:
type: integer
format: int64
send_rate:
type: integer
format: int64
recv_rate:
type: integer
format: int64
filesystem:
...@@ -178,17 +177,24 @@ components:
model:
type: string
mem_util:
type: integer
format: int64
gpu_util:
type: integer
format: int64
mem_total:
type: integer
format: int64
mem_free:
type: integer
format: int64
power:
type: integer
format: int64
gpu_temp:
type: integer
format: int64
cpu:
type: object
properties:
...@@ -201,6 +207,6 @@ components:
thread:
type: integer
format: int64
core:
type: integer
format: int64
...@@ -3,15 +3,450 @@ package main
import (
"context"
"fmt"
"sort"
"strconv"
"time"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
)
type Gpu struct {
Seq int `json:"seq"`
Model string `json:"model"`
MemUtil int64 `json:"mem_util"`
GpuUtil int64 `json:"gpu_util"`
MemTotal int64 `json:"mem_total"`
MemFree int64 `json:"mem_free"`
Power int64 `json:"power"`
GpuTemp int64 `json:"gpu_temp"`
}
// Gpus collects per-GPU DCGM metrics and returns them sorted by GPU index.
func (c *ProApi) Gpus() ([]Gpu, error) {
inGpus := make(map[string]Gpu)
gpuUtil, err := c.GpuUtil(inGpus)
if err != nil {
return nil, err
}
gpuMemUtil, err := c.GpuMemUtil(gpuUtil)
if err != nil {
return nil, err
}
powerUsage, err := c.GpuPowerUsage(gpuMemUtil)
if err != nil {
return nil, err
}
gpuTemp, err := c.GpuTemp(powerUsage)
if err != nil {
return nil, err
}
gpuMemTotal, err := c.GpuMemTotal(gpuTemp)
if err != nil {
return nil, err
}
gpuMemFree, err := c.GpuMemFree(gpuMemTotal)
if err != nil {
return nil, err
}
res := make([]Gpu, 0, 8)
for _, gpu := range gpuMemFree {
res = append(res, gpu)
}
// comparison function: order GPUs by index
sortByIdx := func(i, j int) bool {
return res[i].Seq < res[j].Seq
}
// sort the result with sort.Slice
sort.Slice(res, sortByIdx)
return res, nil
}
func (c *ProApi) GpuUtil(inGpus map[string]Gpu) (map[string]Gpu, error) {
// outGpus := make(map[string]Gpu, 8)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
gpuResult, gpuWarnings, err := c.API.Query(ctx, "DCGM_FI_DEV_GPU_UTIL", time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(gpuWarnings) > 0 {
fmt.Printf("Warnings: %v\n", gpuWarnings)
}
switch {
case gpuResult.Type() == model.ValVector:
vectorVal := gpuResult.(model.Vector)
for _, elem := range vectorVal {
uuidStr := ""
if uuid, ok := elem.Metric["UUID"]; ok {
uuidStr = string(uuid)
} else {
continue
}
gpu := Gpu{}
if oldV, ok := inGpus[uuidStr]; ok {
gpu = oldV
} else {
if idxAsStr, ok := elem.Metric["gpu"]; ok {
seq, err := strconv.Atoi(string(idxAsStr))
if err != nil {
return nil, err
}
gpu.Seq = seq
} else {
continue
}
if modelName, ok := elem.Metric["modelName"]; ok {
gpu.Model = string(modelName)
} else {
continue
}
}
gpu.GpuUtil = int64(elem.Value)
inGpus[uuidStr] = gpu
}
}
return inGpus, nil
}
func (c *ProApi) GpuMemUtil(inGpus map[string]Gpu) (map[string]Gpu, error) {
// outGpus := make(map[string]Gpu, 8)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
gpuResult, gpuWarnings, err := c.API.Query(ctx, "DCGM_FI_DEV_MEM_COPY_UTIL", time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(gpuWarnings) > 0 {
fmt.Printf("Warnings: %v\n", gpuWarnings)
}
switch {
case gpuResult.Type() == model.ValVector:
vectorVal := gpuResult.(model.Vector)
for _, elem := range vectorVal {
uuidStr := ""
if uuid, ok := elem.Metric["UUID"]; ok {
uuidStr = string(uuid)
} else {
continue
}
gpu := Gpu{}
if oldV, ok := inGpus[uuidStr]; ok {
gpu = oldV
} else {
if idxAsStr, ok := elem.Metric["gpu"]; ok {
seq, err := strconv.Atoi(string(idxAsStr))
if err != nil {
return nil, err
}
gpu.Seq = seq
} else {
continue
}
if modelName, ok := elem.Metric["modelName"]; ok {
gpu.Model = string(modelName)
} else {
continue
}
}
gpu.MemUtil = int64(elem.Value)
inGpus[uuidStr] = gpu
}
}
return inGpus, nil
}
func (c *ProApi) GpuPowerUsage(inGpus map[string]Gpu) (map[string]Gpu, error) {
// outGpus := make(map[string]Gpu, 8)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
gpuResult, gpuWarnings, err := c.API.Query(ctx, "DCGM_FI_DEV_POWER_USAGE", time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(gpuWarnings) > 0 {
fmt.Printf("Warnings: %v\n", gpuWarnings)
}
switch {
case gpuResult.Type() == model.ValVector:
vectorVal := gpuResult.(model.Vector)
for _, elem := range vectorVal {
uuidStr := ""
if uuid, ok := elem.Metric["UUID"]; ok {
uuidStr = string(uuid)
} else {
continue
}
gpu := Gpu{}
if oldV, ok := inGpus[uuidStr]; ok {
gpu = oldV
} else {
if idxAsStr, ok := elem.Metric["gpu"]; ok {
seq, err := strconv.Atoi(string(idxAsStr))
if err != nil {
return nil, err
}
gpu.Seq = seq
} else {
continue
}
if modelName, ok := elem.Metric["modelName"]; ok {
gpu.Model = string(modelName)
} else {
continue
}
}
gpu.Power = int64(elem.Value)
inGpus[uuidStr] = gpu
}
}
return inGpus, nil
}
func (c *ProApi) GpuMemFree(inGpus map[string]Gpu) (map[string]Gpu, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
gpuResult, gpuWarnings, err := c.API.Query(ctx, "DCGM_FI_DEV_FB_FREE", time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(gpuWarnings) > 0 {
fmt.Printf("Warnings: %v\n", gpuWarnings)
}
switch {
case gpuResult.Type() == model.ValVector:
vectorVal := gpuResult.(model.Vector)
for _, elem := range vectorVal {
uuidStr := ""
if uuid, ok := elem.Metric["UUID"]; ok {
uuidStr = string(uuid)
} else {
continue
}
gpu := Gpu{}
if oldV, ok := inGpus[uuidStr]; ok {
gpu = oldV
} else {
if idxAsStr, ok := elem.Metric["gpu"]; ok {
seq, err := strconv.Atoi(string(idxAsStr))
if err != nil {
return nil, err
}
gpu.Seq = seq
} else {
continue
}
if modelName, ok := elem.Metric["modelName"]; ok {
gpu.Model = string(modelName)
} else {
continue
}
}
gpu.MemFree = (int64(elem.Value) * 1024 * 1024)
inGpus[uuidStr] = gpu
}
}
return inGpus, nil
}
func (c *ProApi) GpuMemTotal(inGpus map[string]Gpu) (map[string]Gpu, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
gpuResult, gpuWarnings, err := c.API.Query(ctx, "DCGM_FI_DEV_FB_USED+DCGM_FI_DEV_FB_FREE", time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(gpuWarnings) > 0 {
fmt.Printf("Warnings: %v\n", gpuWarnings)
}
switch {
case gpuResult.Type() == model.ValVector:
vectorVal := gpuResult.(model.Vector)
for _, elem := range vectorVal {
uuidStr := ""
if uuid, ok := elem.Metric["UUID"]; ok {
uuidStr = string(uuid)
} else {
continue
}
gpu := Gpu{}
if oldV, ok := inGpus[uuidStr]; ok {
gpu = oldV
} else {
if idxAsStr, ok := elem.Metric["gpu"]; ok {
seq, err := strconv.Atoi(string(idxAsStr))
if err != nil {
return nil, err
}
gpu.Seq = seq
} else {
continue
}
if modelName, ok := elem.Metric["modelName"]; ok {
gpu.Model = string(modelName)
} else {
continue
}
}
gpu.MemTotal = (int64(elem.Value) * 1024 * 1024)
inGpus[uuidStr] = gpu
}
}
return inGpus, nil
}
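// Unit sketch (illustrative, not part of this commit): DCGM_FI_DEV_FB_FREE and
// DCGM_FI_DEV_FB_USED report framebuffer memory in MiB, which is why GpuMemFree
// and GpuMemTotal multiply the sampled value by 1024*1024 to expose bytes.
func exampleFbMibToBytes() {
fbTotalMib := int64(1024) // made-up 1 GiB framebuffer
fmt.Println(fbTotalMib * 1024 * 1024) // 1073741824 bytes
}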
func (c *ProApi) GpuTemp(inGpus map[string]Gpu) (map[string]Gpu, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
gpuResult, gpuWarnings, err := c.API.Query(ctx, "DCGM_FI_DEV_GPU_TEMP", time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
if len(gpuWarnings) > 0 {
fmt.Printf("Warnings: %v\n", gpuWarnings)
}
switch {
case gpuResult.Type() == model.ValVector:
vectorVal := gpuResult.(model.Vector)
for _, elem := range vectorVal {
uuidStr := ""
if uuid, ok := elem.Metric["UUID"]; ok {
uuidStr = string(uuid)
} else {
continue
}
gpu := Gpu{}
if oldV, ok := inGpus[uuidStr]; ok {
gpu = oldV
} else {
if idxAsStr, ok := elem.Metric["gpu"]; ok {
seq, err := strconv.Atoi(string(idxAsStr))
if err != nil {
return nil, err
}
gpu.Seq = seq
} else {
continue
}
if modelName, ok := elem.Metric["modelName"]; ok {
gpu.Model = string(modelName)
} else {
continue
}
}
gpu.GpuTemp = int64(elem.Value)
inGpus[uuidStr] = gpu
}
}
return inGpus, nil
}
//DCGM_FI_DEV_POWER_USAGE
func (c *ProApi) GpuPowerUsageOld() ([]DeviceInfo, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
...@@ -113,7 +548,7 @@ func (c *ProApi) GpuMemTemp() ([]DeviceInfo, error) {
}
func (c *ProApi) GpuTempOld() ([]DeviceInfo, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
...@@ -164,7 +599,7 @@ func (c *ProApi) GpuTemp() ([]DeviceInfo, error) {
}
func (c *ProApi) GpuUtilOld() ([]DeviceInfo, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
...@@ -216,7 +651,7 @@ func (c *ProApi) GpuUtil() ([]DeviceInfo, error) {
}
// DCGM_FI_DEV_MEM_COPY_UTIL
func (c *ProApi) GpuMemUtilOld() ([]DeviceInfo, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
......
package main
import (
"encoding/json"
"testing"
)
//GpuInfo()
func TestGpuInfo(t *testing.T) {
cli, err := NewProCli("http://192.168.1.21:9090")
...@@ -10,7 +15,7 @@ func TestGpuUtil(t *testing.T) {
t.Fatal(err)
}
gpus, err := cli.GpuInfo()
for k, v := range gpus {
...@@ -19,7 +24,9 @@ func TestGpuUtil(t *testing.T) {
}
//GpuInit()
func TestGpuInit(t *testing.T) {
cli, err := NewProCli("http://192.168.1.21:9090")
...@@ -27,18 +34,21 @@ func TestGpuMemUtil(t *testing.T) {
t.Fatal(err)
}
inGpus := make(map[string]Gpu)
gpus, err := cli.GpuUtil(inGpus)
if err != nil {
t.Fatal(err)
}
for k, v := range gpus {
t.Log("k", k, "v", v)
}
}
func TestGpus(t *testing.T) {
cli, err := NewProCli("http://192.168.1.21:9090")
...@@ -46,11 +56,24 @@ func TestGpuInfo(t *testing.T) {
t.Fatal(err)
}
// inGpus := make(map[string]Gpu)
gpus, err := cli.Gpus()
if err != nil {
t.Fatal(err)
}
for k, v := range gpus {
t.Log("k", k, "v", v)
}
gpusAsJson, err := json.Marshal(gpus)
if err != nil {
t.Fatal(err)
}
t.Log(string(gpusAsJson))
}
...@@ -41,345 +41,263 @@ func main() {
}))
app.Get("/hw", func(c *fiber.Ctx) error {
res := NewResponse{}
gpus, err := cli.Gpus()
if err != nil {
fmt.Println("gpu error", err.Error())
res.Msg = err.Error()
res.Code = 2
}
res.Data.Gpus = gpus
cpus, err := cli.Cpus()
if err != nil {
fmt.Println("cpu error", err.Error())
res.Msg = err.Error()
res.Code = 2
}
res.Data.Cpus = cpus
networks, err := cli.Networks()
if err != nil {
fmt.Println("network error", err.Error())
res.Msg = err.Error()
res.Code = 2
}
res.Data.Networks = networks
filesystems, err := cli.Disks()
if err != nil {
fmt.Println("disks error", err.Error())
res.Msg = err.Error()
res.Code = 2
}
res.Data.Disk = filesystems
mem, err := cli.Mem()
if err != nil {
fmt.Println("mem error", err.Error())
res.Msg = err.Error()
res.Code = 2
}
res.Data.Mem = mem
res.Code = 1
return c.JSON(&res)
})
app.Get("/hw/info", func(c *fiber.Ctx) error { // app.Get("/hw/usage", func(c *fiber.Ctx) error {
res := make([]DeviceInfo, 0, 10) // res := make([]DeviceInfo, 0, 10)
diskTotalSize, err := cli.DiskTotalSize() // // gpuUtils, err := cli.GpuUtil()
if err != nil { // // if err != nil {
return c.JSON(Response{ // // return c.JSON(Response{
Success: false, // // Success: false,
Error: err.Error(), // // Error: err.Error(),
}) // // })
} // // }
res = append(res, diskTotalSize...)
memSize, err := cli.MemInfo()
if err != nil {
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
}
res = append(res, memSize...)
gpuInfo, err := cli.GpuInfo()
if err != nil { // // res = append(res, gpuUtils...)
return c.JSON(Response{
Success: false,
Error: err.Error(),
})
}
res = append(res, gpuInfo...)
return c.JSON(Response{
Success: true,
Devices: res,
})
})
// Start the server on port 3000
log.Fatal(app.Listen(":5000"))
}
type Response struct { // // gpuMemUtils, err := cli.GpuMemUtil()
Devices []DeviceInfo `json:"devices"` // // if err != nil {
Success bool `json:"success"` // // return c.JSON(Response{
Error string `json:"error` // // Success: false,
} // // Error: err.Error(),
// // })
// // }
// func main123() { // // res = append(res, gpuMemUtils...)
// // Initialize a new Fiber app
// app := fiber.New()
// //showMemory() // diskUtil, err := cli.DiskUtil()
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// // Define a route for the GET method on the root path '/' // res = append(res, diskUtil...)
// app.Get("/hw/info", func(c fiber.Ctx) error {
// res := make([]DeviceInfo, 0) // networkSpeed, err := cli.NetworkSpeed()
// block, err := getBlock() // if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// if err != nil { // res = append(res, networkSpeed...)
// c.SendString("getBlock err: " + err.Error())
// }
// res = append(res, block...)
// mem, err := getMemory() // cpuUtils, err := cli.CpuUtil()
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// if err != nil { // res = append(res, cpuUtils...)
// c.SendString("getMemory err: " + err.Error())
// }
// res = append(res, mem...) // //MemUtil()
// cpu, err := getCPU() // memUtils, err := cli.MemUtil()
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// if err != nil { // res = append(res, memUtils...)
// c.SendString("getMemory err: " + err.Error())
// }
// res = append(res, cpu...) // netRece, err := cli.NetworkReceive()
// gpu, err := getGPU() // if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// if err != nil { // res = append(res, netRece...)
// c.SendString("getGPU err: " + err.Error())
// }
// res = append(res, gpu...) // netSend, err := cli.NetworkTransmit()
// return c.JSON(res) // if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// // Send a string response to the client // res = append(res, netSend...)
// //return c.SendString("Hello, World 👋!")
// })
// app.Get("/hw/usage", func(c fiber.Ctx) error { // diskFreeSize, err := cli.DiskFreeSize()
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// res := make([]DeviceInfo, 0) // res = append(res, diskFreeSize...)
// gpu, err := getGpuUsage() // // power, err := cli.GpuPowerUsage()
// if err != nil { // // if err != nil {
// c.SendString("getGpuUsage err: " + err.Error()) // // return c.JSON(Response{
// } // // Success: false,
// // Error: err.Error(),
// // })
// // }
// res = append(res, gpu...) // // res = append(res, power...)
// cpu, err := getCPUUsage() // memTemp, err := cli.GpuMemTemp()
// if err != nil { // if err != nil {
// c.SendString("getCPUUsage err: " + err.Error()) // return c.JSON(Response{
// } // Success: false,
// Error: err.Error(),
// })
// }
// fmt.Println("cpu", cpu) // res = append(res, memTemp...)
// res = append(res, cpu...) // // gpuTemp, err := cli.GpuTemp()
// mem, err := getMemoryUsage() // // if err != nil {
// // return c.JSON(Response{
// // Success: false,
// // Error: err.Error(),
// // })
// // }
// if err != nil { // // res = append(res, gpuTemp...)
// c.SendString("getMemoryUsage err: " + err.Error())
// }
// res = append(res, mem...) // return c.JSON(Response{
// Success: true,
// Devices: res,
// })
// })
// block, err := getBlockUsage() // app.Get("/hw/info", func(c *fiber.Ctx) error {
// if err != nil {
// c.SendString("getBlockUsage err: " + err.Error())
// }
// res = append(res, block...) // res := make([]DeviceInfo, 0, 10)
// return c.JSON(res) // diskTotalSize, err := cli.DiskTotalSize()
// if err != nil {
// return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// //res := make([]DeviceInfo, 0) // res = append(res, diskTotalSize...)
// //return c.JSON(res)
// // Send a string response to the client // memSize, err := cli.MemInfo()
// //return c.SendString("Hello, World 👋!")
// })
// // Start the server on port 3000 // if err != nil {
// app.Listen(":3000") // return c.JSON(Response{
// } // Success: false,
// Error: err.Error(),
// })
// }
// // // res = append(res, memSize...)
// // Use and distribution licensed under the Apache license version 2.
// //
// // See the COPYING file in the root project directory for full text.
// //
// // package main // gpuInfo, err := cli.GpuInfo()
// // import ( // if err != nil {
// // "fmt" // return c.JSON(Response{
// Success: false,
// Error: err.Error(),
// })
// }
// // "github.com/jaypipes/ghw" // res = append(res, gpuInfo...)
// // "github.com/pkg/errors"
// // "github.com/spf13/cobra"
// // )
// // // blockCmd represents the install command // return c.JSON(Response{
// // var blockCmd = &cobra.Command{ // Success: true,
// // Use: "block", // Devices: res,
// // Short: "Show block storage information for the host system", // })
// // RunE: showBlock, // })
// // }
// Start the server on port 5000
log.Fatal(app.Listen(":5000"))
}
type Response struct {
Devices []DeviceInfo `json:"devices"`
Success bool `json:"success"`
Error string `json:"error"`
}
type NewResponse struct {
Data Data `json:"data"`
Code int `json:"code"`
Msg string `json:"msg"`
}
type Data struct {
Gpus []Gpu `json:"gpus"`
Cpus Cpus `json:"cpus"`
Disk []FileSystem `json:"disk"`
Networks []Network `json:"networks"`
Mem Mem `json:"mem"`
}
...@@ -13,7 +13,185 @@ import (
"github.com/shopspring/decimal"
)
type Mem struct {
Total int64 `json:"mem_total"`
Free int64 `json:"mem_free"`
Util int64 `json:"mem_util"`
}
func (c *ProApi) Mem() (Mem, error) {
total, err := c.MemTotal()
if err != nil {
return Mem{}, err
}
free, err := c.MemFree()
if err != nil {
return Mem{}, err
}
util, err := c.MemUtil()
if err != nil {
return Mem{}, err
}
return Mem{
Total: total,
Free: free,
Util: util,
}, nil
}
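// Consistency sketch (illustrative, not part of this commit): the three fields
// returned by Mem() are tied together by the query used in MemUtil, i.e.
// util ≈ (1 - free/total) * 100. With made-up values:
func exampleMemUtilMath() {
total := int64(16 * 1024 * 1024 * 1024) // 16 GiB
free := int64(4 * 1024 * 1024 * 1024)   // 4 GiB
util := (1 - float64(free)/float64(total)) * 100
fmt.Println(int64(util)) // 75
}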
func (c *ProApi) MemUtil() (int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
//queryStr := `(1- (node_memory_Buffers_bytes + node_memory_Cached_bytes + node_memory_MemFree_bytes) / node_memory_MemTotal_bytes) * 100`
queryStr := `(1- (node_memory_MemFree_bytes) / node_memory_MemTotal_bytes) * 100`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return 0, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
switch {
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
for k, v := range elem.Metric {
fmt.Println("k", k, "v", v)
}
return int64(elem.Value), nil
}
}
return 0, errors.Errorf("MemUtil can not find the query %s value", queryStr)
}
func (c *ProApi) MemTotal() (int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryStr := `node_memory_MemTotal_bytes`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return 0, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
switch {
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
for k, v := range elem.Metric {
fmt.Println("k", k, "v", v)
}
return int64(elem.Value), nil
}
}
return 0, errors.Errorf("MemTotal can not find the query %s value", queryStr)
}
func (c *ProApi) MemFree() (int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryStr := `node_memory_MemFree_bytes`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return 0, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
switch {
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
// for k, v := range elem.Metric {
// fmt.Println("k", k, "v", v)
// }
return int64(elem.Value), nil
}
}
return 0, errors.Errorf("MemFree can not find the query %s value", queryStr)
}
func (c *ProApi) MemUsed() (int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// used memory = total - free
queryStr := `node_memory_MemTotal_bytes - node_memory_MemFree_bytes`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return 0, err
}
if len(warnings) > 0 {
fmt.Printf("Warnings: %v\n", warnings)
}
switch {
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
for k, v := range elem.Metric {
fmt.Println("k", k, "v", v)
}
return int64(elem.Value), nil
}
}
return 0, errors.Errorf("MemFree can not find the query %s value", queryStr)
}
func (c *ProApi) MemUtilOld() ([]DeviceInfo, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
......
...@@ -3,20 +3,79 @@ package main
import (
"context"
"fmt"
"sort"
"time"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
)
type Network struct {
Device string `json:"device"`
Speed int64 `json:"speed"`
Send int64 `json:"send_rate"`
Receive int64 `json:"recv_rate"`
}
func (c *ProApi) Networks() ([]Network, error) {
speed, err := c.NetworkSpeed()
if err != nil {
return nil, err
}
send, err := c.NetworkTransmit()
if err != nil {
return nil, err
}
receive, err := c.NetworkReceive()
if err != nil {
return nil, err
}
res := make([]Network, 0, len(speed))
for k, v := range speed {
n := Network{
Device: k,
Speed: v,
}
if s, ok := send[k]; ok {
n.Send = s
}
if r, ok := receive[k]; ok {
n.Receive = r
}
res = append(res, n)
}
// comparison function: order interfaces by device name
sortByDevice := func(i, j int) bool {
return res[i].Device < res[j].Device
}
// sort the result with sort.Slice
sort.Slice(res, sortByDevice)
return res, nil
}
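// Unit sketch (illustrative, not part of this commit): Send and Receive hold
// rate(..._bytes_total[10s]) samples, i.e. bytes per second, while Speed is the
// raw node_network_speed_bytes gauge. Converting a byte rate to megabits per
// second for display:
func exampleRateToMbit() {
recvBytesPerSec := int64(12500000) // made-up sample
fmt.Println(float64(recvBytesPerSec) * 8 / 1000000) // 100
}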
//node_network_speed_bytes/1024/1024/1024
func (c *ProApi) NetworkSpeed() (map[string]int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
queryStr := `node_network_speed_bytes`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
...@@ -27,11 +86,9 @@ func (c *ProApi) NetworkSpeed() ([]DeviceInfo, error) {
}
// fmt.Printf("Result:\n%v \nstring %v \n", result.Type(), result.String())
res := make(map[string]int64, 8)
switch {
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
...@@ -42,21 +99,15 @@ func (c *ProApi) NetworkSpeed() ([]DeviceInfo, error) {
// fmt.Println("k", k, "v", v)
// }
deviceNameStr := ""
if deviceName, ok := elem.Metric["device"]; ok {
deviceNameStr = string(deviceName)
} else {
continue
}
res[deviceNameStr] = int64(elem.Value)
}
}
...@@ -64,13 +115,14 @@ func (c *ProApi) NetworkSpeed() ([]DeviceInfo, error) {
}
func (c *ProApi) NetworkReceive() (map[string]int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
//queryStr := `sum by(instance) (irate(node_network_receive_bytes_total{device!~"bond.*?|lo"}[5m])) `
queryStr := `rate(node_network_receive_bytes_total{device!~"bond.*?|lo"}[10s])`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
...@@ -80,36 +132,23 @@ func (c *ProApi) NetworkReceive() ([]DeviceInfo, error) {
}
// fmt.Printf("Result:\n%v \nstring %v \n", result.Type(), result.String())
res := make(map[string]int64, 8)
switch {
case result.Type() == model.ValVector:
vectorVal := result.(model.Vector)
for _, elem := range vectorVal {
deviceNameStr := ""
if deviceName, ok := elem.Metric["device"]; ok {
deviceNameStr = string(deviceName)
} else {
continue
}
res[deviceNameStr] = int64(elem.Value)
}
}
...@@ -120,12 +159,14 @@ func (c *ProApi) NetworkReceive() ([]DeviceInfo, error) {
//sum by(instance) (irate(node_network_receive_bytes_total{device!~"bond.*?|lo"}[5m]))
//sum by(instance) (irate(node_network_transmit_bytes{device!~"bond.*?|lo"}[5m]))
func (c *ProApi) NetworkTransmit() (map[string]int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
//queryStr := `sum by(instance) (irate(node_network_transmit_bytes_total{device!~"bond.*?|lo"}[5m]))`
queryStr := `(rate(node_network_transmit_bytes_total{device!~"bond.*?|lo"}[10s]))`
result, warnings, err := c.API.Query(ctx, queryStr, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
...@@ -136,7 +177,7 @@ func (c *ProApi) NetworkTransmit() ([]DeviceInfo, error) {
}
// fmt.Printf("Result:\n%v \nstring %v \n", result.Type(), result.String())
res := make(map[string]int64, 8)
switch {
case result.Type() == model.ValScalar:
//scalarVal := result.(*model.Scalar)
...@@ -147,25 +188,15 @@ func (c *ProApi) NetworkTransmit() ([]DeviceInfo, error) {
for _, elem := range vectorVal {
deviceNameStr := ""
if deviceName, ok := elem.Metric["device"]; ok {
deviceNameStr = string(deviceName)
} else {
continue
}
res[deviceNameStr] = int64(elem.Value)
}
}
......