Commit 36d5ed1e authored by duanjinfei

merge master

parents 8d082b92 204d5b83
@@ -94,7 +94,7 @@ func (m *ModelHandler) MonitorModelInfo() {
 			if err != nil {
 				log.WithError(err).Error("Error writing models.json")
 			}
-			ticker = time.NewTicker(time.Minute * 10)
+			ticker.Reset(time.Minute * 10)
 		}
 	}
 }
...
@@ -138,7 +138,7 @@ func (m *MonitorNm) monitorNodeManagerSeed() {
 				nodeManagerArr = append(nodeManagerArr, &NodeManager{Info: node, IsUsed: false, IsExist: true})
 			}
 			m.IsInit = true
-			ticker = time.NewTicker(time.Minute * 10)
+			ticker.Reset(time.Minute * 10)
 		}
 	}
 }
@@ -195,7 +195,7 @@ func (n *NodeManagerHandler) ReportResourceMap(dockerOp *operate.DockerOp) {
 			}
 			params := utils.BuildParams(dockerOp.ReportModelIds, bootUpModelIds)
 			n.msgRespWorker.RegisterMsgResp(n.nodeManager, n.worker, SubmitResourceMapRes, params)
-			ticker = time.NewTicker(time.Minute * 10)
+			ticker.Reset(time.Minute * 10)
 		}
 	}
 }
...
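The three hunks above all make the same change: instead of reassigning `ticker = time.NewTicker(time.Minute * 10)` on every pass, the existing ticker is reused with `ticker.Reset(time.Minute * 10)`. A minimal sketch of the pattern, assuming a polling loop shaped like MonitorModelInfo (the "refresh" work below is a placeholder, not code from this repository):

```go
package main

import (
	"fmt"
	"time"
)

// pollLoop is an illustrative stand-in for loops such as MonitorModelInfo:
// it does some work on each tick and then waits for the next one. Resetting
// the existing ticker reuses its channel instead of allocating a new ticker
// on every iteration, and a single deferred Stop covers the whole loop.
func pollLoop(done <-chan struct{}) {
	ticker := time.NewTicker(time.Second) // short first interval so the loop runs promptly
	defer ticker.Stop()

	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			fmt.Println("refresh state")   // placeholder for e.g. rewriting models.json
			ticker.Reset(time.Minute * 10) // instead of ticker = time.NewTicker(time.Minute * 10)
		}
	}
}

func main() {
	done := make(chan struct{})
	go pollLoop(done)
	time.Sleep(1500 * time.Millisecond) // let the first tick fire
	close(done)
}
```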
@@ -45,7 +45,8 @@ type TaskOp struct {
 	taskParam           *models.TaskParam
 	httpClient          *http.Client
 	request             *http.Request
-	ticker              *time.Ticker
+	waitRunningTicker   *time.Ticker
+	waitReqTicker       *time.Ticker
 	startBeforeTaskTime time.Time
 }
@@ -141,7 +142,8 @@ func (t *TaskWorker) ComputeTaskHandler(taskMsg *nodeManagerV1.PushTaskMessage)
 		taskParam:           &models.TaskParam{},
 		httpClient:          &http.Client{},
 		request:             &http.Request{},
-		ticker:              time.NewTicker(time.Second * models.DefaultTaskTimer),
+		waitRunningTicker:   time.NewTicker(time.Millisecond),
+		waitReqTicker:       time.NewTicker(time.Millisecond),
 		startBeforeTaskTime: time.Now(),
 	}
 	t.LruCache.Add(taskMsg.TaskId, taskOp.taskExecResult)
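The struct and constructor hunks replace the single shared `ticker` with one ticker per wait loop, both created with a 1ms period so the first tick fires almost immediately. A trimmed-down sketch of that construction, using simplified stand-ins for the surrounding types (the field names follow the diff, everything else is illustrative):

```go
package main

import "time"

// taskOp is a stand-in for TaskOp, keeping only the fields this commit touches.
type taskOp struct {
	waitRunningTicker   *time.Ticker
	waitReqTicker       *time.Ticker
	startBeforeTaskTime time.Time
}

// newTaskOp mirrors the constructor change: each wait loop owns its own
// ticker, created with a 1ms period so the first tick arrives right away;
// the loops then Reset it to the real polling interval (see the next hunks).
func newTaskOp() *taskOp {
	return &taskOp{
		waitRunningTicker:   time.NewTicker(time.Millisecond),
		waitReqTicker:       time.NewTicker(time.Millisecond),
		startBeforeTaskTime: time.Now(),
	}
}

func main() {
	op := newTaskOp()
	defer op.waitRunningTicker.Stop()
	defer op.waitReqTicker.Stop()
	<-op.waitRunningTicker.C // the first tick arrives ~1ms after construction
}
```

Giving each loop its own ticker also means waitContainerRunning and waitReqContainerOk no longer compete for ticks on a shared channel.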
@@ -469,7 +471,8 @@ func (op *TaskOp) waitContainerRunning(handler *TaskWorker, imageName string, co
 	log.WithField("maxExecTime", maxExecTime).Info("Waiting for container running", imageName)
 	for {
 		select {
-		case <-op.ticker.C:
+		case <-op.waitRunningTicker.C:
+			op.waitRunningTicker.Reset(time.Second * models.DefaultTaskTimer)
 			if int64(time.Since(op.startBeforeTaskTime).Seconds()) > maxExecTime-50 {
 				log.Errorf("%s", "The maximum execution time for this task has been exceeded")
 				return fmt.Errorf("%s", "The maximum execution time for this task has been exceeded")
@@ -528,7 +531,8 @@ func (op *TaskOp) waitReqContainerOk(dockerOp *operate.DockerOp) error {
 	var err error
 	for {
 		select {
-		case <-op.ticker.C:
+		case <-op.waitReqTicker.C:
+			op.waitReqTicker.Reset(time.Second * models.DefaultTaskTimer)
 			if int64(time.Since(op.startBeforeTaskTime).Seconds()) > maxExecTime-50 {
 				log.Errorf("%s", "The maximum execution time for this task has been exceeded")
 				return fmt.Errorf("%s", "The maximum execution time for this task has been exceeded")
...
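Both wait loops now follow the same shape: the dedicated ticker fires (immediately on the first pass), is reset to the steady `models.DefaultTaskTimer` interval, and the loop bails out once the elapsed time gets within 50 seconds of `maxExecTime`. A runnable sketch of that loop, assuming a placeholder interval and readiness probe in place of the repository's Docker calls:

```go
package main

import (
	"fmt"
	"time"
)

const defaultTaskTimer = 1 // seconds; placeholder for models.DefaultTaskTimer

// waitUntilReady mirrors the shape of waitContainerRunning/waitReqContainerOk:
// poll isReady on each tick and give up when elapsed time approaches maxExecTime.
func waitUntilReady(isReady func() bool, start time.Time, maxExecTime int64) error {
	ticker := time.NewTicker(time.Millisecond) // first tick fires almost immediately
	defer ticker.Stop()

	for {
		<-ticker.C
		ticker.Reset(time.Second * defaultTaskTimer) // steady polling interval from here on
		if int64(time.Since(start).Seconds()) > maxExecTime-50 {
			return fmt.Errorf("the maximum execution time for this task has been exceeded")
		}
		if isReady() { // placeholder for "is the container running / answering requests?"
			return nil
		}
	}
}

func main() {
	start := time.Now()
	ready := func() bool { return time.Since(start) > 2*time.Second }
	fmt.Println("err =", waitUntilReady(ready, start, 120))
}
```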