#3898 fix-3834

Merged
chenyifan01 merged 28 commits from fix-3834 into V20230322 1 year ago
  1. models/cloudbrain.go (+14, -0)
  2. models/schedule_record.go (+20, -12)
  3. modules/grampus/resty.go (+31, -0)
  4. modules/urfs_client/urchin/schedule.go (+66, -58)
  5. routers/api/v1/api.go (+1, -0)
  6. routers/api/v1/repo/modelarts.go (+13, -3)
  7. routers/repo/grampus.go (+21, -0)
  8. services/cloudbrain/cloudbrainTask/schedule.go (+45, -18)
  9. services/cloudbrain/cloudbrainTask/sync_status.go (+0, -30)
  10. templates/repo/grampus/trainjob/show.tmpl (+15, -0)
  11. web_src/js/features/cloudbrainShow.js (+49, -7)
  12. web_src/js/features/i18nVue.js (+3, -1)

models/cloudbrain.go (+14, -0)

@@ -31,6 +31,7 @@ const (

TypeCloudBrainAll = -1
AccCardsNumAll = -1
JobNoTeminal = -1
)

const (
@@ -1610,6 +1611,13 @@ type SpecInfo struct {
MemorySize string `json:"memorySize"`
}

type GrampusJobEvents struct {
Message string `json:"message"`
Name string `json:"name"`
Reason string `json:"reason"`
Timestamp string `json:"timestamp"`
}

type GetGrampusResourceSpecsResult struct {
GrampusResult
Infos []GrampusSpec `json:"resourceSpecs"`
@@ -1664,6 +1672,12 @@ type GrampusStopJobResponse struct {
Status string `json:"status"`
}

type GetGrampusJobEventsResponse struct {
GrampusResult
JobEvents []GrampusJobEvents `json:"jobEvents"`
TotalSize int `json:"totalSize"`
}

type GrampusTasks struct {
Command string `json:"command"`
Name string `json:"name"`

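As a quick sanity check of the new response types, a minimal self-contained sketch of decoding a job-events payload follows. Only the field names and JSON tags come from the structs above; the sample payload is made up, and the local types omit the embedded GrampusResult for brevity.

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-ins for illustration only; the real definitions live in models/cloudbrain.go.
type grampusJobEvent struct {
	Message   string `json:"message"`
	Name      string `json:"name"`
	Reason    string `json:"reason"`
	Timestamp string `json:"timestamp"`
}

type jobEventsResponse struct {
	JobEvents []grampusJobEvent `json:"jobEvents"`
	TotalSize int               `json:"totalSize"`
}

func main() {
	// Made-up payload shaped like what the Grampus events endpoint is expected to return.
	raw := []byte(`{"jobEvents":[{"message":"pod assigned to node","name":"job-0","reason":"Scheduled","timestamp":"2023-03-22T08:00:00Z"}],"totalSize":1}`)

	var resp jobEventsResponse
	if err := json.Unmarshal(raw, &resp); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Printf("%d event(s); first reason: %s\n", resp.TotalSize, resp.JobEvents[0].Reason)
}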

models/schedule_record.go (+20, -12)

@@ -1,25 +1,33 @@
package models

import (
"fmt"
"time"

"code.gitea.io/gitea/modules/timeutil"
)

const (
StorageScheduleSucceed int = iota
StorageScheduleProcessing
StorageScheduleFailed
StorageNoFile
StorageScheduleWaiting
StorageUrchinScheduleSucceed int = iota
StorageUrchinScheduleProcessing
StorageUrchinScheduleFailed
StorageUrchinNoFile
StorageUrchinScheduleWaiting
)

const (
StorageLocalOperateSucceed int = iota
StorageLocalOperating
StorageLocalOperateFailed
StorageLocalOperateWaiting
MoveBucketSucceed int = iota
MoveBucketOperating
MoveBucketFailed
MoveBucketWaiting
)

type ModelScheduleStatus int

const (
ModelScheduleSucceed ModelScheduleStatus = iota
ModelScheduleOperating
ModelScheduleFailed
ModelScheduleWaiting
)

const UrchinDefaultBucket = "urchincache"
@@ -59,7 +67,7 @@ func UpdateScheduleLocalOperateStatus(record *ScheduleRecord, newLocalOperateSta
func GetSchedulingRecord() ([]*ScheduleRecord, error) {
records := make([]*ScheduleRecord, 0, 10)
return records, x.
Where("status = ?", StorageScheduleProcessing).
Where("status = ?", StorageUrchinScheduleProcessing).
Limit(100).
Find(&records)
}
@@ -79,7 +87,7 @@ func getScheduleRecordByPrID(e Engine, cloudbrainId int64) (*ScheduleRecord, err
if err != nil {
return nil, err
} else if !has {
return nil, fmt.Errorf("get record by cloudbrain_id failed(%d)", cloudbrainId)
return nil, ErrRecordNotExist{}
}
return record, nil
}

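Since the frontend later branches on these integer values, here is a minimal sketch spelling out the ordinals implied by the iota declarations above; it adds nothing beyond what the constants already define.

package main

import "fmt"

// Ordinals implied by the declarations in models/schedule_record.go.
const (
	StorageUrchinScheduleSucceed int = iota // 0
	StorageUrchinScheduleProcessing         // 1
	StorageUrchinScheduleFailed             // 2
	StorageUrchinNoFile                     // 3
	StorageUrchinScheduleWaiting            // 4
)

const (
	MoveBucketSucceed int = iota // 0
	MoveBucketOperating          // 1
	MoveBucketFailed             // 2
	MoveBucketWaiting            // 3
)

type ModelScheduleStatus int

const (
	ModelScheduleSucceed ModelScheduleStatus = iota // 0
	ModelScheduleOperating                          // 1
	ModelScheduleFailed                             // 2
	ModelScheduleWaiting                            // 3
)

func main() {
	fmt.Println(StorageUrchinScheduleWaiting, MoveBucketWaiting, ModelScheduleWaiting) // 4 3 3
}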

modules/grampus/resty.go (+31, -0)

@@ -425,6 +425,37 @@ sendjob:
return &result, nil
}

func GetJobEvents(jobID string) (*models.GetGrampusJobEventsResponse, error) {
checkSetting()
client := getRestyClient()
var result models.GetGrampusJobEventsResponse

retry := 0

sendjob:
_, err := client.R().
SetAuthToken(TOKEN).
SetResult(&result).
Get(HOST + urlTrainJob + "/" + jobID + "/events")
if err != nil {
return nil, fmt.Errorf("resty GetJobEvents: %v", err)
}

if result.ErrorCode == errorIllegalToken && retry < 1 {
retry++
log.Info("retry get token")
_ = getToken()
goto sendjob
}

if result.ErrorCode != 0 {
log.Error("GetJobEvents failed(%d): %s", result.ErrorCode, result.ErrorMsg)
return nil, fmt.Errorf("GetJobEvents failed(%d): %s", result.ErrorCode, result.ErrorMsg)
}

return &result, nil
}

func RestartNotebookJob(jobID string) (*models.GrampusNotebookRestartResponse, error) {
checkSetting()
client := getRestyClient()


modules/urfs_client/urchin/schedule.go (+66, -58)

@@ -71,16 +71,17 @@ func GetGPUDataBack(cloudbrainID int64, jobName, centerId string) error {
log.Error("ScheduleDataToPeerByKey failed info is EndPoint:%s,Bucket:%s,ObjectKey:%s,ProxyServer:%s,TargetObjectKey:%s,error:%v",
endpoint, bucket, objectKey, destPeerHost, grampus.GetGPUModelObjectKey(jobName), err)
_, err = models.InsertScheduleRecord(&models.ScheduleRecord{
CloudbrainID: cloudbrainID,
EndPoint: endpoint,
Bucket: bucket,
ObjectKey: objectKey,
ProxyServer: destPeerHost,
Status: models.StorageScheduleFailed,
IsDir: true,
ComputeSource: models.GPUResource,
TargetObjectKey: grampus.GetGPUModelObjectKey(jobName),
Remark: interceptErrorMessages(err),
CloudbrainID: cloudbrainID,
EndPoint: endpoint,
Bucket: bucket,
ObjectKey: objectKey,
ProxyServer: destPeerHost,
Status: models.StorageUrchinScheduleFailed,
IsDir: true,
ComputeSource: models.GPUResource,
TargetObjectKey: grampus.GetGPUModelObjectKey(jobName),
Remark: interceptErrorMessages(err),
LocalOperateStatus: models.MoveBucketWaiting,
})
if err != nil {
log.Error("InsertScheduleRecord failed:%v", err)
@@ -90,15 +91,16 @@ func GetGPUDataBack(cloudbrainID int64, jobName, centerId string) error {
}

_, err = models.InsertScheduleRecord(&models.ScheduleRecord{
CloudbrainID: cloudbrainID,
EndPoint: endpoint,
Bucket: bucket,
ObjectKey: objectKey,
ProxyServer: destPeerHost,
Status: res.StatusCode,
IsDir: true,
ComputeSource: models.GPUResource,
TargetObjectKey: grampus.GetGPUModelObjectKey(jobName),
CloudbrainID: cloudbrainID,
EndPoint: endpoint,
Bucket: bucket,
ObjectKey: objectKey,
ProxyServer: destPeerHost,
Status: res.StatusCode,
IsDir: true,
ComputeSource: models.GPUResource,
TargetObjectKey: grampus.GetGPUModelObjectKey(jobName),
LocalOperateStatus: models.MoveBucketWaiting,
})
if err != nil {
log.Error("InsertScheduleRecord failed:%v", err)
@@ -148,15 +150,16 @@ func GetGCUDataBack(cloudbrainID int64, jobName, centerId string) error {
log.Error("ScheduleDataToPeerByKey failed info is EndPoint:%s,Bucket:%s,ObjectKey:%s,ProxyServer:%s,TargetObjectKey:%s,error:%v",
endpoint, bucket, objectKey, destPeerHost, grampus.GetGPUModelObjectKey(jobName), err)
_, err = models.InsertScheduleRecord(&models.ScheduleRecord{
CloudbrainID: cloudbrainID,
EndPoint: endpoint,
Bucket: bucket,
ObjectKey: objectKey,
ProxyServer: destPeerHost,
Status: models.StorageScheduleFailed,
IsDir: true,
ComputeSource: models.GCUResource,
TargetObjectKey: grampus.GetGPUModelObjectKey(jobName),
CloudbrainID: cloudbrainID,
EndPoint: endpoint,
Bucket: bucket,
ObjectKey: objectKey,
ProxyServer: destPeerHost,
Status: models.StorageUrchinScheduleFailed,
IsDir: true,
ComputeSource: models.GCUResource,
TargetObjectKey: grampus.GetGPUModelObjectKey(jobName),
LocalOperateStatus: models.MoveBucketWaiting,
})
if err != nil {
log.Error("InsertScheduleRecord failed:%v", err)
@@ -166,15 +169,16 @@ func GetGCUDataBack(cloudbrainID int64, jobName, centerId string) error {
}

_, err = models.InsertScheduleRecord(&models.ScheduleRecord{
CloudbrainID: cloudbrainID,
EndPoint: endpoint,
Bucket: bucket,
ObjectKey: objectKey,
ProxyServer: destPeerHost,
Status: res.StatusCode,
IsDir: true,
ComputeSource: models.GCUResource,
TargetObjectKey: grampus.GetGPUModelObjectKey(jobName),
CloudbrainID: cloudbrainID,
EndPoint: endpoint,
Bucket: bucket,
ObjectKey: objectKey,
ProxyServer: destPeerHost,
Status: res.StatusCode,
IsDir: true,
ComputeSource: models.GCUResource,
TargetObjectKey: grampus.GetGPUModelObjectKey(jobName),
LocalOperateStatus: models.MoveBucketWaiting,
})
if err != nil {
log.Error("InsertScheduleRecord failed:%v", err)
@@ -224,15 +228,16 @@ func GetNPUDataBack(cloudbrainID int64, jobName, centerId string) error {
log.Error("ScheduleDataToPeerByKey failed after retrying, errorInfo is EndPoint:%s,Bucket:%s,ObjectKey:%s,ProxyServer:%s,error:%v",
endpoint, bucket, objectKey, destPeerHost, err)
_, err = models.InsertScheduleRecord(&models.ScheduleRecord{
CloudbrainID: cloudbrainID,
EndPoint: endpoint,
Bucket: bucket,
ObjectKey: objectKey,
ProxyServer: destPeerHost,
Status: models.StorageScheduleFailed,
IsDir: false,
ComputeSource: models.NPUResource,
Remark: interceptErrorMessages(err),
CloudbrainID: cloudbrainID,
EndPoint: endpoint,
Bucket: bucket,
ObjectKey: objectKey,
ProxyServer: destPeerHost,
Status: models.StorageUrchinScheduleFailed,
IsDir: false,
ComputeSource: models.NPUResource,
Remark: interceptErrorMessages(err),
LocalOperateStatus: models.MoveBucketWaiting,
})
if err != nil {
log.Error("InsertScheduleRecord failed:%v", err)
@@ -242,14 +247,15 @@ func GetNPUDataBack(cloudbrainID int64, jobName, centerId string) error {
}

_, err = models.InsertScheduleRecord(&models.ScheduleRecord{
CloudbrainID: cloudbrainID,
EndPoint: endpoint,
Bucket: bucket,
ObjectKey: objectKey,
ProxyServer: destPeerHost,
Status: res.StatusCode,
IsDir: false,
ComputeSource: models.NPUResource,
CloudbrainID: cloudbrainID,
EndPoint: endpoint,
Bucket: bucket,
ObjectKey: objectKey,
ProxyServer: destPeerHost,
Status: res.StatusCode,
IsDir: false,
ComputeSource: models.NPUResource,
LocalOperateStatus: models.MoveBucketWaiting,
})
if err != nil {
log.Error("InsertScheduleRecord failed:%v", err)
@@ -368,22 +374,24 @@ func HandleScheduleRecords() error {
func handleScheduleResult(record *models.ScheduleRecord, res *PeerResult) error {
var err error
switch res.StatusCode {
case models.StorageScheduleSucceed:
case models.StorageUrchinScheduleSucceed:
log.Info("ScheduleDataToPeerByKey(%s) succeed", record.ObjectKey)
models.UpdateScheduleLocalOperateStatus(record, models.StorageLocalOperating)
models.UpdateScheduleLocalOperateStatus(record, models.MoveBucketOperating)
if record.ComputeSource == models.GPUResource || record.ComputeSource == models.GCUResource {
err = MoveBucketInOpenIMinio(res.DataPath, record.TargetObjectKey, res.DataRoot, setting.Attachment.Minio.Bucket)
if err != nil {
models.UpdateScheduleLocalOperateStatus(record, models.MoveBucketFailed)
log.Error("GetBackModel MoveBucketInOpenIMinio err.%v", err)
return err
}
models.UpdateScheduleLocalOperateStatus(record, models.MoveBucketSucceed)
} else {
decompress(record.Bucket+"/"+record.ObjectKey, setting.Bucket+"/"+strings.TrimSuffix(record.ObjectKey, models.ModelSuffix))
}

case models.StorageScheduleProcessing:
case models.StorageUrchinScheduleProcessing:
log.Info("ScheduleDataToPeerByKey(%s) processing", record.ObjectKey)
case models.StorageScheduleFailed:
case models.StorageUrchinScheduleFailed:
log.Error("ScheduleDataToPeerByKey(%s) failed:%s", record.ObjectKey, res.StatusMsg)

default:


routers/api/v1/api.go (+1, -0)

@@ -1131,6 +1131,7 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/metrics", repo_ext.GrampusMetrics)
m.Get("/download_multi_model", cloudbrain.AdminOrJobCreaterRightForTrain, repo.MultiModelDownload)
m.Get("/download_log", cloudbrain.AdminOrJobCreaterRightForTrain, repo_ext.GrampusDownloadLog)
m.Get("/job_event", repo_ext.GrampusJobEvents)
})
})
}, reqRepoReader(models.UnitTypeCloudBrain))


routers/api/v1/repo/modelarts.go (+13, -3)

@@ -6,7 +6,6 @@
package repo

import (
"code.gitea.io/gitea/routers/response"
"encoding/json"
"net/http"
"path"
@@ -15,6 +14,8 @@ import (
"strings"
"time"

"code.gitea.io/gitea/routers/response"

"code.gitea.io/gitea/modules/cloudbrain"

"code.gitea.io/gitea/services/cloudbrain/cloudbrainTask"
@@ -465,7 +466,7 @@ func ModelList(ctx *context.APIContext) {
return
}

status := models.StorageScheduleSucceed
status := models.ModelScheduleSucceed
var fileInfos []storage.FileInfo
if task.ComputeResource == models.NPUResource {
prefix := strings.TrimPrefix(path.Join(setting.TrainJobModelPath, task.JobName, setting.OutPutPath, versionName), "/")
@@ -504,7 +505,16 @@ func ModelList(ctx *context.APIContext) {
}

if task.Type == models.TypeC2Net {
status = cloudbrainTask.GetModelListFileStatus(fileInfos, task)
if !task.IsTerminal() {
log.Info("GetModelScheduleStatus job is not terminal.jobId=%s", jobID)
status = models.JobNoTeminal
} else {
status, err = cloudbrainTask.GetModelScheduleStatus(task.JobID)
if err != nil {
log.Error("GetModelScheduleStatus(%s) failed:%v", task.JobName, err.Error())
return
}
}
}

ctx.JSON(http.StatusOK, map[string]interface{}{

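To connect this with the frontend change further down: when the task has not reached a terminal state, the handler now reports models.JobNoTeminal (-1), and cloudbrainShow.js renders the "task not finished" hint for that value. A hedged sketch of the relevant part of the response body follows; the StatusOK key is inferred from the frontend code, since this hunk truncates the ctx.JSON map.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Only the status field is shown; the real handler returns additional keys.
	body := map[string]interface{}{
		"StatusOK": -1, // models.JobNoTeminal: job not terminal yet
	}
	b, _ := json.Marshal(body)
	fmt.Println(string(b)) // {"StatusOK":-1}
}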

routers/repo/grampus.go (+21, -0)

@@ -1691,6 +1691,27 @@ func GrampusMetrics(ctx *context.Context) {
return
}

func GrampusJobEvents(ctx *context.Context) {
jobID := ctx.Params(":jobid")
job, err := models.GetCloudbrainByJobID(jobID)
if err != nil {
log.Error("GetCloudbrainByJobID failed: %v", err, ctx.Data["MsgID"])
ctx.ServerError(err.Error(), err)
return
}

result, err := grampus.GetJobEvents(job.JobID)
if err != nil {
log.Error("GetJobEvents failed: %v", err, ctx.Data["MsgID"])
// Guard: result is nil on error, so return an empty event list instead of dereferencing it.
ctx.JSON(http.StatusOK, map[string]interface{}{
"JobID": jobID,
"JobEvents": []models.GrampusJobEvents{},
})
return
}
ctx.JSON(http.StatusOK, map[string]interface{}{
"JobID": jobID,
"JobEvents": result.JobEvents,
})

return
}

func generateCommand(repoName, processorType, bootFile, paramSrc, outputRemotePath, datasetName, pretrainModelFileName, modelRemoteObsUrl string) (string, error) {
var command string



services/cloudbrain/cloudbrainTask/schedule.go (+45, -18)

@@ -10,7 +10,7 @@ import (
"strings"
)

func GetModelScheduleStatus(jobId string) (int, error) {
func GetModelScheduleStatus(jobId string) (models.ModelScheduleStatus, error) {
job, err := models.GetCloudbrainByJobID(jobId)
if err != nil {
log.Error("GetModelScheduleStatus GetCloudbrainByJobID err.jobId=%s err=%v", jobId, err)
@@ -18,33 +18,60 @@ func GetModelScheduleStatus(jobId string) (int, error) {
}
if !job.IsTerminal() {
log.Info("GetModelScheduleStatus job is not terminal.jobId=%s", jobId)
return models.StorageLocalOperateWaiting, nil
return models.ModelScheduleWaiting, nil
}

record, err := models.GetScheduleRecordByCloudbrainID(job.ID)
if err != nil {
log.Error("GetModelScheduleStatus GetScheduleRecordByCloudbrainID err.jobId=%s err=%v", jobId, err)
return models.StorageScheduleSucceed, nil
if models.IsErrRecordNotExist(err) {
return models.ModelScheduleSucceed, nil
}
return models.ModelScheduleFailed, err
}

switch record.Status {
case models.StorageScheduleProcessing, models.StorageScheduleWaiting:
return models.StorageLocalOperating, nil
case models.StorageNoFile, models.StorageScheduleFailed:
return models.StorageLocalOperateFailed, nil
case models.StorageScheduleSucceed:
if record.ComputeSource == models.GPUResource || record.ComputeSource == models.GCUResource {
return record.LocalOperateStatus, nil
}
if record.LocalOperateStatus != models.StorageLocalOperating {
return record.LocalOperateStatus, nil
case models.StorageUrchinScheduleWaiting:
return models.ModelScheduleWaiting, nil
case models.StorageUrchinScheduleProcessing:
return models.ModelScheduleOperating, nil
case models.StorageUrchinScheduleFailed:
return models.ModelScheduleFailed, nil
case models.StorageUrchinNoFile:
return models.ModelScheduleSucceed, nil
case models.StorageUrchinScheduleSucceed:
moveStatus, err := GetMoveBucketStatus(record, job.JobName, job.VersionName)
if err != nil {
log.Error("GetMoveBucketStatus err.%v", err)
return models.ModelScheduleFailed, err
}
//After the NPU results are transferred back, an asynchronous decompression still runs, so for an in-progress status we must further check whether decompression has finished.
//The check: see whether the model directory already contains files.
if IsNPUModelDirHasFile(job.JobName, job.VersionName) {
models.UpdateScheduleLocalOperateStatus(record, models.StorageLocalOperateSucceed)
return models.StorageLocalOperateSucceed, nil
switch moveStatus {
case models.MoveBucketSucceed:
return models.ModelScheduleSucceed, nil
case models.MoveBucketOperating:
return models.ModelScheduleOperating, nil
case models.MoveBucketFailed:
return models.ModelScheduleFailed, nil
}
}

return models.ModelScheduleFailed, nil
}

func GetMoveBucketStatus(record *models.ScheduleRecord, jobName, versionName string) (int, error) {

if record.ComputeSource == models.GPUResource || record.ComputeSource == models.GCUResource {
return record.LocalOperateStatus, nil
}
if record.LocalOperateStatus != models.MoveBucketOperating {
return record.LocalOperateStatus, nil
}
//After the NPU results are transferred back, an asynchronous decompression still runs, so for an in-progress status we must further check whether decompression has finished.
//The check: see whether the model directory already contains files.
if IsNPUModelDirHasFile(jobName, versionName) {
models.UpdateScheduleLocalOperateStatus(record, models.MoveBucketSucceed)
return models.MoveBucketSucceed, nil
}
return record.LocalOperateStatus, nil
}

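For readers tracing the status plumbing end to end, a self-contained sketch of the funnel that GetModelScheduleStatus now implements: constants are duplicated locally with their iota ordinals, the branching mirrors the switch above, and unmatched combinations fall back to "failed" just like the function's final return.

package main

import "fmt"

// Local stand-ins for the models constants, in their iota order.
const (
	urchinSucceed = iota // StorageUrchinScheduleSucceed
	urchinProcessing     // StorageUrchinScheduleProcessing
	urchinFailed         // StorageUrchinScheduleFailed
	urchinNoFile         // StorageUrchinNoFile
	urchinWaiting        // StorageUrchinScheduleWaiting
)

const (
	moveSucceed = iota // MoveBucketSucceed
	moveOperating      // MoveBucketOperating
	moveFailed         // MoveBucketFailed
	moveWaiting        // MoveBucketWaiting
)

const (
	modelSucceed = iota // ModelScheduleSucceed
	modelOperating      // ModelScheduleOperating
	modelFailed         // ModelScheduleFailed
	modelWaiting        // ModelScheduleWaiting
)

// funnel mirrors GetModelScheduleStatus: the urchin schedule status, plus the local
// move-bucket status when the schedule succeeded, collapse into one model schedule status.
func funnel(urchin, move int) int {
	switch urchin {
	case urchinWaiting:
		return modelWaiting
	case urchinProcessing:
		return modelOperating
	case urchinFailed:
		return modelFailed
	case urchinNoFile:
		return modelSucceed
	case urchinSucceed:
		switch move {
		case moveSucceed:
			return modelSucceed
		case moveOperating:
			return modelOperating
		case moveFailed:
			return modelFailed
		}
	}
	// Anything else (including a still-waiting move) falls back to failed,
	// matching the function's final return.
	return modelFailed
}

func main() {
	fmt.Println(funnel(urchinSucceed, moveOperating)) // 1 (operating)
	fmt.Println(funnel(urchinNoFile, moveWaiting))    // 0 (succeed)
}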


services/cloudbrain/cloudbrainTask/sync_status.go (+0, -30)

@@ -12,7 +12,6 @@ import (
"code.gitea.io/gitea/modules/modelarts_cd"
"code.gitea.io/gitea/modules/notification"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/timeutil"
)

@@ -170,32 +169,3 @@ func StopDebugJob(task *models.Cloudbrain) error {
return err

}

func GetModelListFileStatus(fileInfos []storage.FileInfo, task *models.Cloudbrain) (status int) {
if len(fileInfos) > 0 {
status = models.StorageScheduleSucceed
} else {
if models.IsTrainJobTerminal(task.Status) {
if task.Status == models.GrampusStatusStopped {
status = models.StorageNoFile
} else if task.Status == models.GrampusStatusFailed {
if task.AiCenter == "" {
status = models.StorageNoFile
}
} else {
record, _ := models.GetScheduleRecordByCloudbrainID(task.ID)
if record != nil {
status = record.Status
if status == models.StorageScheduleSucceed {
status = models.StorageNoFile
}
} else {
status = models.StorageScheduleProcessing
}
}
} else {
status = models.StorageScheduleWaiting
}
}
return status
}

templates/repo/grampus/trainjob/show.tmpl (+15, -0)

@@ -384,6 +384,21 @@

</div>

</div>
<div class="ui tab" data-tab="five{{$k}}">
<div style="position: relative;border: 1px solid rgba(0,0,0,.2);padding: 0 10px;margin-top: 10px;">
<div class="ui attached info" id="info{{.VersionName}}"
style="height: 300px !important; overflow: auto;">
<div class="ui inverted active dimmer">
<div class="ui loader"></div>
</div>
<span class="info_text">
</span>
</div>

</div>

</div>
<div class="ui tab" data-tab="four{{$k}}" style="position: relative;">
<i class="ri-refresh-line metric_chart"


web_src/js/features/cloudbrainShow.js (+49, -7)

@@ -352,7 +352,41 @@ export default async function initCloudrainSow() {
}
$(".log_top").click(logTop);
$(".log_bottom").click(logBottom);
$(".run_info").on('click', function () {
let version_name = $(this).data("version");
let ID = $(`#accordion${version_name}`).data("jobid");
let repoPath = $(`#accordion${version_name}`).data("repopath");
$(`#info${version_name} .ui.inverted.active.dimmer`).css({
"background-color": "#fff",
display: "block",
});
$.get(
`/api/v1/repos/${repoPath}/${ID}/job_event`,
(data) => {
$(`#info${version_name} .ui.inverted.active.dimmer`).css(
"display", "none",
);
parseInfo(data,version_name)
})
});

function parseInfo(jsonObj,version_name){
let html = "";
if (jsonObj != null){
let podEventArray = jsonObj['JobEvents'];
console.log("podEventArray",podEventArray)
if(podEventArray != null){
for(var i=0; i < podEventArray.length;i++){
if (podEventArray[i]["reason"] != "") {
let time = new Date(podEventArray[i]["timestamp"])
html += `<p><b>[${podEventArray[i]["reason"]}]</b> <span>${time.toLocaleString()}</span></p>`
html += `<p>${podEventArray[i]["message"]}</p>`;
}
}
}
}
$(`#info${version_name} .info_text`)[0].innerHTML = html
}
// $(".log-scroll-max").scroll();

$(".full-log-dialog").click(function () {
@@ -570,7 +604,15 @@ export default async function initCloudrainSow() {
gpuFlag
);
}
} else if (data.StatusOK == 1) { // processing (1)
} else if (data.StatusOK == -1) { // task not finished (-1)
$(`#file_breadcrumb${version_name}`).empty();
$(`#dir_list${version_name}`).html(`<div style="height:200px;display:flex;justify-content:center;align-items:center;font-size:14px;color:rgb(16, 16, 16);">
<div style="display:flex;justify-content:center;align-items:center;height:24px;width:24px;margin-right:5px;">
<svg xmlns="http://www.w3.org/2000/svg" class="styles__StyledSVGIconPathComponent-sc-16fsqc8-0 iKfgJk svg-icon-path-icon fill" viewBox="0 0 32 32" width="16" height="16"><defs data-reactroot=""></defs><g><path d="M16 29.333c-7.364 0-13.333-5.969-13.333-13.333s5.969-13.333 13.333-13.333 13.333 5.969 13.333 13.333-5.969 13.333-13.333 13.333zM16 26.667c5.891 0 10.667-4.776 10.667-10.667s-4.776-10.667-10.667-10.667v0c-5.891 0-10.667 4.776-10.667 10.667s4.776 10.667 10.667 10.667v0zM17.333 16h5.333v2.667h-8v-9.333h2.667v6.667z"></path></g></svg>
</div>
<span>${i18n['task_not_finished']}</span>
</div>`);
} else if (data.StatusOK == 1) { // processing (1)
$(`#file_breadcrumb${version_name}`).empty();
$(`#dir_list${version_name}`).html(`<div style="height:200px;display:flex;justify-content:center;align-items:center;font-size:14px;color:rgb(16, 16, 16);">
<style>
@@ -592,21 +634,21 @@ export default async function initCloudrainSow() {
</div>
<span>${i18n['file_sync_fail']}</span>
</div>`);
} else if (data.StatusOK == 3) { // no file (3)
} else if (data.StatusOK == 3) { // waiting for sync (3)
$(`#file_breadcrumb${version_name}`).empty();
$(`#dir_list${version_name}`).html(`<div style="height:200px;display:flex;justify-content:center;align-items:center;font-size:14px;color:rgb(16, 16, 16);">
<div style="display:flex;justify-content:center;align-items:center;height:24px;width:24px;margin-right:5px;">
<svg xmlns="http://www.w3.org/2000/svg" class="styles__StyledSVGIconPathComponent-sc-16fsqc8-0 iKfgJk svg-icon-path-icon fill" viewBox="0 0 24 24" width="16" height="16"><defs data-reactroot=""></defs><g><circle cx="15.5" cy="9.5" r="1.5"></circle><circle cx="8.5" cy="9.5" r="1.5"></circle><path d="M11.99 2C6.47 2 2 6.48 2 12s4.47 10 9.99 10C17.52 22 22 17.52 22 12S17.52 2 11.99 2zM12 20c-4.42 0-8-3.58-8-8s3.58-8 8-8 8 3.58 8 8-3.58 8-8 8zm0-6c-2.33 0-4.32 1.45-5.12 3.5h1.67c.69-1.19 1.97-2 3.45-2s2.75.81 3.45 2h1.67c-.8-2.05-2.79-3.5-5.12-3.5z"></path></g></svg>
<svg xmlns="http://www.w3.org/2000/svg" class="styles__StyledSVGIconPathComponent-sc-16fsqc8-0 iKfgJk svg-icon-path-icon fill" viewBox="0 0 32 32" width="16" height="16"><defs data-reactroot=""></defs><g><path d="M16 29.333c-7.364 0-13.333-5.969-13.333-13.333s5.969-13.333 13.333-13.333 13.333 5.969 13.333 13.333-5.969 13.333-13.333 13.333zM16 26.667c5.891 0 10.667-4.776 10.667-10.667s-4.776-10.667-10.667-10.667v0c-5.891 0-10.667 4.776-10.667 10.667s4.776 10.667 10.667 10.667v0zM17.333 16h5.333v2.667h-8v-9.333h2.667v6.667z"></path></g></svg>
</div>
<span>${i18n['no_file_to_download']}</span>
<span>${i18n['file_sync_wait']}</span>
</div>`);
} else if (data.StatusOK == 4) { // task not finished (4)
} else if (data.StatusOK == 4) { // no file (4)
$(`#file_breadcrumb${version_name}`).empty();
$(`#dir_list${version_name}`).html(`<div style="height:200px;display:flex;justify-content:center;align-items:center;font-size:14px;color:rgb(16, 16, 16);">
<div style="display:flex;justify-content:center;align-items:center;height:24px;width:24px;margin-right:5px;">
<svg xmlns="http://www.w3.org/2000/svg" class="styles__StyledSVGIconPathComponent-sc-16fsqc8-0 iKfgJk svg-icon-path-icon fill" viewBox="0 0 32 32" width="16" height="16"><defs data-reactroot=""></defs><g><path d="M16 29.333c-7.364 0-13.333-5.969-13.333-13.333s5.969-13.333 13.333-13.333 13.333 5.969 13.333 13.333-5.969 13.333-13.333 13.333zM16 26.667c5.891 0 10.667-4.776 10.667-10.667s-4.776-10.667-10.667-10.667v0c-5.891 0-10.667 4.776-10.667 10.667s4.776 10.667 10.667 10.667v0zM17.333 16h5.333v2.667h-8v-9.333h2.667v6.667z"></path></g></svg>
<svg xmlns="http://www.w3.org/2000/svg" class="styles__StyledSVGIconPathComponent-sc-16fsqc8-0 iKfgJk svg-icon-path-icon fill" viewBox="0 0 24 24" width="16" height="16"><defs data-reactroot=""></defs><g><circle cx="15.5" cy="9.5" r="1.5"></circle><circle cx="8.5" cy="9.5" r="1.5"></circle><path d="M11.99 2C6.47 2 2 6.48 2 12s4.47 10 9.99 10C17.52 22 22 17.52 22 12S17.52 2 11.99 2zM12 20c-4.42 0-8-3.58-8-8s3.58-8 8-8 8 3.58 8 8-3.58 8-8 8zm0-6c-2.33 0-4.32 1.45-5.12 3.5h1.67c.69-1.19 1.97-2 3.45-2s2.75.81 3.45 2h1.67c-.8-2.05-2.79-3.5-5.12-3.5z"></path></g></svg>
</div>
<span>${i18n['task_not_finished']}</span>
<span>${i18n['no_file_to_download']}</span>
</div>`);
}
}).fail(function (err) {


web_src/js/features/i18nVue.js (+3, -1)

@@ -71,8 +71,9 @@ export const i18nVue = {
model_success:"模型加载成功",
model_failed:"模型加载失败",
file_sync_ing:"文件同步中,请稍侯",
file_sync_wait:"文件等待同步中,请稍侯",
file_sync_fail:"文件同步失败",
no_file_to_download:"没有文件可以下载",
no_file_to_download:"没有文件可以下载,稍后再来看看",
task_not_finished:"任务还未结束,稍后再来看看",
local:"本地",
online:"线上",
@@ -211,6 +212,7 @@ export const i18nVue = {
model_success:"Success",
model_failed:"Failed",
file_sync_ing:"File synchronization in progress, please wait",
file_sync_ing:"File synchronization in waitting, please wait",
file_sync_fail:"File synchronization failed",
no_file_to_download:"No files can be downloaded",
task_not_finished:"Task not finished yet, please wait",

