#5300 V20240129.patch

Merged
ychao_1983 merged 233 commits from V20240129.patch into develop 1 month ago
  1. cmd/web.go (+1, -1)
  2. custom/public/json/images_version.json (+12, -0)
  3. entity/ai_task.go (+14, -25)
  4. entity/creation.go (+0, -22)
  5. go.mod (+1, -1)
  6. go.sum (+2, -2)
  7. manager/client/grampus/grampus.go (+42, -0)
  8. models/action.go (+3, -1)
  9. models/card_request.go (+7, -4)
  10. models/cloudbrain.go (+20, -11)
  11. models/cloudbrain_image.go (+287, -41)
  12. models/cloudbrain_spec.go (+12, -3)
  13. models/models.go (+1, -0)
  14. models/resource_exclusive_pool.go (+61, -0)
  15. models/resource_queue.go (+144, -26)
  16. models/resource_scene.go (+15, -45)
  17. models/resource_specification.go (+291, -21)
  18. models/task_config.go (+2, -1)
  19. models/user_analysis_for_activity.go (+11, -0)
  20. models/user_business_analysis.go (+40, -20)
  21. models/user_business_struct.go (+19, -1)
  22. models/user_login_action_log.go (+20, -0)
  23. modules/auth/cloudbrain.go (+51, -20)
  24. modules/cloudbrain/resty.go (+40, -18)
  25. modules/context/context.go (+1, -0)
  26. modules/context/user_action_count.go (+42, -0)
  27. modules/grampus/grampus.go (+2, -0)
  28. modules/grampus/resty.go (+111, -28)
  29. modules/public/public.go (+4, -0)
  30. modules/setting/setting.go (+2, -0)
  31. modules/structs/card_requests.go (+1, -0)
  32. modules/urfs_client/objectstorage/mocks/objectstorage_mock.go (+5, -1)
  33. options/locale/locale_en-US.ini (+1, -0)
  34. options/locale/locale_zh-CN.ini (+1, -0)
  35. public/home/home.js (+7, -5)
  36. routers/admin/resources.go (+25, -10)
  37. routers/api/v1/api.go (+14, -0)
  38. routers/api/v1/repo/attachments.go (+29, -13)
  39. routers/api/v1/repo/cloudbrain_dashboard.go (+2, -0)
  40. routers/api/v1/repo/images.go (+37, -0)
  41. routers/api/v1/repo/modelmanage.go (+17, -21)
  42. routers/card_request/card_request.go (+65, -0)
  43. routers/init.go (+3, -0)
  44. routers/repo/ai_model_convert.go (+3, -0)
  45. routers/repo/ai_model_manage.go (+1, -1)
  46. routers/repo/cloudbrain.go (+96, -18)
  47. routers/repo/dataset.go (+1, -1)
  48. routers/repo/grampus.go (+38, -51)
  49. routers/repo/repo_statistic.go (+222, -174)
  50. routers/repo/user_data_analysis.go (+22, -0)
  51. routers/resources/acc_card.go (+35, -0)
  52. routers/routes/routes.go (+17, -3)
  53. routers/user/auth.go (+2, -2)
  54. services/ai_task_service/cluster/c2net.go (+17, -3)
  55. services/ai_task_service/container_builder/code_builder.go (+3, -1)
  56. services/ai_task_service/container_builder/dataset_builder.go (+6, -1)
  57. services/ai_task_service/container_builder/pre_model_builder.go (+20, -20)
  58. services/ai_task_service/task/grampus_notebook_task.go (+25, -1)
  59. services/ai_task_service/task/grampus_train_task.go (+2, -0)
  60. services/ai_task_service/task/task_base.go (+3, -3)
  61. services/ai_task_service/task/task_creation_info.go (+41, -18)
  62. services/card_request/card_request.go (+5, -2)
  63. services/cloudbrain/resource/resource_queue.go (+69, -2)
  64. services/cloudbrain/resource/resource_specification.go (+19, -2)
  65. services/socketwrap/clientManager.go (+1, -1)
  66. templates/admin/cloudbrain/imagecommit.tmpl (+10, -148)
  67. templates/explore/images.tmpl (+3, -24)
  68. templates/repo/cloudbrain/image/apply.tmpl (+9, -121)
  69. templates/repo/cloudbrain/image/edit.tmpl (+9, -114)
  70. templates/repo/cloudbrain/image/submit.tmpl (+12, -118)
  71. templates/repo/grampus/trainjob/iluvatar-gpgpu/new.tmpl (+1, -0)
  72. templates/reward/point/rule.tmpl (+0, -6)
  73. templates/user/dashboard/feeds.tmpl (+9, -0)
  74. vendor/gitea.com/macaron/csrf/csrf.go (+0, -1)
  75. vendor/gitea.com/macaron/csrf/xsrf.go (+0, -1)
  76. vendor/github.com/Jeffail/tunny/LICENSE (+19, -0)
  77. vendor/github.com/Jeffail/tunny/README.md (+134, -0)
  78. vendor/github.com/Jeffail/tunny/tunny.go (+309, -0)
  79. vendor/github.com/Jeffail/tunny/tunny_logo.png (BIN)
  80. vendor/github.com/Jeffail/tunny/worker.go (+126, -0)
  81. vendor/golang.org/x/exp/LICENSE (+0, -27)
  82. vendor/golang.org/x/exp/PATENTS (+0, -22)
  83. vendor/golang.org/x/exp/constraints/constraints.go (+0, -50)
  84. vendor/golang.org/x/exp/slices/cmp.go (+0, -44)
  85. vendor/golang.org/x/exp/slices/slices.go (+0, -499)
  86. vendor/golang.org/x/exp/slices/sort.go (+0, -195)
  87. vendor/golang.org/x/exp/slices/zsortanyfunc.go (+0, -479)
  88. vendor/golang.org/x/exp/slices/zsortordered.go (+0, -481)
  89. vendor/modules.txt (+3, -2)
  90. web_src/js/components/images/adminImages.vue (+115, -34)
  91. web_src/js/features/clipboard.js (+1, -1)
  92. web_src/js/features/highlight.js (+1, -1)
  93. web_src/js/features/i18nVue.js (+39, -5)
  94. web_src/less/_admin.less (+1, -1)
  95. web_src/vuepages/apis/modules/common.js (+9, -0)
  96. web_src/vuepages/apis/modules/computingpower.js (+61, -0)
  97. web_src/vuepages/apis/modules/images.js (+64, -3)
  98. web_src/vuepages/components/cloudbrain/FormTop.vue (+0, -1)
  99. web_src/vuepages/components/cloudbrain/ImageSelectV1.vue (+249, -19)
  100. web_src/vuepages/const/index.js (+15, -2)

cmd/web.go (+1, -1)

@@ -167,7 +167,7 @@ func runWeb(ctx *cli.Context) error {
if setting.EnablePprof {
go func() {
log.Info("Starting pprof server on localhost:6060")
log.Info("%v", http.ListenAndServe("192.168.207.34:6060", nil))
log.Info("%v", http.ListenAndServe("localhost:6060", nil))
}()
}



custom/public/json/images_version.json (+12, -0)

@@ -0,0 +1,12 @@
{
"compute_resource": ["GPU", "GCU", "MLU", "ILUVATAR-GPGPU", "METAX-GPGPU"],
"framework":["PyTorch","TensorFlow","MindSpore","PaddlePaddle","Other"],
"framework_version":{
"PyTorch":["2.1.2","2.1.1","2.1.0","2.0.1","2.0.0", "1.13.1","1.13.0","1.12.1","1.12.0","1.11.0","1.10.1","1.10.0","1.9.1","1.9.0","1.6.0"],
"TensorFlow":["2.15.0","2.14.1","2.14.0","2.13.1","2.13.0","2.12.1","2.12.0","2.11.1","2.11.0","2.10.1","2.9.3","2.9.2","2.8.4","2.8.3","2.7.4"],
"MindSpore":["2.2.10","2.2.1","2.1.0","2.0.0","1.10.1","1.10.0"],
"PaddlePaddle":["2.6.0","2.5.2","2.5.1","2.5.0","2.4.2","2.4.1","2.4.0","2.3.2","2.3.1","2.3.0","2.2.2"]
},
"python":["3.13","3.12","3.11","3.10","3.9","3.8","3.7","3.6"],
"cuda":["12.3","12.2","12.1","11.8","11.7","11.6","11.5","11.4","11.3","11.2","11.1","10.2","10.1","10.0"]
}
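
For orientation, here is a minimal sketch of how this catalogue could be decoded on the Go side. The ImagesVersion struct, its field layout, and the file path are illustrative assumptions, not code from this patch:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// ImagesVersion mirrors custom/public/json/images_version.json (assumed layout).
type ImagesVersion struct {
	ComputeResource  []string            `json:"compute_resource"`
	Framework        []string            `json:"framework"`
	FrameworkVersion map[string][]string `json:"framework_version"`
	Python           []string            `json:"python"`
	Cuda             []string            `json:"cuda"`
}

func main() {
	data, err := os.ReadFile("custom/public/json/images_version.json")
	if err != nil {
		panic(err)
	}
	var v ImagesVersion
	if err := json.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	// For example, the PyTorch versions offered in the image commit form.
	fmt.Println(v.FrameworkVersion["PyTorch"])
}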

entity/ai_task.go (+14, -25)

@@ -16,7 +16,6 @@ import (
"code.gitea.io/gitea/models"

"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/timeutil"
)
@@ -148,7 +147,13 @@ type AITaskDetailInfo struct {
}

func (a *AITaskDetailInfo) Tr(language string) {
a.AICenter = getAiCenterShow(a.AICenter, language)
aiCenterInfo := strings.Split(a.AICenter, "+")
aiCenterCode := aiCenterInfo[0]
aiCenterName := ""
if len(aiCenterCode) >= 2 {
aiCenterName = aiCenterInfo[1]
}
a.AICenter = models.GetAiCenterShow(aiCenterCode, aiCenterName, language)
}

func (a *AITaskDetailInfo) TryToRemoveDatasets(currentUser *models.User) {
@@ -167,28 +172,6 @@ func (a *AITaskDetailInfo) TryToRemoveSDKCode(currentUser *models.User) {
}
}

func getAiCenterShow(aiCenter string, language string) string {
aiCenterInfo := strings.Split(aiCenter, "+")
if len(aiCenterInfo) == 2 {
if setting.AiCenterCodeAndNameAndLocMapInfo != nil {
if info, ok := setting.AiCenterCodeAndNameAndLocMapInfo[aiCenterInfo[0]]; ok {
if language == defaultLanguage {
return info.Content
} else {
return info.ContentEN
}
} else {
return aiCenterInfo[1]
}
} else {
return aiCenterInfo[1]
}
}
return ""
}

var defaultLanguage = "zh-CN"

type CreateTaskRes struct {
ID int64 `json:"id"`
Status string `json:"status"`
@@ -231,7 +214,13 @@ type AITaskBriefInfo struct {
}

func (a *AITaskBriefInfo) Tr(language string) {
a.AICenter = getAiCenterShow(a.AICenter, language)
aiCenterInfo := strings.Split(a.AICenter, "+")
aiCenterCode := aiCenterInfo[0]
aiCenterName := ""
if len(aiCenterCode) >= 2 {
aiCenterName = aiCenterInfo[1]
}
a.AICenter = models.GetAiCenterShow(aiCenterCode, aiCenterName, language)
}

type AITaskListRes struct {


entity/creation.go (+0, -22)

@@ -53,28 +53,6 @@ type SpecificationInfo struct {
ExclusiveOrg string `json:"exclusive_org"`
}

func ParseSpecificationInfo(s *models.Specification) *SpecificationInfo {
return &SpecificationInfo{
ID: s.ID,
SourceSpecId: s.SourceSpecId,
AccCardsNum: s.AccCardsNum,
AccCardType: s.AccCardType,
CpuCores: s.CpuCores,
MemGiB: s.MemGiB,
GPUMemGiB: s.GPUMemGiB,
ShareMemGiB: s.ShareMemGiB,
ComputeResource: s.ComputeResource,
UnitPrice: s.UnitPrice,
QueueId: s.QueueId,
QueueCode: s.QueueCode,
Cluster: s.Cluster,
AiCenterCode: s.AiCenterCode,
AiCenterName: s.AiCenterName,
IsExclusive: s.IsExclusive,
ExclusiveOrg: s.ExclusiveOrg,
}
}

type PointAccountInfo struct {
AccountCode string `json:"account_code"`
Balance int64 `json:"balance"`


go.mod (+1, -1)

@@ -18,6 +18,7 @@ require (
gitea.com/macaron/toolbox v0.0.0-20190822013122-05ff0fc766b7
github.com/360EntSecGroup-Skylar/excelize/v2 v2.0.2
github.com/BurntSushi/toml v0.3.1
github.com/Jeffail/tunny v0.1.4
github.com/PuerkitoBio/goquery v1.5.0
github.com/RichardKnop/machinery v1.6.9
github.com/alecthomas/chroma v0.10.0
@@ -97,7 +98,6 @@ require (
github.com/yuin/goldmark-highlighting v0.0.0-20220208100518-594be1970594
github.com/yuin/goldmark-meta v1.1.0
golang.org/x/crypto v0.16.0
golang.org/x/exp v0.0.0-20231127185646-65229373498e
golang.org/x/net v0.19.0
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/sys v0.15.0


go.sum (+2, -2)

@@ -54,6 +54,8 @@ github.com/360EntSecGroup-Skylar/excelize/v2 v2.0.2/go.mod h1:EfRHD2k+Kd7ijnqlwO
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Jeffail/tunny v0.1.4 h1:chtpdz+nUtaYQeCKlNBg6GycFF/kGVHOr6A3cmzTJXs=
github.com/Jeffail/tunny v0.1.4/go.mod h1:P8xAx4XQl0xsuhjX1DtfaMDCSuavzdb2rwbd0lk+fvo=
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -858,8 +860,6 @@ golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No=
golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 h1:hVwzHzIUGRjiF7EcUjqNxk3NCfkPxbDKRdnNE1Rpg0U=


manager/client/grampus/grampus.go (+42, -0)

@@ -29,6 +29,7 @@ const (
urlGetResourceSpecs = urlOpenApiV1 + "resourcespec"
urlGetAiCenter = urlOpenApiV1 + "sharescreen/aicenter"
urlGetImages = urlOpenApiV1 + "image"
urlListUserImages = urlOpenApiV1 + "listUserImage"
urlNotebookJob = urlOpenApiV1 + "notebook"
urlInferenceJob = urlOpenApiV1 + "inference"

@@ -351,6 +352,8 @@ sendjob:
_ = getToken()
goto sendjob
}
jsonstr, _ := json.Marshal(result)
log.Info("GetImages resp:", string(jsonstr))

if result.ErrorCode != 0 {
log.Error("GetImages failed(%d): %s", result.ErrorCode, result.ErrorMsg)
@@ -360,6 +363,45 @@ sendjob:
return &result, nil
}

func GetUserImages(processorType string, jobType string) (*models.GetGrampusImagesResult, error) {
checkSetting()
client := getRestyClient()
var result models.GetGrampusImagesResult

retry := 0

queryType := "TrainJob"
if jobType == string(models.JobTypeDebug) {
queryType = "Notebook"
}

sendjob:
_, err := client.R().
SetAuthToken(TOKEN).
SetResult(&result).
Get(HOST + urlListUserImages + "?processorType=" + processorType + "&trainType=" + queryType)

if err != nil {
return nil, fmt.Errorf("resty GetUserImages: %v", err)
}

if result.ErrorCode == errorIllegalToken && retry < 1 {
retry++
log.Info("retry get token")
_ = getToken()
goto sendjob
}
jsonstr, _ := json.Marshal(result)
log.Info("GetUserImages resp:", string(jsonstr))

if result.ErrorCode != 0 {
log.Error("GetUserImages failed(%d): %s", result.ErrorCode, result.ErrorMsg)
return &result, fmt.Errorf("GetUserImages failed(%d): %s", result.ErrorCode, result.ErrorMsg)
}

return &result, nil
}

func GetTrainJobLog(jobID string, nodeId ...int) (string, error) {
checkSetting()
client := getRestyClient()

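A rough usage sketch for the new client call; the caller, the "gpu" processor-type value, and the log wording are assumptions here, only GetUserImages itself comes from this patch:

// Hypothetical caller: list the current user's Notebook (debug) images for GPU via the grampus client.
func listUserGPUNotebookImages() {
	result, err := grampus.GetUserImages("gpu", string(models.JobTypeDebug)) // "gpu" is an illustrative processorType
	if err != nil {
		log.Error("GetUserImages failed: %v", err)
		return
	}
	// ErrorCode/ErrorMsg have already been checked inside the client; zero means success.
	log.Info("GetUserImages ok, errorCode=%d", result.ErrorCode)
}
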

models/action.go (+3, -1)

@@ -79,6 +79,7 @@ const (
ActionCreateGrampusGPUInferenceTask //50
ActionCreateGrampusILUVATARInferenceTask //51
ActionInviteFriendRegister //52
ActionCreateGrampusILUVATARTrainTask
)

// Action represents user operation type and other information to
@@ -466,7 +467,8 @@ func (a *Action) IsCloudbrainAction() bool {
ActionCreateGrampusMETAXDebugTask,
ActionCreateSuperComputeTask,
ActionCreateGrampusILUVATARInferenceTask,
ActionCreateGrampusGPUInferenceTask:
ActionCreateGrampusGPUInferenceTask,
ActionCreateGrampusILUVATARTrainTask:
return true
}
return false


models/card_request.go (+7, -4)

@@ -34,6 +34,7 @@ type CardRequest struct {
Contact string
PhoneNumber string
EmailAddress string
Wechat string
Org string `xorm:"varchar(500)"`
Description string `xorm:"varchar(3000)"`
Status int
@@ -59,6 +60,7 @@ type CardRequestSpecRes struct {
Contact string
PhoneNumber string
EmailAddress string
Wechat string
Org string
Description string
Status int
@@ -118,6 +120,7 @@ type CardRequestSpecShow struct {
Contact string `json:"contact"`
PhoneNumber string `json:"phone_number"`
EmailAddress string `json:"email_address"`
Wechat string `json:"wechat"`
Org string `json:"org"`
Description string `json:"description"`
Status int `json:"status"`
@@ -282,8 +285,8 @@ func SearchCardRequest(opts *CardRequestOptions) (int64, []*CardRequestSpecRes,
cond = cond.And(builder.Or(builder.Like{"LOWER(card_request.contact)", lowerKeyWord},
builder.Like{"LOWER(card_request.acc_cards_num)", lowerKeyWord},
builder.Like{"LOWER(card_request.description)", lowerKeyWord}, builder.Like{"LOWER(card_request.description)", lowerKeyWord},
builder.Like{"LOWER(card_request.phone_number)", lowerKeyWord}, builder.Like{"LOWER(card_request.org)", lowerKeyWord},
builder.Like{"LOWER(\"user\".name)", lowerKeyWord}))
builder.Like{"LOWER(card_request.phone_number)", lowerKeyWord}, builder.Like{"LOWER(card_request.wechat)", lowerKeyWord},
builder.Like{"LOWER(card_request.org)", lowerKeyWord}, builder.Like{"LOWER(\"user\".name)", lowerKeyWord}))
}
if opts.UserID != 0 {
cond = cond.And(builder.Eq{"\"user\".id": opts.UserID})
@@ -331,7 +334,7 @@ func SearchCardRequest(opts *CardRequestOptions) (int64, []*CardRequestSpecRes,
cond = cond.And(builder.NewCond().Or(builder.Eq{"card_request.delete_unix": 0}).Or(builder.IsNull{"card_request.delete_unix"}))
cols := []string{"card_request.id", "card_request.compute_resource", "card_request.contact", "card_request.card_type", "card_request.acc_cards_num",
"card_request.disk_capacity", "card_request.resource_type", "card_request.begin_date", "card_request.end_date", "card_request.uid",
"card_request.phone_number", "card_request.email_address", "card_request.org", "card_request.description", "card_request.status", "card_request.review",
"card_request.phone_number", "card_request.email_address", "card_request.wechat", "card_request.org", "card_request.description", "card_request.status", "card_request.review",
"card_request.created_unix"}
var count int64
var err error
@@ -437,6 +440,6 @@ func SearchCardRequest(opts *CardRequestOptions) (int64, []*CardRequestSpecRes,
}

func UpdateCardRequest(cardRequest *CardRequest) error {
_, err := x.ID(cardRequest.ID).Cols("compute_resource", "contact", "card_type", "acc_cards_num", "disk_capacity", "resource_type", "begin_date", "end_date", "phone_number", "email_address", "org", "description", "begin_unix", "end_unix").Update(cardRequest)
_, err := x.ID(cardRequest.ID).Cols("compute_resource", "contact", "card_type", "acc_cards_num", "disk_capacity", "resource_type", "begin_date", "end_date", "phone_number", "wechat", "email_address", "org", "description", "begin_unix", "end_unix").Update(cardRequest)
return err
}

models/cloudbrain.go (+20, -11)

@@ -835,6 +835,7 @@ type CloudbrainsOptions struct {
AccCardType string
AccCardsNum int
WorkServerNumber int
QueueId int64
}

type TaskPod struct {
@@ -1147,12 +1148,20 @@ type CommitImageCloudBrainParams struct {

type CommitImageParams struct {
CommitImageCloudBrainParams
IsPrivate bool
Topics []string
CloudBrainType int
UID int64
Place string
Type int
IsPrivate bool
Topics []string
CloudBrainType int
UID int64
Place string
Type int
Framework string
FrameworkVersion string
CudaVersion string
PythonVersion string
OperationSystem string
OperationSystemVersion string
ThirdPackages string
ComputeResource string
}

type CommitImageResult struct {
@@ -2037,17 +2046,14 @@ type GrampusResourceQueue struct {

}

type AICenterImage struct {
AICenterID string `json:"aiCenterId"`
ImageUrl string `json:"imageUrl"`
}
type GrampusImage struct {
CreatedAt int64 `json:"createdAt"`
UpdatedAt int64 `json:"updatedAt"`
ID string `json:"id"`
Name string `json:"name"`
ProcessorType string `json:"processorType"`
AICenterImage []AICenterImage `json:"aiCenterImages"`
TrainType string `json:"trainType"`
AICenterImage []AiCenterImage `json:"aiCenterImages"`
}

type GetGrampusImagesResult struct {
@@ -3180,6 +3186,9 @@ func CloudbrainAll(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, error) {
if opts.AccCardsNum >= 0 {
cond = cond.And(builder.Eq{"cloudbrain_spec.acc_cards_num": opts.AccCardsNum})
}
if opts.QueueId > 0 {
cond = cond.And(builder.Eq{"cloudbrain_spec.queue_id": opts.QueueId})
}

var count int64
var err error


models/cloudbrain_image.go (+287, -41)

@@ -1,12 +1,15 @@
package models

import (
"database/sql/driver"
"encoding/json"
"fmt"
"strings"
"unicode/utf8"

"xorm.io/builder"

"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
)

@@ -24,27 +27,60 @@ const OKApply ApplyStatus = 3
const FailApply ApplyStatus = 4

type Image struct {
ID int64 `xorm:"pk autoincr" json:"id"`
ImageID string `json:"image_id"`
Type int `xorm:"INDEX NOT NULL" json:"type"` //0 normal 5官方推荐,中间值保留为后续扩展
CloudbrainType int `xorm:"INDEX NOT NULL" json:"cloudbrainType"` //0 云脑一 1云脑二
UID int64 `xorm:"INDEX NOT NULL" json:"uid"`
IsPrivate bool `xorm:"INDEX NOT NULL" json:"isPrivate"`
Tag string `xorm:"varchar(100) UNIQUE" json:"tag"`
Description string `xorm:"varchar(1000)" json:"description"`
Topics []string `xorm:"TEXT JSON" json:"topics"`
Place string `xorm:"varchar(300)" json:"place"`
NumStars int `xorm:"NOT NULL DEFAULT 0" json:"numStars"`
ComputeResource string `xorm:"varchar(30)" json:"compute_resource"`
IsStar bool `xorm:"-" json:"isStar"`
UserName string `xorm:"-" json:"userName"`
RelAvatarLink string `xorm:"-" json:"relAvatarLink"`
Status int `xorm:"INDEX NOT NULL DEFAULT 0" json:"status"` //0代表正在提交,1提交完成,2提交失败
ApplyStatus ApplyStatus `xorm:"DEFAULT 1" json:"apply_status"`
Message string `xorm:"varchar(500)" json:"message"`
UseCount int64 `xorm:"NOT NULL DEFAULT 0" json:"useCount"`
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created" json:"createdUnix"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated" json:"updatedUnix"`
ID int64 `xorm:"pk autoincr" json:"id"`
ImageID string `json:"image_id"`
Type int `xorm:"INDEX NOT NULL" json:"type"` //0 normal 5官方推荐,中间值保留为后续扩展
CloudbrainType int `xorm:"INDEX NOT NULL" json:"cloudbrainType"` //0 云脑一 1云脑二
UID int64 `xorm:"INDEX NOT NULL" json:"uid"`
IsPrivate bool `xorm:"INDEX NOT NULL" json:"isPrivate"`
Tag string `xorm:"varchar(100) UNIQUE" json:"tag"`
Description string `xorm:"varchar(1000)" json:"description"`
Topics []string `xorm:"TEXT JSON" json:"topics"`
Place string `xorm:"varchar(300)" json:"place"`
NumStars int `xorm:"NOT NULL DEFAULT 0" json:"numStars"`
ComputeResource string `xorm:"varchar(30)" json:"compute_resource"`
IsStar bool `xorm:"-" json:"isStar"`
UserName string `xorm:"-" json:"userName"`
RelAvatarLink string `xorm:"-" json:"relAvatarLink"`
Status int `xorm:"INDEX NOT NULL DEFAULT 0" json:"status"` //0代表正在提交,1提交完成,2提交失败
ApplyStatus ApplyStatus `xorm:"DEFAULT 1" json:"apply_status"`
Message string `xorm:"varchar(500)" json:"message"`
UseCount int64 `xorm:"NOT NULL DEFAULT 0" json:"useCount"`
Framework string `xorm:"varchar(100)" json:"framework" `
FrameworkVersion string `xorm:"varchar(50)" json:"frameworkVersion" `
CudaVersion string `xorm:"varchar(50)" json:"cudaVersion" `
PythonVersion string `xorm:"varchar(50)" json:"pythonVersion" `
OperationSystem string `xorm:"varchar(100)" json:"operationSystem" `
OperationSystemVersion string `xorm:"varchar(50)" json:"operationSystemVersion" `
ThirdPackages string `xorm:"varchar(1000)" json:"thirdPackages" `
AiCenterImages `gorm:"type:json;comment:智算中心镜像"`
GrampusBaseImage int `xorm:"" json:"grampusBaseImage"` //为1,表示分中心基础镜像
TrainType string `xorm:"varchar(30)" json:"trainType"` // 镜像适用调试还是训练
HuJingId string `xorm:"varchar(50)" json:"huJingId" ` //虎鲸侧镜像ID,用于同步镜像信息
ProcessorType string `xorm:"varchar(50)" json:"processorType" ` //虎鲸侧资源类型,用于同步镜像信息
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created" json:"createdUnix"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated" json:"updatedUnix"`
}

type AiCenterImages []AiCenterImage

type AiCenterImage struct {
AiCenterId string `json:"aiCenterId"`
ImageUrl string `json:"imageUrl"`
ImageId string `json:"imageId"`
}

func (r AiCenterImages) Value() (driver.Value, error) {
return json.Marshal(r)
}

func (r *AiCenterImages) Scan(input interface{}) error {
switch v := input.(type) {
case []byte:
return json.Unmarshal(input.([]byte), r)
default:
return fmt.Errorf("cannot Scan() from: %#v", v)
}
}

type ImageList []*Image
@@ -64,24 +100,48 @@ type ImageTopic struct {
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}

type ImageVersionList struct {
}

type ImageTopicRelation struct {
ImageID int64 `xorm:"UNIQUE(s)"`
TopicID int64 `xorm:"UNIQUE(s)"`
}
type SearchAvailableValueOptions struct {
Column string
Framework string
FrameworkVersion string
PythonVersion string
ComputeResource string
OnlyRecommend bool
IncludeOwnerOnly bool
IncludeStarByMe bool
UID int64
}

type SearchImageOptions struct {
Keyword string
UID int64
Status int
IncludePublicOnly bool
IncludeOfficialOnly bool
IncludePrivateOnly bool
IncludeStarByMe bool
IncludeCustom bool
IncludeOwnerOnly bool
Topics string
ApplyStatus int
CloudbrainType int
Keyword string
UID int64
Status int
IncludePublicOnly bool
IncludeOfficialOnly bool
IncludePrivateOnly bool
IncludeStarByMe bool
IncludeCustom bool
IncludeOwnerOnly bool
Topics string
ApplyStatus int
CloudbrainType int
Framework string
FrameworkVersion string
CudaVersion string
PythonVersion string
OperationSystem string
OperationSystemVersion string
ThirdPackages string
AiCenterId string
TrainType string
ComputeResource string
ListOptions
SearchOrderBy
}
@@ -113,12 +173,20 @@ type CommitImageGrampusParams struct {
}
type CommitGrampusImageParams struct {
CommitImageGrampusParams
IsPrivate bool
Topics []string
CloudBrainType int
UID int64
Place string
Type int
IsPrivate bool
Topics []string
CloudBrainType int
UID int64
Place string
Type int
Framework string
FrameworkVersion string
CudaVersion string
PythonVersion string
OperationSystem string
OperationSystemVersion string
ThirdPackages string
ComputeResource string
}

type CommitGrampusImageResult struct {
@@ -231,6 +299,15 @@ func GetImageByTag(tag string) (*Image, error) {
return image, nil
}

func GetImageByComputeResource(computeResource string) ([]Image, error) {
images := make([]Image, 0)
err := x.Where("status=1 and compute_resource=?", computeResource).Find(&images)
if err != nil {
return nil, err
}
return images, nil
}

func GetImageByTagAndCloudbrainType(tag string, cloudbrainType int) (*Image, error) {
image := &Image{Tag: tag}
has, err := x.
@@ -403,6 +480,55 @@ func removeTopicFromImage(e Engine, imageId int64, topic *ImageTopic) error {
return nil
}

func GetImageAvailableColumnValues(opts *SearchAvailableValueOptions) []string {
var cond = builder.NewCond()

if opts.Framework != "" {
cond = cond.And(builder.Eq{"framework": opts.Framework})
}
if opts.FrameworkVersion != "" {
cond = cond.And(builder.Eq{"framework_version": opts.FrameworkVersion})
}
if opts.PythonVersion != "" {
cond = cond.And(builder.Eq{"python_version": opts.PythonVersion})
}
if opts.ComputeResource != "" {
cond = cond.And(builder.Eq{"compute_resource": opts.ComputeResource})
}
if opts.OnlyRecommend {
cond = cond.And(builder.Eq{"type": RECOMMOND_TYPE})
}
if opts.IncludeOwnerOnly {

cond = cond.And(builder.Eq{"uid": opts.UID})
}

if opts.IncludeStarByMe {

subQuery := builder.Select("image_id").From("image_star").
Where(builder.Eq{"uid": opts.UID})
var starCond = builder.In("id", subQuery)
cond = cond.And(starCond)

}

cond = cond.And(builder.NotNull{opts.Column})

var columnValues []string
var re []string
_ = x.Table("image").Where(cond).Distinct(opts.Column).Desc(opts.Column).Cols(opts.Column).Find(&columnValues)
if columnValues == nil {
return []string{}
} else {
for _, tmp := range columnValues {
if tmp != "" {
re = append(re, tmp)
}
}
}
return re
}

func SearchImage(opts *SearchImageOptions) (ImageList, int64, error) {
cond := SearchImageCondition(opts)
return SearchImageByCondition(opts, cond)
@@ -431,6 +557,13 @@ func SearchImageCondition(opts *SearchImageOptions) builder.Cond {

likes = likes.Or(builder.Like{"LOWER(description)", strings.ToLower(v)})

likes = likes.Or(builder.Like{"LOWER(third_packages)", strings.ToLower(v)})

likes = likes.Or(builder.Like{"LOWER(CONCAT(framework,framework_version))", strings.ToLower(v)})
likes = likes.Or(builder.Like{"LOWER(CONCAT('cuda',cuda_version))", strings.ToLower(v)})
likes = likes.Or(builder.Like{"LOWER(CONCAT('python',python_version))", strings.ToLower(v)})
likes = likes.Or(builder.Like{"LOWER(CONCAT(operation_system,operation_system_version))", strings.ToLower(v)})

}
keywordCond = keywordCond.Or(likes)

@@ -473,6 +606,48 @@ func SearchImageCondition(opts *SearchImageOptions) builder.Cond {
if opts.Status >= 0 {
cond = cond.And(builder.Eq{"status": opts.Status})
}
if opts.Framework != "" {
cond = cond.And(builder.Eq{"framework": opts.Framework})
}
if opts.FrameworkVersion != "" {
cond = cond.And(builder.Eq{"framework_version": opts.FrameworkVersion})
}
if opts.CudaVersion != "" {
cond = cond.And(builder.Eq{"cuda_version": opts.CudaVersion})
}
if opts.PythonVersion != "" {
cond = cond.And(builder.Eq{"python_version": opts.PythonVersion})
}
if opts.OperationSystem != "" {
cond = cond.And(builder.Eq{"operation_system": opts.OperationSystem})
}
if opts.OperationSystemVersion != "" {
cond = cond.And(builder.Eq{"operation_system_version": opts.OperationSystemVersion})
}
if opts.AiCenterId != "" {
cond = cond.And(builder.Like{"ai_center_images", "\"aiCenterId\":\"" + opts.AiCenterId + "\""})
}
if opts.TrainType != "" {
var orCond = builder.NewCond()
orCond = orCond.Or(builder.Eq{"train_type": opts.TrainType})
orCond = orCond.Or(builder.Eq{"train_type": ""})
log.Info("opts.TrainType=" + opts.TrainType)
cond = cond.And(orCond)
}
if opts.ComputeResource != "" {
cond = cond.And(builder.Eq{"compute_resource": opts.ComputeResource})
}
var third_likes, isValid = builder.NewCond(), false
for _, v := range strings.Split(opts.ThirdPackages, " ") {
if v != "" {
third_likes = third_likes.And(builder.Like{"LOWER(third_packages)", strings.ToLower(v)})
isValid = true
}

}
if isValid {
cond = cond.And(third_likes)
}

if opts.IncludeStarByMe {

@@ -579,7 +754,8 @@ func CreateLocalImage(image *Image) error {

func UpdateLocalImage(image *Image) error {

_, err := x.ID(image.ID).Cols("description", "is_private", "status", "message", "apply_status").Update(image)
_, err := x.ID(image.ID).Cols("description", "is_private", "status", "message", "apply_status", "framework", "framework_version",
"cuda_version", "python_version", "operation_system", "operation_system_version,third_packages").Update(image)
return err
}

@@ -591,7 +767,7 @@ func UpdateLocalImageStatus(image *Image) error {

func UpdateLocalImageStatusAndPlace(image *Image) error {

_, err := x.ID(image.ID).Cols("status", "place").Update(image)
_, err := x.ID(image.ID).Cols("status", "place", "ai_center_images").Update(image)
return err
}

@@ -672,3 +848,73 @@ func GetRecommondType(recommond bool) int {
}

}

func GetGrampusAllBaseImage() (ImageList, error) {
var cond = builder.NewCond()
cond = cond.And(builder.Eq{"grampus_base_image": 1})
images := make(ImageList, 0, 100)

err := x.Table("image").Where(cond).Find(&images)
if err != nil {
return nil, err
}
return images, nil
}

func getComputeResourceByProcessType(processType string) string {
tail := strings.LastIndex(processType, "/")
if tail > 0 {
return strings.ToUpper(processType[tail+1:])
}
return strings.ToUpper(processType)
}

func SyncGrampusAllBaseImageToDb(insertList []GrampusImage, updateList []GrampusImage, updateDbList []*Image, deleteList []*Image, doerId int64) error {
for _, tmp := range insertList {
log.Info("insert image,image name=" + tmp.Name)
insObj := &Image{
Type: 5,
CloudbrainType: 2,
UID: doerId,
IsPrivate: false,
Tag: tmp.Name,
ProcessorType: tmp.ProcessorType,
ComputeResource: getComputeResourceByProcessType(tmp.ProcessorType),
Status: 1,
GrampusBaseImage: 1,
TrainType: tmp.TrainType,
HuJingId: tmp.ID,
AiCenterImages: tmp.AICenterImage,
}
if insObj.ComputeResource == "GPU" && tmp.AICenterImage != nil && len(tmp.AICenterImage) > 0 {
insObj.Place = tmp.AICenterImage[0].ImageUrl
}
x.Insert(insObj)
}
for _, tmp := range deleteList {
log.Info("delete image,image name=" + tmp.Tag)
x.Delete(tmp)
}

for i, tmp := range updateList {
log.Info("update image,image name=" + tmp.Name)
updateDbList[i].AiCenterImages = tmp.AICenterImage
updateDbList[i].Tag = tmp.Name
x.ID(updateDbList[i].ID).Cols("ai_center_images", "tag").Update(updateDbList[i])
}

return nil
}

func GetGrampusSrcAllBaseImage(srcImageUrl string) (ImageList, error) {
var cond = builder.NewCond()
cond = cond.And(builder.Eq{"grampus_base_image": 1})
cond = cond.And(builder.Eq{"train_type": "Notebook"})
cond = cond.And(builder.Like{"ai_center_images", "\"imageUrl\":\"" + srcImageUrl + "\""})
images := make(ImageList, 0, 10)
err := x.Table("image").Where(cond).Find(&images)
if err != nil {
return nil, err
}
return images, nil
}
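
The Value/Scan pair on AiCenterImages above is the standard database/sql technique for persisting a slice as a single JSON column. A self-contained sketch of the same pattern follows; the DemoImages type and the sample values are made up and only the technique matches the patch:

package main

import (
	"database/sql/driver"
	"encoding/json"
	"fmt"
)

// DemoImages plays the role of AiCenterImages: a slice stored as one JSON column.
type DemoImages []struct {
	AiCenterId string `json:"aiCenterId"`
	ImageUrl   string `json:"imageUrl"`
}

// Value serialises the slice to JSON when the ORM writes the row.
func (d DemoImages) Value() (driver.Value, error) {
	return json.Marshal(d)
}

// Scan restores the slice from the raw JSON bytes read back from the column.
func (d *DemoImages) Scan(input interface{}) error {
	b, ok := input.([]byte)
	if !ok {
		return fmt.Errorf("cannot Scan() from: %#v", input)
	}
	return json.Unmarshal(b, d)
}

func main() {
	imgs := DemoImages{{AiCenterId: "center-a", ImageUrl: "registry/a:latest"}}
	v, _ := imgs.Value()
	raw := v.([]byte) // json.Marshal produced []byte, so the assertion holds
	var back DemoImages
	_ = back.Scan(raw)
	fmt.Println(string(raw), len(back))
}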

models/cloudbrain_spec.go (+12, -3)

@@ -30,6 +30,13 @@ type CloudbrainSpec struct {
UpdatedTime timeutil.TimeStamp `xorm:"updated"`
}

func (s CloudbrainSpec) ExclusiveType() string {
if s.IsExclusive {
return SpecExclusive
}
return SpecPublic
}

func (s CloudbrainSpec) ConvertToSpecification() *Specification {
return &Specification{
ID: s.SpecId,
@@ -44,10 +51,12 @@ func (s CloudbrainSpec) ConvertToSpecification() *Specification {
UnitPrice: s.UnitPrice,
QueueId: s.QueueId,
QueueCode: s.QueueCode,
QueueName: s.QueueName,
QueueType: s.QueueType,
Cluster: s.Cluster,
AiCenterCode: s.AiCenterCode,
AiCenterName: s.AiCenterName,
IsExclusive: s.IsExclusive,
SpecExclusiveType: s.ExclusiveType(),
ExclusiveOrg: s.ExclusiveOrg,
HasInternet: s.HasInternet,
}
@@ -71,7 +80,7 @@ func NewCloudBrainSpec(cloudbrainId int64, s Specification) CloudbrainSpec {
Cluster: s.Cluster,
AiCenterCode: s.AiCenterCode,
AiCenterName: s.AiCenterName,
IsExclusive: s.IsExclusive,
IsExclusive: s.IsSpecExclusive(),
ExclusiveOrg: s.ExclusiveOrg,
HasInternet: s.HasInternet,
}
@@ -170,7 +179,7 @@ func UpdateCloudbrainSpec(cloudbrainId int64, s *Specification) (int64, error) {
Cluster: s.Cluster,
AiCenterCode: s.AiCenterCode,
AiCenterName: s.AiCenterName,
IsExclusive: s.IsExclusive,
IsExclusive: s.IsSpecExclusive(),
ExclusiveOrg: s.ExclusiveOrg,
HasInternet: s.HasInternet,
}
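
The boolean/string round-trip introduced here works in both directions: NewCloudBrainSpec stores Specification.SpecExclusiveType as the IsExclusive flag via IsSpecExclusive(), and ConvertToSpecification reads it back through ExclusiveType(). A hedged illustration, with made-up values:

// Hypothetical round-trip: an exclusive Specification survives conversion to a CloudbrainSpec and back.
func demoExclusiveRoundTrip() {
	src := models.Specification{ID: 1, SpecExclusiveType: models.SpecExclusive}
	cbSpec := models.NewCloudBrainSpec(42, src)  // IsExclusive becomes true via IsSpecExclusive()
	restored := cbSpec.ConvertToSpecification()  // SpecExclusiveType restored via ExclusiveType()
	log.Info("restored exclusive type: %s", restored.SpecExclusiveType)
}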


models/models.go (+1, -0)

@@ -195,6 +195,7 @@ func init() {
new(UserBusinessAnalysisYesterday),
new(UserBusinessAnalysisLastWeek),
new(UserLoginLog),
new(UserLoginActionLog),
new(UserOtherInfo),
new(UserMetrics),
new(UserAnalysisPara),


models/resource_exclusive_pool.go (+61, -0)

@@ -0,0 +1,61 @@
package models

import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
)

type ResourceExclusivePool struct {
ID int64 `xorm:"pk autoincr"`
SceneId int64
OrgName string
JobType string
Cluster string
QueueId int64
ComputeResource string
CreatedTime timeutil.TimeStamp `xorm:"created"`
CreatedBy int64
UpdatedTime timeutil.TimeStamp `xorm:"updated"`
UpdatedBy int64
}

func FindExclusivePools() ([]*ResourceExclusivePool, error) {
sq := make([]*ResourceExclusivePool, 0)

err := x.Find(&sq)
if err != nil {
return nil, err
}
return sq, nil
}

func InsertExclusivePools(queue []ResourceExclusivePool) (int64, error) {
return x.Insert(&queue)
}

func IsQueueInExclusivePool(queueId int64) bool {
n, _ := x.In("queue_id", queueId).Count(&ResourceExclusivePool{})
return n > 0
}

func FindExclusiveQueueIds() []int64 {
existsIds := make([]int64, 0)
err := x.Table("resource_exclusive_pool").Distinct("queue_id").Find(&existsIds)
if err != nil {
log.Error("FindQueuesExclusiveMap err.%v", err)
return existsIds
}

return existsIds
}

func FindQueuesExclusiveMap() map[int64]string {
resultMap := make(map[int64]string, 0)

existsIds := FindExclusiveQueueIds()

for _, id := range existsIds {
resultMap[id] = ""
}
return resultMap
}
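
A hedged sketch of how these lookups might be consumed when building a queue listing; the surrounding function, its parameters, and the log text are hypothetical:

// Hypothetical listing code: mark queues that belong to an exclusive pool.
func markExclusiveQueues(queues []models.ResourceQueue) {
	exclusive := models.FindQueuesExclusiveMap() // one query, instead of calling IsQueueInExclusivePool per row
	for _, q := range queues {
		if _, ok := exclusive[q.ID]; ok {
			log.Info("queue %s (id=%d) is reserved by an exclusive pool", q.QueueCode, q.ID)
		}
	}
}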

models/resource_queue.go (+144, -26)

@@ -2,6 +2,7 @@ package models

import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
"errors"
"strconv"
@@ -27,6 +28,7 @@ type ResourceQueue struct {
CardsTotalNum int
HasInternet int //0 unknown;1 no internet;2 has internet
IsAutomaticSync bool
IsAvailable bool
Remark string
DeletedTime timeutil.TimeStamp `xorm:"deleted"`
CreatedTime timeutil.TimeStamp `xorm:"created"`
@@ -50,6 +52,7 @@ func (r ResourceQueue) ConvertToRes() *ResourceQueueRes {
UpdatedTime: r.UpdatedTime,
Remark: r.Remark,
HasInternet: AICenterInternetStatus(r.HasInternet),
IsAvailable: r.IsAvailable,
}
}

@@ -66,9 +69,14 @@ type ResourceQueueReq struct {
Remark string
QueueName string
QueueType string
IsAvailable int
}

func (r ResourceQueueReq) ToDTO() ResourceQueue {
isAvailable := false
if r.IsAvailable == 2 {
isAvailable = true
}
q := ResourceQueue{
QueueCode: r.QueueCode,
Cluster: r.Cluster,
@@ -83,6 +91,7 @@ func (r ResourceQueueReq) ToDTO() ResourceQueue {
UpdatedBy: r.CreatorId,
QueueName: r.QueueName,
QueueType: r.QueueType,
IsAvailable: isAvailable,
}
if r.Cluster == OpenICluster {
if r.AiCenterCode == AICenterOfCloudBrainOne {
@@ -98,12 +107,14 @@ func (r ResourceQueueReq) ToDTO() ResourceQueue {

type SearchResourceQueueOptions struct {
ListOptions
Cluster string
AiCenterCode string
ComputeResource string
AccCardType string
HasInternet SpecInternetQuery
QueueType string
Cluster string
AiCenterCode string
ComputeResource string
AccCardType string
HasInternet SpecInternetQuery
QueueType string
IsAvailable int
IsQueueExclusive int
}

type ResourceQueueListRes struct {
@@ -132,14 +143,22 @@ type ResourceAiCenterRes struct {
AiCenterName string
}

func (r *ResourceAiCenterRes) Tr(language string) {
r.AiCenterName = GetAiCenterShow(r.AiCenterCode, r.AiCenterName, language)
}

type GetQueueCodesOptions struct {
Cluster string
}

func NewResourceQueueListRes(totalSize int64, list []ResourceQueue) *ResourceQueueListRes {
exclusiveMap := FindQueuesExclusiveMap()
resList := make([]*ResourceQueueRes, len(list))
for i, v := range list {
resList[i] = v.ConvertToRes()
if _, exists := exclusiveMap[v.ID]; exists {
resList[i].IsQueueExclusive = true
}
}
return &ResourceQueueListRes{
TotalSize: totalSize,
@@ -148,19 +167,21 @@ func NewResourceQueueListRes(totalSize int64, list []ResourceQueue) *ResourceQue
}

type ResourceQueueRes struct {
ID int64
QueueCode string
QueueType string
QueueName string
Cluster string
AiCenterCode string
AiCenterName string
ComputeResource string
AccCardType string
CardsTotalNum int
UpdatedTime timeutil.TimeStamp
Remark string
HasInternet AICenterInternetStatus
ID int64
QueueCode string
QueueType string
QueueName string
Cluster string
AiCenterCode string
AiCenterName string
ComputeResource string
AccCardType string
CardsTotalNum int
UpdatedTime timeutil.TimeStamp
Remark string
HasInternet AICenterInternetStatus
IsAvailable bool
IsQueueExclusive bool
}

func InsertResourceQueue(queue ResourceQueue) (int64, error) {
@@ -170,8 +191,54 @@ func InsertResourceQueue(queue ResourceQueue) (int64, error) {
func UpdateResourceQueueById(queueId int64, queue ResourceQueue) (int64, error) {
return x.ID(queueId).Update(&queue)
}
func UpdateResourceCardsTotalNumAndInternetStatus(queueId int64, queue ResourceQueue) (int64, error) {
return x.ID(queueId).Cols("cards_total_num", "remark", "has_internet", "queue_type", "queue_name").Update(&queue)
func UpdateResourceCardsTotalNumAndInternetStatus(queueId int64, queue ResourceQueue, isAvailable int) (int64, error) {
sess := x.NewSession()
if err := sess.Begin(); err != nil {
sess.Close()
return 0, err
}
var err error
defer func() {
if err != nil {
sess.Rollback()
}
sess.Close()
}()

cols := []string{"cards_total_num", "remark", "has_internet", "queue_type", "queue_name"}
if isAvailable > 0 {
if isAvailable == 1 {
cols = append(cols, "is_available")
queue.IsAvailable = false
} else if isAvailable == 2 {
cols = append(cols, "is_available")
queue.IsAvailable = true
}
}

n, err := sess.ID(queueId).Cols(cols...).Update(&queue)
if err != nil {
return 0, err
}
specIds := make([]int64, 0)
if err = sess.Cols("resource_specification.id").Table("resource_specification").
In("queue_id", queueId).Find(&specIds); err != nil {
return 0, err
}
if len(specIds) == 0 {
return n, nil
}
if isAvailable == 1 {
if _, err = sess.Cols("status", "is_available").Table("resource_specification").In("id", specIds).Update(&ResourceSpecification{Status: SpecOffShelf, IsAvailable: false}); err != nil {
return 0, err
}
} else if isAvailable == 2 {
if _, err = sess.Cols("is_available").Table("resource_specification").In("id", specIds).Update(&ResourceSpecification{IsAvailable: true}); err != nil {
return 0, err
}
}
sess.Commit()
return n, nil
}

func SearchResourceQueue(opts SearchResourceQueueOptions) (int64, []ResourceQueue, error) {
@@ -199,6 +266,21 @@ func SearchResourceQueue(opts SearchResourceQueueOptions) (int64, []ResourceQueu
if opts.QueueType != "" {
cond = cond.And(builder.Eq{"queue_type": opts.QueueType})
}
if opts.IsAvailable > 0 {
if opts.IsAvailable == 1 {
cond = cond.And(builder.Eq{"is_available": false})
} else if opts.IsAvailable == 2 {
cond = cond.And(builder.Eq{"is_available": true})
}
}
if opts.IsQueueExclusive > 0 {
queueIds := FindExclusiveQueueIds()
if opts.IsQueueExclusive == 1 {
cond = cond.And(builder.NotIn("id", queueIds))
} else if opts.IsQueueExclusive == 2 {
cond = cond.And(builder.In("id", queueIds))
}
}
n, err := x.Where(cond).Unscoped().Count(&ResourceQueue{})
if err != nil {
return 0, nil, err
@@ -334,10 +416,7 @@ func SyncGrampusQueues(updateList []ResourceQueue, insertList []ResourceQueue, e
}

if len(deleteQueueIds) > 0 {
if _, err = sess.In("id", deleteQueueIds).Update(&ResourceQueue{Remark: "自动同步时被下架"}); err != nil {
return err
}
if _, err = sess.In("id", deleteQueueIds).Delete(&ResourceQueue{}); err != nil {
if _, err = sess.Cols("is_available").Table("resource_queue").In("id", deleteQueueIds).Update(&ResourceQueue{IsAvailable: false}); err != nil {
return err
}

@@ -348,7 +427,7 @@ func SyncGrampusQueues(updateList []ResourceQueue, insertList []ResourceQueue, e
return err
}
if len(deleteSpcIds) > 0 {
if _, err = sess.In("id", deleteSpcIds).Update(&ResourceSpecification{Status: SpecOffShelf}); err != nil {
if _, err = sess.Cols("status", "is_available").Table("resource_specification").In("id", deleteSpcIds).Update(&ResourceSpecification{Status: SpecOffShelf, IsAvailable: false}); err != nil {
return err
}
}
@@ -361,6 +440,9 @@ func SyncGrampusQueues(updateList []ResourceQueue, insertList []ResourceQueue, e
if _, err = sess.ID(v.ID).Update(&v); err != nil {
return err
}
if _, err = sess.ID(v.ID).Cols("is_available").Table("resource_queue").Update(&v); err != nil {
return err
}
}

}
@@ -385,6 +467,17 @@ func GetResourceAiCenters() ([]ResourceAiCenterRes, error) {
return r, nil
}

func GetAvailableResourceAiCenters() ([]*ResourceAiCenterRes, error) {
r := make([]*ResourceAiCenterRes, 0)
sql := "SELECT t.ai_center_code, t.ai_center_name FROM (SELECT DISTINCT resource_queue.ai_center_code, resource_queue.ai_center_name,resource_queue.cluster FROM resource_queue inner join resource_specification on resource_queue.id = resource_specification.queue_id inner join resource_scene_spec on resource_specification.id = resource_scene_spec.spec_id WHERE (resource_queue.deleted_time IS NULL OR resource_queue.deleted_time=0) and resource_queue.is_available = true and resource_specification.status = 2 ) t ORDER BY cluster desc,ai_center_code asc"

err := x.SQL(sql).Find(&r)
if err != nil {
return nil, err
}
return r, nil
}

func GetExclusiveQueueIds(opts FindSpecsOptions) []*ResourceExclusivePool {
pools, err := FindExclusivePools()
if err != nil {
@@ -432,3 +525,28 @@ func IsUserInExclusivePool(userId int64) bool {
}
return false
}

var defaultLanguage = "zh-CN"

func GetAiCenterShow(aiCenterCode, aiCenterName, language string) string {
if aiCenterCode == "" {
return aiCenterName
}
if aiCenterName == "" {
aiCenterName = aiCenterCode
}
if setting.AiCenterCodeAndNameAndLocMapInfo != nil {
if info, ok := setting.AiCenterCodeAndNameAndLocMapInfo[aiCenterCode]; ok {
if language == defaultLanguage {
return info.Content
} else {
return info.ContentEN
}
} else {
return aiCenterName
}
} else {
return aiCenterName
}
return ""
}
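
Roughly how the now-shared helper resolves a display name; the center code and names below are invented for illustration:

// GetAiCenterShow prefers the localized entry in setting.AiCenterCodeAndNameAndLocMapInfo,
// falls back to the passed-in name, and substitutes the code when the name is empty.
func demoCenterName() {
	zh := models.GetAiCenterShow("center01", "Example AI Center", "zh-CN")
	en := models.GetAiCenterShow("center01", "Example AI Center", "en-US")
	log.Info("center shown as %q / %q", zh, en)
}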

models/resource_scene.go (+15, -45)

@@ -27,7 +27,6 @@ type ResourceScene struct {
JobType string
Cluster string
ComputeResource string
IsExclusive bool
IsSpecExclusive string
SceneType string //共享或者独占场景
ExclusiveOrg string
@@ -46,22 +45,6 @@ type ResourceSceneSpec struct {
CreatedTime timeutil.TimeStamp `xorm:"created"`
}

type ResourceExclusivePool struct {
ID int64 `xorm:"pk autoincr"`
SceneId int64
OrgName string
JobType string
Cluster string
QueueId int64
ComputeResource string
CreatedTime timeutil.TimeStamp `xorm:"created"`
CreatedBy int64
UpdatedTime timeutil.TimeStamp `xorm:"updated"`
UpdatedBy int64
DeleteTime timeutil.TimeStamp `xorm:"deleted"`
DeletedBy int64
}

type ResourceSceneReq struct {
ID int64
SceneName string
@@ -95,6 +78,7 @@ type ResourceSceneListRes struct {
}

func NewResourceSceneListRes(totalSize int64, list []ResourceSceneRes) *ResourceSceneListRes {

return &ResourceSceneListRes{
TotalSize: totalSize,
List: list,
@@ -139,16 +123,17 @@ type ResourceSpecInfo struct {
UpdatedTime timeutil.TimeStamp
SceneId int64
//queue
Cluster string
AiCenterCode string
AiCenterName string
QueueCode string
QueueType string
QueueName string
QueueId int64
ComputeResource string
AccCardType string
HasInternet int
Cluster string
AiCenterCode string
AiCenterName string
QueueCode string
QueueType string
QueueName string
QueueId int64
ComputeResource string
AccCardType string
HasInternet int
IsQueueExclusive bool
}

func (ResourceSpecInfo) TableName() string {
@@ -177,7 +162,6 @@ func InsertResourceScene(r ResourceSceneReq) error {
rs := ResourceScene{
SceneName: r.SceneName,
JobType: r.JobType,
IsExclusive: r.IsSpecExclusive == "exclusive",
IsSpecExclusive: r.IsSpecExclusive,
SceneType: r.SceneType,
Cluster: r.Cluster,
@@ -275,12 +259,11 @@ func UpdateResourceScene(r ResourceSceneReq) error {
//update scene
rs := ResourceScene{
SceneName: r.SceneName,
IsExclusive: r.IsSpecExclusive == "exclusive",
IsSpecExclusive: r.IsSpecExclusive,
ExclusiveOrg: r.ExclusiveOrg,
SceneType: r.SceneType,
}
if _, err = sess.ID(r.ID).UseBool("is_spec_exclusive", "is_exclusive").Update(&rs); err != nil {
if _, err = sess.ID(r.ID).Cols("is_spec_exclusive", "scene_name", "exclusive_org", "scene_type").Update(&rs); err != nil {
return err
}

@@ -447,7 +430,8 @@ func SearchResourceScene(opts SearchResourceSceneOptions) (int64, []ResourceScen
"resource_queue.ai_center_code", "resource_queue.acc_card_type",
"resource_queue.id as queue_id", "resource_queue.compute_resource",
"resource_queue.queue_code", "resource_queue.ai_center_name",
"resource_queue.has_internet", "resource_queue.queue_name", "resource_queue.queue_type",
"resource_queue.has_internet", "resource_queue.queue_name",
"resource_queue.queue_type",
).In("resource_scene_spec.scene_id", sceneIds).
Join("INNER", "resource_scene_spec", "resource_scene_spec.spec_id = resource_specification.id").
Join("INNER", "resource_queue", "resource_queue.ID = resource_specification.queue_id").
@@ -475,17 +459,3 @@ func SearchResourceScene(opts SearchResourceSceneOptions) (int64, []ResourceScen

return count, r, nil
}

func FindExclusivePools() ([]*ResourceExclusivePool, error) {
sq := make([]*ResourceExclusivePool, 0)

err := x.Find(&sq)
if err != nil {
return nil, err
}
return sq, nil
}

func InsertExclusivePools(queue []ResourceExclusivePool) (int64, error) {
return x.Insert(&queue)
}

models/resource_specification.go (+291, -21)

@@ -229,26 +229,30 @@ type FindSpecsOptions struct {
}

type Specification struct {
ID int64
SourceSpecId string
AccCardsNum int
AccCardType string
CpuCores int
MemGiB float32
GPUMemGiB float32
ShareMemGiB float32
ComputeResource string
UnitPrice int
QueueId int64
QueueCode string
QueueName string
QueueType string
HasInternet int
Cluster string
AiCenterCode string
AiCenterName string
IsExclusive bool
ExclusiveOrg string
ID int64
SourceSpecId string
AccCardsNum int
AccCardType string
CpuCores int
MemGiB float32
GPUMemGiB float32
ShareMemGiB float32
ComputeResource string
UnitPrice int
QueueId int64
QueueCode string
QueueName string
QueueType string
HasInternet int
Cluster string
AiCenterCode string
AiCenterName string
SpecExclusiveType string `xorm:"is_spec_exclusive"`
ExclusiveOrg string
}

func (s Specification) IsSpecExclusive() bool {
return s.SpecExclusiveType == SpecExclusive
}

func (Specification) TableName() string {
@@ -393,7 +397,7 @@ func FilterExclusiveSpecs(r []*Specification, userId int64) []*Specification {
if _, has := specMap[spec.ID]; has {
continue
}
if !spec.IsExclusive {
if !spec.IsSpecExclusive() {
specs = append(specs, spec)
specMap[spec.ID] = ""
continue
@@ -905,3 +909,269 @@ func GetGrampusSpecs() (map[string]*Specification, error) {
}
return grampusSpecs, nil
}

type GetResourceListOpts struct {
ListOptions
Resource []string
AccCardType string
AccCardNum int
ExcludeAccCardNums []int
AICenterCode string
MinPrice int
MaxPrice int
}

type ResourceDetailInfo struct {
Spec ResourceSpecificationRes
IsQueueExclusive bool
AICenterList []ResourceAiCenterRes
}

type ResourceInfo4CardRequest struct {
ComputeResource string
AccCardType string
AccCardsNum int
CpuCores int
MemGiB float32
GPUMemGiB float32
ShareMemGiB float32
UnitPrice int
IsExclusive bool
IsSpecExclusive string
AICenterList []*ResourceAiCenterRes
}

func (r *ResourceInfo4CardRequest) Tr(language string) {
if r.AICenterList == nil {
return
}
for i := 0; i < len(r.AICenterList); i++ {
r.AICenterList[i].Tr(language)
}
}

type ResourceWithAICenter4CardRequest struct {
Cluster string
AICenterCode string
AICenterName string
ComputeResource string
AccCardType string
AccCardsNum int
CpuCores int
MemGiB float32
GPUMemGiB float32
ShareMemGiB float32
UnitPrice int
IsExclusive bool
IsSpecExclusive string
}

func GetResourceListPaging(opts GetResourceListOpts) ([]*ResourceInfo4CardRequest, int64, error) {
cond := builder.NewCond()
resourceList := make([]string, 0)
for i := 0; i < len(opts.Resource); i++ {
if opts.Resource[i] != "" {
resourceList = append(resourceList, opts.Resource[i])
}
}
if len(resourceList) > 0 {
cond = cond.And(builder.In("resource_queue.compute_resource", resourceList))
}
if opts.AccCardType != "" {
cond = cond.And(builder.Eq{"resource_queue.acc_card_type": opts.AccCardType})
}
if opts.AccCardNum >= 0 {
if opts.AccCardNum > 999 {
cond = cond.And(builder.NotIn("resource_specification.acc_cards_num", opts.ExcludeAccCardNums))
} else {
cond = cond.And(builder.Eq{"resource_specification.acc_cards_num": opts.AccCardNum})
}
}
if opts.AICenterCode != "" {
cond = cond.And(builder.Eq{"resource_queue.ai_center_code": opts.AICenterCode})
}
if opts.MaxPrice >= 0 && opts.MinPrice >= 0 && opts.MaxPrice < opts.MinPrice {
opts.MaxPrice = -1
opts.MinPrice = -1
}
if opts.MaxPrice >= 0 {
cond = cond.And(builder.Lte{"resource_specification.unit_price": opts.MaxPrice})
}
if opts.MinPrice >= 0 {
cond = cond.And(builder.Gte{"resource_specification.unit_price": opts.MinPrice})
}
cond = cond.And(builder.Or(builder.Eq{"resource_queue.deleted_time": 0}, builder.IsNull{"resource_queue.deleted_time"}))
cond = cond.And(builder.Eq{"resource_specification.status": 2})
//先按多字段去重分页查询资源规格
//再基于结果查询智算中心信息
resourceInfos := make([]*ResourceInfo4CardRequest, 0)
err := x.Table("resource_specification").
Join("LEFT", "resource_exclusive_pool", "resource_specification.queue_id = resource_exclusive_pool.queue_id").
Join("INNER", "resource_queue", "resource_specification.queue_id = resource_queue.id").
Join("INNER", "resource_scene_spec", "resource_specification.id = resource_scene_spec.spec_id").
Join("INNER", "resource_scene", "resource_scene.id = resource_scene_spec.scene_id").
Select("Distinct resource_queue.compute_resource, resource_queue.acc_card_type," +
"resource_specification.acc_cards_num, resource_specification.cpu_cores, resource_specification.mem_gi_b, " +
"resource_specification.gpu_mem_gi_b, resource_specification.share_mem_gi_b, resource_specification.unit_price," +
"COALESCE(resource_exclusive_pool.queue_id IS NOT NULL, false) AS is_exclusive,resource_scene.is_spec_exclusive").
Where(cond).
OrderBy(" resource_queue.compute_resource DESC,resource_queue.acc_card_type ,is_exclusive," +
"resource_specification.acc_cards_num DESC,resource_specification.gpu_mem_gi_b DESC," +
"resource_specification.cpu_cores DESC,resource_specification.mem_gi_b DESC").
Find(&resourceInfos)

if err != nil {
return nil, 0, err
}
tmpResourceInfos := make([]*ResourceInfo4CardRequest, 0)
for i := 0; i < len(resourceInfos); i++ {
//此处是为了过滤那些专属池中的规格又被配置到共享场景中的情况
if !resourceInfos[i].IsExclusive || (resourceInfos[i].IsExclusive && resourceInfos[i].IsSpecExclusive == "") {
tmpResourceInfos = append(tmpResourceInfos, resourceInfos[i])
}
}
resourceInfos = tmpResourceInfos
if len(resourceInfos) == 0 {
return []*ResourceInfo4CardRequest{}, 0, nil
}

total := int64(len(resourceInfos))
startIndex := int64((opts.Page - 1) * opts.PageSize)
endIndex := int64(opts.Page * opts.PageSize)
if startIndex >= total {
return []*ResourceInfo4CardRequest{}, 0, nil
}
if endIndex > total {
endIndex = total
}
resourceInfos = resourceInfos[startIndex:endIndex]

newCond := builder.NewCond()
for _, spec := range resourceInfos {
if spec.IsExclusive {
newCond = newCond.Or(builder.And(builder.Eq{"resource_queue.compute_resource": spec.ComputeResource},
builder.Eq{"resource_queue.acc_card_type": spec.AccCardType},
builder.Eq{"resource_specification.acc_cards_num": spec.AccCardsNum},
builder.Eq{"resource_specification.cpu_cores": spec.CpuCores},
builder.Eq{"resource_specification.mem_gi_b": spec.MemGiB},
builder.Eq{"resource_specification.gpu_mem_gi_b": spec.GPUMemGiB},
builder.Eq{"resource_specification.share_mem_gi_b": spec.ShareMemGiB},
builder.Eq{"resource_specification.unit_price": spec.UnitPrice},
builder.NotNull{"resource_exclusive_pool.queue_id"}))
} else if spec.IsSpecExclusive == SpecExclusive {
newCond = newCond.Or(builder.And(builder.Eq{"resource_queue.compute_resource": spec.ComputeResource},
builder.Eq{"resource_queue.acc_card_type": spec.AccCardType},
builder.Eq{"resource_specification.acc_cards_num": spec.AccCardsNum},
builder.Eq{"resource_specification.cpu_cores": spec.CpuCores},
builder.Eq{"resource_specification.mem_gi_b": spec.MemGiB},
builder.Eq{"resource_specification.gpu_mem_gi_b": spec.GPUMemGiB},
builder.Eq{"resource_specification.share_mem_gi_b": spec.ShareMemGiB},
builder.Eq{"resource_specification.unit_price": spec.UnitPrice},
builder.Eq{"resource_scene.is_spec_exclusive": SpecExclusive},
builder.IsNull{"resource_exclusive_pool.queue_id"}))
} else {
newCond = newCond.Or(builder.And(builder.Eq{"resource_queue.compute_resource": spec.ComputeResource},
builder.Eq{"resource_queue.acc_card_type": spec.AccCardType},
builder.Eq{"resource_specification.acc_cards_num": spec.AccCardsNum},
builder.Eq{"resource_specification.cpu_cores": spec.CpuCores},
builder.Eq{"resource_specification.mem_gi_b": spec.MemGiB},
builder.Eq{"resource_specification.gpu_mem_gi_b": spec.GPUMemGiB},
builder.Eq{"resource_specification.share_mem_gi_b": spec.ShareMemGiB},
builder.Eq{"resource_specification.unit_price": spec.UnitPrice},
builder.Or(builder.Eq{"resource_scene.is_spec_exclusive": SpecPublic}, builder.IsNull{"resource_scene.is_spec_exclusive"}),
builder.IsNull{"resource_exclusive_pool.queue_id"}))
}

}
newCond = newCond.And(builder.Or(builder.Eq{"resource_queue.deleted_time": 0}, builder.IsNull{"resource_queue.deleted_time"}))
newCond = newCond.And(builder.Eq{"resource_specification.status": 2})

withCenterInfos := make([]ResourceWithAICenter4CardRequest, 0)
err = x.Table("resource_specification").
Join("LEFT", "resource_exclusive_pool", "resource_specification.queue_id = resource_exclusive_pool.queue_id").
Join("INNER", "resource_queue", "resource_specification.queue_id = resource_queue.id").
Join("INNER", "resource_scene_spec", "resource_specification.id = resource_scene_spec.spec_id").
Join("INNER", "resource_scene", "resource_scene.id = resource_scene_spec.scene_id").
Select("resource_queue.cluster,resource_queue.ai_center_code,resource_queue.ai_center_name,resource_queue.compute_resource, resource_queue.acc_card_type," +
"resource_specification.acc_cards_num, resource_specification.cpu_cores, resource_specification.mem_gi_b, " +
"resource_specification.gpu_mem_gi_b, resource_specification.share_mem_gi_b, resource_specification.unit_price," +
"COALESCE(resource_exclusive_pool.queue_id IS NOT NULL, false) AS is_exclusive,resource_scene.is_spec_exclusive").
Where(newCond).
Find(&withCenterInfos)

if err != nil {
return nil, 0, err
}
tmpMap := make(map[string][]*ResourceAiCenterRes, 0)
for i := 0; i < len(withCenterInfos); i++ {
t := withCenterInfos[i]
key := fmt.Sprintf("%s_%s_%d_%d_%f_%f_%f_%d_%t_%s", t.ComputeResource, t.AccCardType, t.AccCardsNum,
t.CpuCores, t.MemGiB, t.GPUMemGiB, t.ShareMemGiB, t.UnitPrice, t.IsExclusive, t.IsSpecExclusive)
if _, exists := tmpMap[key]; exists {
centerExists := false
for _, center := range tmpMap[key] {
if center.AiCenterCode == t.AICenterCode {
centerExists = true
}
}
if centerExists {
continue
}
tmpMap[key] = append(tmpMap[key], &ResourceAiCenterRes{
AiCenterCode: t.AICenterCode,
AiCenterName: t.AICenterName,
})
} else {
tmpMap[key] = []*ResourceAiCenterRes{{
AiCenterCode: t.AICenterCode,
AiCenterName: t.AICenterName,
}}
}
}

for i := 0; i < len(resourceInfos); i++ {
t := resourceInfos[i]
key := fmt.Sprintf("%s_%s_%d_%d_%f_%f_%f_%d_%t_%s", t.ComputeResource, t.AccCardType, t.AccCardsNum,
t.CpuCores, t.MemGiB, t.GPUMemGiB, t.ShareMemGiB, t.UnitPrice, t.IsExclusive, t.IsSpecExclusive)
resourceInfos[i].AICenterList = tmpMap[key]
}
return resourceInfos, total, nil
}

type AccCardInfo struct {
ComputeSource string
CardList []string
}

func GetAccCardList() ([]AccCardInfo, error) {
res := make([]AccCardInfo, 0)
r := make([]*Specification, 0)
err := x.Where("resource_specification.status = ? and (resource_queue.deleted_time = 0 or resource_queue.deleted_time is null)", SpecOnShelf).
Join("INNER", "resource_queue", "resource_queue.id = resource_specification.queue_id").
Join("INNER", "resource_scene_spec", "resource_scene_spec.spec_id = resource_specification.id").
Join("INNER", "resource_scene", "resource_scene_spec.scene_id = resource_scene.id").
OrderBy("resource_queue.compute_resource asc,resource_queue.acc_card_type asc").
Unscoped().Distinct("resource_queue.compute_resource,resource_queue.acc_card_type").Find(&r)
if err != nil {
return nil, err
}
tmpMap := make(map[string][]string, 0)
keys := make([]string, 0)
for i := 0; i < len(r); i++ {
spec := r[i]
if _, exists := tmpMap[spec.ComputeResource]; exists {
tmpMap[spec.ComputeResource] = append(tmpMap[spec.ComputeResource], spec.AccCardType)
} else {
keys = append(keys, spec.ComputeResource)
tmpMap[spec.ComputeResource] = []string{spec.AccCardType}
}
}
for i := 0; i < len(keys); i++ {
res = append(res, AccCardInfo{
ComputeSource: keys[i],
CardList: tmpMap[keys[i]],
})
}

return res, nil
}
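
A minimal standalone sketch (not part of the patch) of the grouping idea GetAccCardList uses: card types are bucketed per compute resource in a map, while a separate keys slice preserves the order in which each resource was first seen.

package main

import "fmt"

type AccCardInfo struct {
	ComputeSource string
	CardList      []string
}

// groupCards buckets card types per compute resource and keeps first-seen order.
func groupCards(rows [][2]string) []AccCardInfo {
	tmp := make(map[string][]string)
	keys := make([]string, 0)
	for _, row := range rows {
		resource, card := row[0], row[1]
		if _, ok := tmp[resource]; !ok {
			keys = append(keys, resource)
		}
		tmp[resource] = append(tmp[resource], card)
	}
	res := make([]AccCardInfo, 0, len(keys))
	for _, k := range keys {
		res = append(res, AccCardInfo{ComputeSource: k, CardList: tmp[k]})
	}
	return res
}

func main() {
	// hypothetical rows, already ordered by compute_resource and acc_card_type
	rows := [][2]string{{"GPU", "A100"}, {"GPU", "V100"}, {"NPU", "Ascend 910"}}
	fmt.Println(groupCards(rows)) // [{GPU [A100 V100]} {NPU [Ascend 910]}]
}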

+ 2
- 1
models/task_config.go View File

@@ -85,7 +85,8 @@ func GetTaskTypeFromAction(a ActionType) TaskType {
ActionCreateGrampusGPUOnlineInferTask,
ActionCreateGrampusGPUTrainTask,
ActionCreateGrampusGPUInferenceTask,
ActionCreateGrampusILUVATARInferenceTask:
ActionCreateGrampusILUVATARInferenceTask,
ActionCreateGrampusILUVATARTrainTask:
return TaskCreateCloudbrainTask
case ActionCreateRepo:
return TaskCreatePublicRepo


+ 11
- 0
models/user_analysis_for_activity.go View File

@@ -589,3 +589,14 @@ func QueryUserAnnualReport(userId int64) *UserSummaryCurrentYear {
}
return nil
}

func GetLastModifyTime() string {
statictisSess := xStatistic.NewSession()
defer statictisSess.Close()
userBusinessAnalysisLastMonth := &UserBusinessAnalysisLastMonth{}
err := statictisSess.Select("*").Table(new(UserBusinessAnalysisLastMonth)).Limit(1, 0).Find(userBusinessAnalysisLastMonth)
if err == nil {
return userBusinessAnalysisLastMonth.DataDate
}
return ""
}

+ 40
- 20
models/user_business_analysis.go View File

@@ -41,24 +41,6 @@ func (ulist UserBusinessAnalysisList) Less(i, j int) bool {
return ulist[i].ID > ulist[j].ID
}

func getLastCountDate() int64 {
statictisSess := xStatistic.NewSession()
defer statictisSess.Close()
statictisSess.Limit(1, 0)
userBusinessAnalysisList := make([]*UserBusinessAnalysis, 0)
if err := statictisSess.Table("user_business_analysis").OrderBy("count_date desc").Limit(1, 0).
Find(&userBusinessAnalysisList); err == nil {
for _, userRecord := range userBusinessAnalysisList {
return userRecord.CountDate - 10000
}
} else {
log.Info("query error." + err.Error())
}
currentTimeNow := time.Now()
pageStartTime := time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 0, 0, 0, 0, currentTimeNow.Location())
return pageStartTime.Unix()
}

func QueryMetricsPage(start int64, end int64) ([]*UserMetrics, int64) {

statictisSess := xStatistic.NewSession()
@@ -633,6 +615,8 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
CreateRepoCountMap, _, _, _ := queryUserCreateRepo(start_unix, end_unix)
LoginCountMap := queryLoginCount(start_unix, end_unix)

LoginActionCountMap := queryLoginActionCount(start_unix, end_unix)

OpenIIndexMap := queryUserRepoOpenIIndex(startTime.Unix(), end_unix)
CloudBrainTaskMap, CloudBrainTaskItemMap, _ := queryCloudBrainTask(start_unix, end_unix)
AiModelManageMap := queryUserModel(start_unix, end_unix)
@@ -695,6 +679,7 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
dateRecordAll.EncyclopediasCount = getMapKeyStringValue(dateRecordAll.Name, wikiCountMap)
dateRecordAll.CreateRepoCount = getMapValue(dateRecordAll.ID, CreateRepoCountMap)
dateRecordAll.LoginCount = getMapValue(dateRecordAll.ID, LoginCountMap)
dateRecordAll.LoginActionCount = getMapValue(dateRecordAll.ID, LoginActionCountMap)

if _, ok := OpenIIndexMap[dateRecordAll.ID]; !ok {
dateRecordAll.OpenIIndex = 0
@@ -720,6 +705,7 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
dateRecordAll.RecommendImage = getMapValue(dateRecordAll.ID, RecommendImage)
dateRecordAll.InvitationUserNum = getMapValue(dateRecordAll.ID, InvitationMap)
dateRecordAll.UserIndexPrimitive = getUserIndexFromAnalysisAll(dateRecordAll, ParaWeight)

userIndexMap[dateRecordAll.ID] = dateRecordAll.UserIndexPrimitive
if maxUserIndex < dateRecordAll.UserIndexPrimitive {
maxUserIndex = dateRecordAll.UserIndexPrimitive
@@ -933,7 +919,7 @@ func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, static

insertBatchSql := "INSERT INTO public." + tableName +
"(id, count_date, code_merge_count, commit_count, issue_count, comment_count, focus_repo_count, star_repo_count, watched_count, gitea_age_month, commit_code_size, commit_dataset_size, " +
"commit_model_count, solve_issue_count, encyclopedias_count, regist_date, create_repo_count, login_count, open_i_index, email, name, data_date,cloud_brain_task_num,gpu_debug_job,npu_debug_job,gpu_train_job,npu_train_job,npu_inference_job,gpu_bench_mark_job,cloud_brain_run_time,commit_dataset_num,user_index,user_location,focus_other_user,collect_dataset,collected_dataset,recommend_dataset,collect_image,collected_image,recommend_image,user_index_primitive,phone,invitation_user_num,model_convert_count) " +
"commit_model_count, solve_issue_count, encyclopedias_count, regist_date, create_repo_count, login_count, open_i_index, email, name, data_date,cloud_brain_task_num,gpu_debug_job,npu_debug_job,gpu_train_job,npu_train_job,npu_inference_job,gpu_bench_mark_job,cloud_brain_run_time,commit_dataset_num,user_index,user_location,focus_other_user,collect_dataset,collected_dataset,recommend_dataset,collect_image,collected_image,recommend_image,user_index_primitive,phone,invitation_user_num,model_convert_count,login_action_count) " +
"VALUES"

for i, record := range dateRecords {
@@ -942,7 +928,7 @@ func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, static
", " + fmt.Sprint(record.WatchedCount) + ", " + fmt.Sprint(record.GiteaAgeMonth) + ", " + fmt.Sprint(record.CommitCodeSize) + ", " + fmt.Sprint(record.CommitDatasetSize) +
", " + fmt.Sprint(record.CommitModelCount) + ", " + fmt.Sprint(record.SolveIssueCount) + ", " + fmt.Sprint(record.EncyclopediasCount) + ", " + fmt.Sprint(record.RegistDate) +
", " + fmt.Sprint(record.CreateRepoCount) + ", " + fmt.Sprint(record.LoginCount) + ", " + fmt.Sprint(record.OpenIIndex) + ", '" + record.Email + "', '" + record.Name + "', '" + record.DataDate + "'," + fmt.Sprint(record.CloudBrainTaskNum) + "," + fmt.Sprint(record.GpuDebugJob) + "," + fmt.Sprint(record.NpuDebugJob) + "," + fmt.Sprint(record.GpuTrainJob) + "," + fmt.Sprint(record.NpuTrainJob) + "," + fmt.Sprint(record.NpuInferenceJob) + "," + fmt.Sprint(record.GpuBenchMarkJob) + "," + fmt.Sprint(record.CloudBrainRunTime) + "," + fmt.Sprint(record.CommitDatasetNum) + "," + fmt.Sprint(record.UserIndex) + ",'" + record.UserLocation + "'," +
fmt.Sprint(record.FocusOtherUser) + "," + fmt.Sprint(record.CollectDataset) + "," + fmt.Sprint(record.CollectedDataset) + "," + fmt.Sprint(record.RecommendDataset) + "," + fmt.Sprint(record.CollectImage) + "," + fmt.Sprint(record.CollectedImage) + "," + fmt.Sprint(record.RecommendImage) + "," + fmt.Sprint(record.UserIndexPrimitive) + ",'" + record.Phone + "'" + "," + fmt.Sprint(record.InvitationUserNum) + "," + fmt.Sprint(record.ModelConvertCount) + ")"
fmt.Sprint(record.FocusOtherUser) + "," + fmt.Sprint(record.CollectDataset) + "," + fmt.Sprint(record.CollectedDataset) + "," + fmt.Sprint(record.RecommendDataset) + "," + fmt.Sprint(record.CollectImage) + "," + fmt.Sprint(record.CollectedImage) + "," + fmt.Sprint(record.RecommendImage) + "," + fmt.Sprint(record.UserIndexPrimitive) + ",'" + record.Phone + "'" + "," + fmt.Sprint(record.InvitationUserNum) + "," + fmt.Sprint(record.ModelConvertCount) + "," + fmt.Sprint(record.LoginActionCount) + ")"
if i < (len(dateRecords) - 1) {
insertBatchSql += ","
}
@@ -2222,6 +2208,40 @@ func queryLoginCount(start_unix int64, end_unix int64) map[int64]int {
return resultMap
}

func queryLoginActionCount(start_unix int64, end_unix int64) map[int64]int {
statictisSess := xStatistic.NewSession()
defer statictisSess.Close()

resultMap := make(map[int64]int)
cond := "created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix)
count, err := statictisSess.Where(cond).Count(new(UserLoginActionLog))
if err != nil {
log.Info("query UserLoginActionLog error. return.")
return resultMap
}
var indexTotal int64
indexTotal = 0
for {
statictisSess.Select("id,u_id").Table("user_login_action_log").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal))
userLoginActionLogList := make([]*UserLoginActionLog, 0)
statictisSess.Find(&userLoginActionLogList)
log.Info("query user login action size=" + fmt.Sprint(len(userLoginActionLogList)))
for _, loginRecord := range userLoginActionLogList {
if _, ok := resultMap[loginRecord.UId]; !ok {
resultMap[loginRecord.UId] = 1
} else {
resultMap[loginRecord.UId] += 1
}
}
indexTotal += PAGE_SIZE
if indexTotal >= count {
break
}
}
log.Info("user login action size=" + fmt.Sprint(len(resultMap)))
return resultMap
}
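
A side note on the tallying loop above: the explicit exists check is not needed when incrementing a map entry, because indexing a missing key yields the zero value. A minimal equivalent (illustrative only):

// countByUser tallies login-action rows per user id.
func countByUser(uids []int64) map[int64]int {
	result := make(map[int64]int)
	for _, uid := range uids {
		result[uid]++ // missing keys start at 0, so no exists check is required
	}
	return result
}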

func queryUserModel(start_unix int64, end_unix int64) map[int64]int {
sess := x.NewSession()
defer sess.Close()


+ 19
- 1
models/user_business_struct.go View File

@@ -35,7 +35,7 @@ type UserBusinessAnalysisCurrentYear struct {
RegistDate timeutil.TimeStamp `xorm:"NOT NULL"`
//repo
CreateRepoCount int `xorm:"NOT NULL DEFAULT 0"`
//login count, from elk
//login count
LoginCount int `xorm:"NOT NULL DEFAULT 0"`
//openi index
OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"`
@@ -69,6 +69,8 @@ type UserBusinessAnalysisCurrentYear struct {
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
ModelConvertCount int `xorm:"NOT NULL DEFAULT 0"`

LoginActionCount int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysisLast30Day struct {
@@ -138,6 +140,8 @@ type UserBusinessAnalysisLast30Day struct {
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
ModelConvertCount int `xorm:"NOT NULL DEFAULT 0"`

LoginActionCount int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysisLastMonth struct {
@@ -207,6 +211,8 @@ type UserBusinessAnalysisLastMonth struct {
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
ModelConvertCount int `xorm:"NOT NULL DEFAULT 0"`

LoginActionCount int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysisCurrentMonth struct {
@@ -276,6 +282,8 @@ type UserBusinessAnalysisCurrentMonth struct {
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
ModelConvertCount int `xorm:"NOT NULL DEFAULT 0"`

LoginActionCount int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysisCurrentWeek struct {
@@ -346,6 +354,8 @@ type UserBusinessAnalysisCurrentWeek struct {
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
ModelConvertCount int `xorm:"NOT NULL DEFAULT 0"`

LoginActionCount int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysisYesterday struct {
@@ -416,6 +426,8 @@ type UserBusinessAnalysisYesterday struct {
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
ModelConvertCount int `xorm:"NOT NULL DEFAULT 0"`

LoginActionCount int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysisLastWeek struct {
@@ -486,6 +498,8 @@ type UserBusinessAnalysisLastWeek struct {
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
ModelConvertCount int `xorm:"NOT NULL DEFAULT 0"`

LoginActionCount int `xorm:"NOT NULL DEFAULT 0"`
}

type UserAnalysisPara struct {
@@ -603,6 +617,8 @@ type UserBusinessAnalysisAll struct {
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
ModelConvertCount int `xorm:"NOT NULL DEFAULT 0"`

LoginActionCount int `xorm:"NOT NULL DEFAULT 0"`
}

type UserBusinessAnalysis struct {
@@ -692,4 +708,6 @@ type UserBusinessAnalysis struct {
Phone string `xorm:"NULL"`
InvitationUserNum int `xorm:"NOT NULL DEFAULT 0"`
ModelConvertCount int `xorm:"NOT NULL DEFAULT 0"`

LoginActionCount int `xorm:"NOT NULL DEFAULT 0"`
}

+ 20
- 0
models/user_login_action_log.go View File

@@ -0,0 +1,20 @@
package models

import (
"code.gitea.io/gitea/modules/timeutil"
)

// user login activity record table
type UserLoginActionLog struct {
ID int64 `xorm:"pk autoincr"`
UId int64 `xorm:"NOT NULL"`
CreatedUnix timeutil.TimeStamp `xorm:"created"`
}

func SaveLoginActionToDb(uid int64) {
statictisSess := xStatistic.NewSession()
defer statictisSess.Close()
var dateRecord UserLoginActionLog
dateRecord.UId = uid
statictisSess.Insert(&dateRecord)
}
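
Because of the xorm:"created" tag, xorm fills CreatedUnix automatically on insert, so callers only supply the user id. An illustrative call site (hypothetical placement; the patch actually feeds uids through the channel consumer added in modules/context/user_action_count.go below):

// somewhere after a signed-in request has been identified
models.SaveLoginActionToDb(ctx.User.ID)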

+ 51
- 20
modules/auth/cloudbrain.go View File

@@ -36,34 +36,65 @@ type CreateCloudBrainForm struct {
}

type CommitImageCloudBrainForm struct {
Description string `form:"description" binding:"Required"`
Type int `form:"type" binding:"Required"`
Tag string `form:"tag" binding:"Required;MaxSize(100)" `
IsPrivate bool `form:"isPrivate" binding:"Required"`
Topics string `form:"topics"`
Description string `form:"description" binding:"Required"`
Type int `form:"type" binding:"Required"`
Tag string `form:"tag" binding:"Required;MaxSize(100)" `
IsPrivate bool `form:"isPrivate" binding:"Required"`
Topics string `form:"topics"`
Framework string `form:"framework" binding:"Required"`
FrameworkVersion string `form:"frameworkVersion"`
CudaVersion string `form:"cudaVersion" binding:"Required"`
PythonVersion string `form:"pythonVersion" binding:"Required"`
OperationSystem string `form:"operationSystem"`
OperationSystemVersion string `form:"operationSystemVersion"`
ThirdPackages string `form:"thirdPackages"`
ComputeResource string `form:"computeResource"`
}
type CommitImageGrampusForm struct {
Description string `form:"description" binding:"Required"`
Type int `form:"type" binding:"Required"`
Tag string `form:"tag" binding:"Required;MaxSize(50)" `
IsPrivate bool `form:"isPrivate" binding:"Required"`
Topics string `form:"topics"`
Description string `form:"description" binding:"Required"`
Type int `form:"type" binding:"Required"`
Tag string `form:"tag" binding:"Required;MaxSize(50)" `
IsPrivate bool `form:"isPrivate" binding:"Required"`
Topics string `form:"topics"`
Framework string `form:"framework" binding:"Required"`
FrameworkVersion string `form:"frameworkVersion"`
CudaVersion string `form:"cudaVersion" binding:"Required"`
PythonVersion string `form:"pythonVersion" binding:"Required"`
OperationSystem string `form:"operationSystem"`
OperationSystemVersion string `form:"operationSystemVersion"`
ThirdPackages string `form:"thirdPackages"`
ComputeResource string `form:"computeResource"`
}

type CommitAdminImageCloudBrainForm struct {
Description string `form:"description" binding:"Required"`
Type int `form:"type" binding:"Required"`
Tag string `form:"tag" binding:"Required;MaxSize(100)" `
IsPrivate bool `form:"isPrivate" binding:"Required"`
Topics string `form:"topics"`
Place string `form:"place" binding:"Required"`
IsRecommend bool `form:"isRecommend" binding:"Required"`
Description string `form:"description" binding:"Required"`
Type int `form:"type" binding:"Required"`
Tag string `form:"tag" binding:"Required;MaxSize(100)" `
IsPrivate bool `form:"isPrivate" binding:"Required"`
Topics string `form:"topics"`
Place string `form:"place" binding:"Required"`
IsRecommend bool `form:"isRecommend" binding:"Required"`
Framework string `form:"framework" binding:"Required"`
FrameworkVersion string `form:"frameworkVersion" binding:"Required"`
CudaVersion string `form:"cudaVersion" binding:"Required"`
PythonVersion string `form:"pythonVersion" binding:"Required"`
OperationSystem string `form:"operationSystem"`
OperationSystemVersion string `form:"operationSystemVersion"`
ThirdPackages string `form:"thirdPackages"`
ComputeResource string `form:"computeResource"`
}

type EditImageCloudBrainForm struct {
ID int64 `form:"id" binding:"Required"`
Description string `form:"description" binding:"Required"`
Topics string `form:"topics"`
ID int64 `form:"id" binding:"Required"`
Description string `form:"description" binding:"Required"`
Topics string `form:"topics"`
Framework string `form:"framework" binding:"Required"`
FrameworkVersion string `form:"frameworkVersion" binding:"Required"`
CudaVersion string `form:"cudaVersion" binding:"Required"`
PythonVersion string `form:"pythonVersion" binding:"Required"`
OperationSystem string `form:"operationSystem"`
OperationSystemVersion string `form:"operationSystemVersion"`
ThirdPackages string `form:"thirdPackages"`
}

type ReviewImageForm struct {


+ 40
- 18
modules/cloudbrain/resty.go View File

@@ -350,15 +350,22 @@ sendjob:
}

image := models.Image{
Type: models.NORMAL_TYPE,
CloudbrainType: params.CloudBrainType,
UID: params.UID,
IsPrivate: params.IsPrivate,
Tag: imageTag,
Description: params.ImageDescription,
Place: setting.Cloudbrain.ImageURLPrefix + imageTag,
Status: models.IMAGE_STATUS_COMMIT,
ApplyStatus: models.NoneApply,
Type: models.NORMAL_TYPE,
CloudbrainType: params.CloudBrainType,
UID: params.UID,
IsPrivate: params.IsPrivate,
Tag: imageTag,
Description: params.ImageDescription,
Place: setting.Cloudbrain.ImageURLPrefix + imageTag,
Status: models.IMAGE_STATUS_COMMIT,
ApplyStatus: models.NoneApply,
Framework: params.Framework,
FrameworkVersion: params.FrameworkVersion,
CudaVersion: params.CudaVersion,
PythonVersion: params.PythonVersion,
OperationSystem: params.OperationSystem,
OperationSystemVersion: params.OperationSystemVersion,
ThirdPackages: params.ThirdPackages,
}

err = models.WithTx(func(ctx models.DBContext) error {
@@ -367,6 +374,13 @@ sendjob:
dbImage.IsPrivate = params.IsPrivate
dbImage.Description = params.ImageDescription
dbImage.Status = models.IMAGE_STATUS_COMMIT
dbImage.Framework = params.Framework
dbImage.FrameworkVersion = params.FrameworkVersion
dbImage.CudaVersion = params.CudaVersion
dbImage.PythonVersion = params.PythonVersion
dbImage.OperationSystem = params.OperationSystem
dbImage.OperationSystemVersion = params.OperationSystemVersion
dbImage.ThirdPackages = params.ThirdPackages
image = *dbImage
if err := models.UpdateLocalImage(dbImage); err != nil {
log.Error("Failed to update image record.", err)
@@ -406,15 +420,23 @@ func CommitAdminImage(params models.CommitImageParams, doer *models.User) error
}

image := models.Image{
CloudbrainType: params.CloudBrainType,
UID: params.UID,
IsPrivate: params.IsPrivate,
Tag: imageTag,
Description: params.ImageDescription,
Place: params.Place,
Status: models.IMAGE_STATUS_SUCCESS,
Type: params.Type,
ApplyStatus: models.NoneApply,
CloudbrainType: params.CloudBrainType,
UID: params.UID,
IsPrivate: params.IsPrivate,
Tag: imageTag,
Description: params.ImageDescription,
Place: params.Place,
Status: models.IMAGE_STATUS_SUCCESS,
Type: params.Type,
ApplyStatus: models.NoneApply,
Framework: params.Framework,
FrameworkVersion: params.FrameworkVersion,
CudaVersion: params.CudaVersion,
PythonVersion: params.PythonVersion,
OperationSystem: params.OperationSystem,
OperationSystemVersion: params.OperationSystemVersion,
ThirdPackages: params.ThirdPackages,
ComputeResource: params.ComputeResource,
}

err = models.WithTx(func(ctx models.DBContext) error {


+ 1
- 0
modules/context/context.go View File

@@ -307,6 +307,7 @@ func Contexter() macaron.Handler {

if ctx.User != nil {
ctx.IsSigned = true
toCache(ctx)
ctx.Data["IsSigned"] = ctx.IsSigned
ctx.Data["SignedUser"] = ctx.User
ctx.Data["SignedUserID"] = ctx.User.ID


+ 42
- 0
modules/context/user_action_count.go View File

@@ -0,0 +1,42 @@
package context

import (
"fmt"
"sync"
"time"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
)

var userActionMap sync.Map

var userChannel = make(chan int64, 10000)

func UserActionChannelInit() {
go consumerUserAction(userChannel)
}

func UserActionMapClear() {
userActionMap.Range(func(key, value any) bool {
userActionMap.Delete(key)
return true
})
}

func toCache(ctx *Context) {
if ctx.User != nil {
_, ok := userActionMap.Load(ctx.User.ID)
if !ok {
log.Info("add user uid to login action db. uid=" + fmt.Sprint(ctx.User.ID))
userActionMap.Store(ctx.User.ID, time.Now())
userChannel <- ctx.User.ID
}
}
}

func consumerUserAction(in <-chan int64) {
for uid := range in {
models.SaveLoginActionToDb(uid)
}
}
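
The pattern above, stripped of the models dependency: a sync.Map guards against enqueueing the same user twice per cache lifetime, and a single goroutine drains a buffered channel into the statistics database. A hedged standalone sketch; LoadOrStore also closes the small check-then-store race left by the separate Load and Store calls, and the cache reset (UserActionMapClear) is triggered elsewhere, not in this hunk.

package main

import (
	"fmt"
	"sync"
)

var seen sync.Map
var queue = make(chan int64, 10000)

// recordOnce enqueues uid only the first time it is seen since the last reset.
func recordOnce(uid int64) {
	if _, loaded := seen.LoadOrStore(uid, struct{}{}); !loaded {
		queue <- uid
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // stands in for consumerUserAction / models.SaveLoginActionToDb
		defer wg.Done()
		for uid := range queue {
			fmt.Println("persist login action for uid", uid)
		}
	}()
	recordOnce(7)
	recordOnce(7) // deduplicated: nothing is enqueued the second time
	close(queue)
	wg.Wait()
}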

+ 2
- 0
modules/grampus/grampus.go View File

@@ -514,6 +514,8 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (jobId str
actionType = models.ActionCreateGrampusGPUTrainTask
} else if req.ComputeResource == models.GCUResource {
actionType = models.ActionCreateGrampusGCUTrainTask
} else if req.ComputeResource == models.ILUVATAR {
actionType = models.ActionCreateGrampusILUVATARTrainTask
}
notification.NotifyOtherTask(ctx.User, ctx.Repo.Repository, jobID, req.DisplayJobName, actionType)



+ 111
- 28
modules/grampus/resty.go View File

@@ -32,6 +32,7 @@ const (
urlGetResourceSpecs = urlOpenApiV1 + "resourcespec"
urlGetAiCenter = urlOpenApiV1 + "sharescreen/aicenter"
urlGetImages = urlOpenApiV1 + "image"
urlDelImages = urlOpenApiV1 + "delimage"
urlNotebookJob = urlOpenApiV1 + "notebook"

errorIllegalToken = 1005
@@ -268,12 +269,10 @@ func GetImages(processorType string, jobType string) (*models.GetGrampusImagesRe
var result models.GetGrampusImagesResult

retry := 0

queryType := "TrainJob"
if jobType == string(models.JobTypeDebug) {
queryType = "Notebook"
}

sendjob:
_, err := client.R().
SetAuthToken(TOKEN).
@@ -299,6 +298,35 @@ sendjob:
return &result, nil
}

func GetAllBaseImages() (*models.GetGrampusImagesResult, error) {
checkSetting()
client := getRestyClient()
var result models.GetGrampusImagesResult
retry := 0
sendjob:
_, err := client.R().
SetAuthToken(TOKEN).
SetResult(&result).
Get(HOST + urlGetImages)

if err != nil {
return nil, fmt.Errorf("resty GetAllBaseImages: %v", err)
}
if result.ErrorCode == errorIllegalToken && retry < 1 {
retry++
log.Info("retry get token")
_ = getToken()
goto sendjob
}

if result.ErrorCode != 0 {
log.Error("GetAllBaseImages failed(%d): %s", result.ErrorCode, result.ErrorMsg)
return &result, fmt.Errorf("GetImages failed(%d): %s", result.ErrorCode, result.ErrorMsg)
}

return &result, nil
}

func GetTrainJobLog(jobID string, nodeId ...int) (string, error) {
checkSetting()
client := getRestyClient()
@@ -643,7 +671,7 @@ sendjob:
return restartResponse, nil
}

func CommitImage(jobID string, params models.CommitGrampusImageParams, doer *models.User) error {
func CommitImage(jobID, computeResource, aiCenterId string, params models.CommitGrampusImageParams, doer *models.User) error {
imageTag := strings.TrimSpace(params.ImageVersion)

dbImage, err := models.GetImageByTag(imageTag)
@@ -697,36 +725,60 @@ sendjob:
return fmt.Errorf("CommitImage err: imageid is empty. %s", result.ErrorMsg)
}

aiCenterList := make(models.AiCenterImages, 0)
ai := models.AiCenterImage{
AiCenterId: aiCenterId,
}
aiCenterList = append(aiCenterList, ai)

image := models.Image{
Type: models.NORMAL_TYPE,
CloudbrainType: params.CloudBrainType,
UID: params.UID,
IsPrivate: params.IsPrivate,
Tag: imageTag,
Description: params.Description,
ImageID: result.ImageId,
Status: models.IMAGE_STATUS_COMMIT,
ApplyStatus: models.NoneApply,
Type: models.NORMAL_TYPE,
CloudbrainType: params.CloudBrainType,
UID: params.UID,
IsPrivate: params.IsPrivate,
Tag: imageTag,
Description: params.Description,
ImageID: result.ImageId,
Status: models.IMAGE_STATUS_COMMIT,
ApplyStatus: models.NoneApply,
Framework: params.Framework,
FrameworkVersion: params.FrameworkVersion,
CudaVersion: params.CudaVersion,
PythonVersion: params.PythonVersion,
OperationSystem: params.OperationSystem,
OperationSystemVersion: params.OperationSystemVersion,
ThirdPackages: params.ThirdPackages,
ComputeResource: params.ComputeResource,
GrampusBaseImage: 0,
AiCenterImages: aiCenterList,
}

err = models.WithTx(func(ctx models.DBContext) error {
models.UpdateAutoIncrementIndex()
if dbImage != nil {
dbImage.IsPrivate = params.IsPrivate
dbImage.Description = params.Description
dbImage.Status = models.IMAGE_STATUS_COMMIT
image = *dbImage
if err := models.UpdateLocalImage(dbImage); err != nil {
log.Error("Failed to update image record.", err)
return fmt.Errorf("CommitImage err: %s", res.String())
}

} else {
if err := models.CreateLocalImage(&image); err != nil {
log.Error("Failed to insert image record.", err)
return fmt.Errorf("CommitImage err: %s", res.String())
}
// if dbImage != nil {
// dbImage.IsPrivate = params.IsPrivate
// dbImage.Description = params.Description
// dbImage.Status = models.IMAGE_STATUS_COMMIT
// dbImage.Framework = params.Framework
// dbImage.FrameworkVersion = params.FrameworkVersion
// dbImage.CudaVersion = params.CudaVersion
// dbImage.PythonVersion = params.PythonVersion
// dbImage.OperationSystem = params.OperationSystem
// dbImage.OperationSystemVersion = params.OperationSystemVersion
// dbImage.ThirdPackages = params.ThirdPackages
// dbImage.ComputeResource = params.ComputeResource
// image = *dbImage
// if err := models.UpdateLocalImage(dbImage); err != nil {
// log.Error("Failed to update image record.", err)
// return fmt.Errorf("CommitImage err: %s", res.String())
// }

// } else {
if err := models.CreateLocalImage(&image); err != nil {
log.Error("Failed to insert image record.", err)
return fmt.Errorf("CommitImage err: %s", res.String())
}
//}
if err := models.SaveImageTopics(image.ID, params.Topics...); err != nil {
log.Error("Failed to insert image record.", err)
return fmt.Errorf("CommitImage err: %s", res.String())
@@ -789,6 +841,9 @@ func updateImageStatus(image models.Image) {
commitSuccess = true
image.Status = models.IMAGE_STATUS_SUCCESS
image.Place = result.Image.ImageFullAddr
if image.AiCenterImages != nil {
image.AiCenterImages[0].ImageUrl = result.Image.ImageFullAddr
}
models.UpdateLocalImageStatusAndPlace(&image)
return
}
@@ -797,7 +852,6 @@ func updateImageStatus(image models.Image) {
commitSuccess = false
break
}

}
}
if !commitSuccess {
@@ -806,3 +860,32 @@ func updateImageStatus(image models.Image) {
}

}

func DeleteImage(image *models.Image) error {
checkSetting()
client := getRestyClient()
log.Info("start to delete remote image=" + image.ImageID)
var result models.GrampusResult
retry := 0
sendjob:
res, err := client.R().
SetAuthToken(TOKEN).
SetResult(&result).
Delete(HOST + urlDelImages + "/" + image.ImageID)

if err != nil {
log.Info("DeleteImage error=" + err.Error())
return fmt.Errorf("resty DeleteImage: %v", err)
}
if result.ErrorCode == errorIllegalToken && retry < 1 {
retry++
log.Info("retry get token")
_ = getToken()
goto sendjob
}
if res.StatusCode() != http.StatusOK {
log.Info("res status code=" + fmt.Sprint(res.StatusCode()))
return fmt.Errorf("DeleteImage err: %s", res.String())
}
return nil
}
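
The retry used by DeleteImage (and by the other requests in this file) boils down to: perform the call, and if the remote reports an expired token, refresh it once and try again. A loop-based sketch equivalent to the sendjob: label/goto pattern (illustrative only):

package main

import "fmt"

const errorIllegalToken = 1005 // mirrors the constant declared near the top of this file

// doWithTokenRetry performs call, refreshing the token once if the remote
// reports an expired token, and treats any remaining non-zero code as failure.
func doWithTokenRetry(call func() (int, error), refreshToken func() error) error {
	for attempt := 0; ; attempt++ {
		code, err := call()
		if err != nil {
			return err
		}
		if code == errorIllegalToken && attempt == 0 {
			if err := refreshToken(); err != nil {
				return err
			}
			continue // one retry with the fresh token
		}
		if code != 0 {
			return fmt.Errorf("remote call failed(%d)", code)
		}
		return nil
	}
}

func main() {
	calls := 0
	err := doWithTokenRetry(func() (int, error) {
		calls++
		if calls == 1 {
			return errorIllegalToken, nil // first attempt: token rejected
		}
		return 0, nil
	}, func() error { return nil })
	fmt.Println(err, "after", calls, "calls") // <nil> after 2 calls
}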

+ 4
- 0
modules/public/public.go View File

@@ -44,6 +44,10 @@ func Custom(opts *Options) macaron.Handler {
return opts.staticHandler(path.Join(setting.CustomPath, "public"))
}

func CustomJson(opts *Options) macaron.Handler {
return opts.staticHandler(path.Join(setting.CustomPath, "public", "json"))
}

// staticFileSystem implements http.FileSystem interface.
type staticFileSystem struct {
dir *http.Dir


+ 2
- 0
modules/setting/setting.go View File

@@ -874,6 +874,7 @@ var (
ATTACHEMENT_NUM_A_USER_LAST10M int
ATTACHEMENT_SIZE_A_USER int64 //G
ALL_ATTACHEMENT_NUM_SDK int
IGNORE_FLAG string
}{}

LLM_CHAT_API = struct {
@@ -1878,6 +1879,7 @@ func getFlowControlConfig() {
FLOW_CONTROL.ATTACHEMENT_NUM_A_USER_LAST24HOUR = sec.Key("ATTACHEMENT_NUM_A_USER_LAST24HOUR").MustInt(1000)
FLOW_CONTROL.ATTACHEMENT_NUM_A_USER_LAST10M = sec.Key("ATTACHEMENT_NUM_A_USER_LAST10M").MustInt(10)
FLOW_CONTROL.ATTACHEMENT_SIZE_A_USER = sec.Key("ATTACHEMENT_SIZE_A_USER").MustInt64(500)
FLOW_CONTROL.IGNORE_FLAG = sec.Key("IGNORE_FLAG").MustString("")
}

func getModelAppConfig() {


+ 1
- 0
modules/structs/card_requests.go View File

@@ -12,6 +12,7 @@ type CardReq struct {
Contact string `json:"contact" binding:"Required"`
PhoneNumber string `json:"phone_number" binding:"Required"`
EmailAddress string `json:"email_address" binding:"Required;Email;MaxSize(254)"`
Wechat string `json:"wechat" binding:"Required;MaxSize(254)"`
Org string `json:"org" binding:"MaxSize(500)"`
Description string `json:"description" binding:"MaxSize(3000)"`
Review string `json:"review"`


+ 5
- 1
modules/urfs_client/objectstorage/mocks/objectstorage_mock.go View File

@@ -1,5 +1,9 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: objectstorage.go

// Package mocks is a generated GoMock package.
package mocks

import (
gomock "github.com/golang/mock/gomock"
reflect "reflect"
)

+ 1
- 0
options/locale/locale_en-US.ini View File

@@ -3239,6 +3239,7 @@ task_c2net_cpusupercomputejob=`created CPU type HPC task <a href="%s/supercomput
task_nputrainjob=`created NPU training task <a href="%s/modelarts/train-job/%s">%s</a>`
task_inferencejob=`created reasoning task <a href="%s/modelarts/inference-job/%s">%s</a>`
task_c2net_gpu_inferencejob=`created GPU type inference task <a href="%s/grampus/inference-job/%s">%s</a>`
task_c2net_gpgpu_iluvatar_trainjob=`created ILUVATAR-GPGPU type train task <a href="%s/grampus/train-job/%s">%s</a>`
task_c2net_gpgpu_iluvatar_inferencejob=`created ILUVATAR-GPGPU type inference task <a href="%s/grampus/inference-job/%s">%s</a>`
task_benchmark=`created profiling task <a href="%s/cloudbrain/benchmark/%s">%s</a>`
task_createmodel=`created new model <a href="%s/modelmanage/model_readme_tmpl?name=%s">%s</a>`


+ 1
- 0
options/locale/locale_zh-CN.ini View File

@@ -3258,6 +3258,7 @@ task_c2net_cpusupercomputejob=`创建了CPU类型超算任务 <a href="%s/superc
task_nputrainjob=`创建了NPU类型训练任务 <a href="%s/modelarts/train-job/%s">%s</a>`
task_inferencejob=`创建了推理任务 <a href="%s/modelarts/inference-job/%s">%s</a>`
task_c2net_gpu_inferencejob=`创建了GPU类型推理任务 <a href="%s/grampus/inference-job/%s">%s</a>`
task_c2net_gpgpu_iluvatar_trainjob=`创建了ILUVATAR-GPGPU类型训练任务 <a href="%s/grampus/train-job/%s">%s</a>`
task_c2net_gpgpu_iluvatar_inferencejob=`创建了ILUVATAR-GPGPU类型推理任务 <a href="%s/grampus/inference-job/%s">%s</a>`
task_benchmark=`创建了评测任务 <a href="%s/cloudbrain/benchmark/%s">%s</a>`
task_createmodel=`导入了新模型 <a href="%s/modelmanage/model_readme_tmpl?name=%s">%s</a>`


+ 7
- 5
public/home/home.js View File

@@ -243,7 +243,7 @@ document.onreadystatechange = function () {
html += recordPrefix + actionName;
html += " <a href=\"" + getRepoLink(record) + "\" rel=\"nofollow\">" + getRepotext(record) + "</a>"
}
else if(record.OpType == "24" || record.OpType == "26" || record.OpType == "27" || record.OpType == "28" || record.OpType == "50" || record.OpType == "51"
else if(record.OpType == "24" || record.OpType == "26" || record.OpType == "27" || record.OpType == "28" || record.OpType == "50" || record.OpType == "51"
|| record.OpType == "30" || record.OpType == "31" || record.OpType == "32" || record.OpType == "33" || record.OpType == "42" || record.OpType == "44"){
html += recordPrefix + actionName;
const taskLink = getTaskLink(record);
@@ -253,9 +253,9 @@ document.onreadystatechange = function () {
html += " <span style=\"color: rgba(0,0,0,0.3)\">" + record.RefName + "</span>"
}
}
else if(record.OpType == "25" || record.OpType == "29" || record.OpType == "39" || record.OpType == "40" || record.OpType == "41"
else if(record.OpType == "25" || record.OpType == "29" || record.OpType == "39" || record.OpType == "40" || record.OpType == "41"
|| record.OpType == "43"|| record.OpType == "44"|| record.OpType == "45"|| record.OpType == "46"|| record.OpType == "47"
|| record.OpType == "48"|| record.OpType == "49"
|| record.OpType == "48"|| record.OpType == "49" || record.OpType == "53"
){
html += recordPrefix + actionName;
const taskLink = getTaskLink(record);
@@ -331,8 +331,8 @@ function getTaskLink(record){
re = re + "/cloudbrain/train-job/" + record.Cloudbrain.ID;
} else {
re = '';
}
}else if(record.OpType == 32 || record.OpType == 33 || record.OpType == 42 || record.OpType == 44){
}
}else if(record.OpType == 32 || record.OpType == 33 || record.OpType == 42 || record.OpType == 44 || record.OpType == 53){
if (record.Cloudbrain) {
re = re + "/grampus/train-job/" + record.Cloudbrain.ID;
} else {
@@ -518,6 +518,7 @@ var actionNameZH={
"47":"创建了CPU类型超算任务",
"48":"创建了ILUVATAR-GPGPU类型调试任务",
"49":"创建了METAX-GPGPU类型调试任务",
"53":"创建了ILUVATAR-GPGPU类型训练任务",
};

var actionNameEN={
@@ -562,6 +563,7 @@ var actionNameEN={
"47":" created CPU type super compute task ",
"48":" created ILUVATAR-GPGPU type debugging task ",
"49":" created METAX-GPGPU type debugging task ",
"53":" created ILUVATAR-GPGPU type training task ",
};

var repoAndOrgZH={


+ 25
- 10
routers/admin/resources.go View File

@@ -1,15 +1,16 @@
package admin

import (
"net/http"
"strconv"
"strings"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/routers/response"
"code.gitea.io/gitea/services/cloudbrain/resource"
"net/http"
"strconv"
"strings"
)

const (
@@ -48,6 +49,8 @@ func GetResourceQueueList(ctx *context.Context) {
accCardType := ctx.Query("card")
hasInternet := ctx.QueryInt("hasInternet")
queueType := ctx.Query("queueType")
isAvailable := ctx.QueryInt("isAvailable")
isQueueExclusive := ctx.QueryInt("isQueueExclusive")

if pageSize > 1000 {
log.Error("GetResourceQueueList pageSize too large.")
@@ -55,13 +58,15 @@ func GetResourceQueueList(ctx *context.Context) {
return
}
list, err := resource.GetResourceQueueList(models.SearchResourceQueueOptions{
ListOptions: models.ListOptions{Page: page, PageSize: pageSize},
Cluster: cluster,
AiCenterCode: aiCenterCode,
ComputeResource: computeResource,
AccCardType: accCardType,
HasInternet: models.SpecInternetQuery(hasInternet),
QueueType: queueType,
ListOptions: models.ListOptions{Page: page, PageSize: pageSize},
Cluster: cluster,
AiCenterCode: aiCenterCode,
ComputeResource: computeResource,
AccCardType: accCardType,
HasInternet: models.SpecInternetQuery(hasInternet),
QueueType: queueType,
IsAvailable: isAvailable,
IsQueueExclusive: isQueueExclusive,
})
if err != nil {
log.Error("GetResourceQueueList error.%v", err)
@@ -125,6 +130,16 @@ func SyncGrampusQueue(ctx *context.Context) {
ctx.JSON(http.StatusOK, response.Success())
}

func SyncGrampusImage(ctx *context.Context) {
err := resource.SyncGrampusImage(ctx.User.ID)
if err != nil {
log.Error("sync image error. %v", err)
ctx.JSON(http.StatusOK, response.ServerError(err.Error()))
return
}
ctx.JSON(http.StatusOK, response.Success())
}

func GetResourceSpecificationList(ctx *context.Context) {
page := ctx.QueryInt("page")
pageSize := ctx.QueryInt("pageSize")


+ 14
- 0
routers/api/v1/api.go View File

@@ -59,6 +59,7 @@
package v1

import (
"code.gitea.io/gitea/routers/resources"
"net/http"
"strings"

@@ -709,6 +710,8 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/custom", reqToken(), repo.GetCustomImages)
m.Get("/star", reqToken(), repo.GetStarImages)
m.Get("/npu", reqToken(), repo.GetNpuImages)
m.Get("/availableFilter", reqToken(),
repo.GetAvailableFilerInfo)

})

@@ -821,6 +824,7 @@ func RegisterRoutes(m *macaron.Macaron) {
})
}, operationReq)

m.Get("/query_user_count_time_info", operationReq, repo_ext.QueryUserCountTimeInfo)
m.Get("/query_metrics_current_month", operationReq, repo_ext.QueryUserMetricsCurrentMonth)
m.Get("/query_metrics_current_week", operationReq, repo_ext.QueryUserMetricsCurrentWeek)
m.Get("/query_metrics_current_year", operationReq, repo_ext.QueryUserMetricsCurrentYear)
@@ -1480,9 +1484,19 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/prd/event", authentication.ValidEventSource)
m.Post("/prd/event", authentication.AcceptWechatEvent)
})
m.Group("/authentication/wechat", func() {
m.Get("/qrCode4Bind", authentication.GetQRCode4Bind)
m.Get("/bindStatus", authentication.GetBindStatus)
m.Post("/unbind", authentication.UnbindWechat)
}, reqToken())
m.Get("/wechat/material", authentication.GetMaterial)
m.Get("/cloudbrain/get_newest_job", repo.GetNewestJobs)
m.Get("/cloudbrain/get_center_info", repo.GetAICenterInfo)

m.Group("/resources", func() {
m.Get("/acc_card/list", resources.GetAccCardList)
m.Get("/ai_center/available", resources.GetAvailableAICenterList)
})
}, securityHeaders(), context.APIContexter(), sudo())
}
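
The /resources/acc_card/list and /resources/ai_center/available routes added above are registered without reqToken(), so they can be queried anonymously. A hedged usage sketch, assuming the router is mounted under the usual /api/v1 prefix (the host below is a placeholder):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	base := "https://example.com/api/v1" // placeholder instance URL
	resp, err := http.Get(base + "/resources/acc_card/list")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // per compute resource, the accelerator card types on shelf
}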



+ 29
- 13
routers/api/v1/repo/attachments.go View File

@@ -6,6 +6,7 @@ import (
"sync"

"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
@@ -74,12 +75,20 @@ func NewMultipart(ctx *context.APIContext) {
})
return
}
if err := routeRepo.CheckFlowForDatasetSDK(); err != nil {
ctx.JSON(200, map[string]string{
"result_code": "-1",
"msg": err.Error(),
})
return
ignore := false
if setting.FLOW_CONTROL.IGNORE_FLAG != "" {
if ctx.Query("IGNORE_FLAG") == setting.FLOW_CONTROL.IGNORE_FLAG {
ignore = true
}
}
if !ignore {
if err := routeRepo.CheckFlowForDatasetSDK(); err != nil {
ctx.JSON(200, map[string]string{
"result_code": "-1",
"msg": err.Error(),
})
return
}
}
mutex.Lock()
defer mutex.Unlock()
@@ -159,13 +168,20 @@ func NewModelMultipart(ctx *context.APIContext) {
})
return
}

if err := routeRepo.CheckFlowForModelSDK(); err != nil {
ctx.JSON(200, map[string]string{
"result_code": "-1",
"msg": err.Error(),
})
return
ignore := false
if setting.FLOW_CONTROL.IGNORE_FLAG != "" {
if ctx.Query("IGNORE_FLAG") == setting.FLOW_CONTROL.IGNORE_FLAG {
ignore = true
}
}
if !ignore {
if err := routeRepo.CheckFlowForModelSDK(); err != nil {
ctx.JSON(200, map[string]string{
"result_code": "-1",
"msg": err.Error(),
})
return
}
}
modelMutex.Lock()
defer modelMutex.Unlock()
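
Both upload entry points above now apply the same rule: the flow-control check may be skipped only when the server has a non-empty FLOW_CONTROL.IGNORE_FLAG configured and the request carries exactly that value in its IGNORE_FLAG query parameter. As a standalone predicate (illustrative, not part of the patch):

// skipFlowControl reports whether the SDK flow-control checks may be skipped.
// configured is setting.FLOW_CONTROL.IGNORE_FLAG; supplied is ctx.Query("IGNORE_FLAG").
func skipFlowControl(configured, supplied string) bool {
	return configured != "" && supplied == configured
}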


+ 2
- 0
routers/api/v1/repo/cloudbrain_dashboard.go View File

@@ -804,6 +804,7 @@ func GetCloudbrainsDetailData(ctx *context.Context) {
workServerNumber := ctx.QueryInt("workServerNumber")
beginTimeStr := ctx.QueryTrim("beginTime")
endTimeStr := ctx.QueryTrim("endTime")
queueId := ctx.QueryInt64("queueId")
var beginTimeUnix int64
var endTimeUnix int64
if beginTimeStr == "" || endTimeStr == "" {
@@ -899,6 +900,7 @@ func GetCloudbrainsDetailData(ctx *context.Context) {
AccCardType: accCardType,
AccCardsNum: accCardsNum,
WorkServerNumber: workServerNumber,
QueueId: queueId,
})
if err != nil {
ctx.ServerError("Get job failed:", err)


+ 37
- 0
routers/api/v1/repo/images.go View File

@@ -28,6 +28,7 @@ func GetRecommendImages(ctx *context.APIContext) {
IncludeOfficialOnly: true,
Status: models.IMAGE_STATUS_SUCCESS,
CloudbrainType: ctx.QueryInt("cloudbrainType"),
TrainType: ctx.Query("trainType"),
}

routeRepo.GetImages(ctx.Context, &opts)
@@ -59,6 +60,7 @@ func GetCustomImages(ctx *context.APIContext) {
Topics: ctx.Query("topic"),
Status: -1,
CloudbrainType: ctx.QueryInt("cloudbrainType"),
TrainType: "",
}
routeRepo.GetImages(ctx.Context, &opts)

@@ -73,6 +75,7 @@ func GetStarImages(ctx *context.APIContext) {
Topics: ctx.Query("topic"),
Status: models.IMAGE_STATUS_SUCCESS,
CloudbrainType: ctx.QueryInt("cloudbrainType"),
TrainType: "",
}
routeRepo.GetImages(ctx.Context, &opts)

@@ -88,6 +91,40 @@ func GetNpuImages(ctx *context.APIContext) {
}
}

func GetAvailableFilerInfo(ctx *context.APIContext) {
columns := []string{"framework", "framework_version", "python_version", "cuda_version"}

i := ctx.QueryInt("index")

frameWork := ctx.Query("framework")
version := ctx.Query("version")
pythonVersion := ctx.Query("python")
compute_resource := ctx.Query("compute_resource")
onlyRecommend := ctx.QueryBool("recommend")
includeMineOnly := ctx.QueryBool("mine")
starByMe := ctx.QueryBool("star")
if i < 0 || i >= len(columns) {
i = 0
}
columnName := columns[i]

opts := &models.SearchAvailableValueOptions{
Column: columnName,
Framework: frameWork,
FrameworkVersion: version,
PythonVersion: pythonVersion,
ComputeResource: compute_resource,
OnlyRecommend: onlyRecommend,
IncludeOwnerOnly: includeMineOnly,
IncludeStarByMe: starByMe,
UID: getUID(ctx),
}
ctx.JSON(http.StatusOK, models.BaseMessageWithDataApi{
Data: models.GetImageAvailableColumnValues(opts),
})

}
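
GetAvailableFilerInfo backs a cascading image filter: index selects which column's distinct values to return (0 framework, 1 framework_version, 2 python_version, 3 cuda_version), narrowed by the selections already made. A hedged request sketch; the /images group prefix is inferred from the sibling routes in api.go, and the call needs authentication since the route is registered with reqToken():

package main

import (
	"fmt"
	"net/url"
)

func main() {
	v := url.Values{}
	v.Set("index", "1")           // ask for the distinct framework_version values
	v.Set("framework", "PyTorch") // narrow by the framework chosen in the previous step
	v.Set("compute_resource", "GPU")
	fmt.Println("/api/v1/images/availableFilter?" + v.Encode()) // path assumed as noted above
}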

func getModelArtsImages(ctx *context.APIContext) {

var versionInfos modelarts.VersionInfo


+ 17
- 21
routers/api/v1/repo/modelmanage.go View File

@@ -9,6 +9,7 @@ import (
"time"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/cloudbrain"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/convert"
"code.gitea.io/gitea/modules/grampus"
@@ -361,30 +362,25 @@ func QueryModelConvertResultFileList(ctx *context.APIContext, id string) ([]stor
return nil, err
}
if job.IsGpuTrainTask() {
//get dirs
dirs, err := routerRepo.GetModelDirs(job.ID, parentDir)
if err != nil {
log.Error("GetModelDirs failed:%v", err.Error(), ctx.Data["msgID"])
return nil, err
}

var fileInfos []storage.FileInfo
err = json.Unmarshal([]byte(dirs), &fileInfos)
if err != nil {
log.Error("json.Unmarshal failed:%v", err.Error(), ctx.Data["msgID"])
return nil, err
}

for i, fileInfo := range fileInfos {
temp, _ := time.Parse("2006-01-02 15:04:05", fileInfo.ModTime)
fileInfos[i].ModTime = temp.Local().Format("2006-01-02 15:04:05")
}
path := setting.CBCodePathPrefix + job.ID + cloudbrain.ModelMountPath + "/"
log.Info("get model convert result file path=" + path)
fileInfos, err := storage.GetAllObjectByBucketAndPrefixMinio(setting.Attachment.Minio.Bucket, path)
if err == nil {
for i, fileInfo := range fileInfos {
temp, _ := time.Parse("2006-01-02 15:04:05", fileInfo.ModTime)
fileInfos[i].ModTime = temp.Local().Format("2006-01-02 15:04:05")
}

sort.Slice(fileInfos, func(i, j int) bool {
return fileInfos[i].ModTime > fileInfos[j].ModTime
})
sort.Slice(fileInfos, func(i, j int) bool {
return fileInfos[i].ModTime > fileInfos[j].ModTime
})

return fileInfos, nil
return fileInfos, nil
} else {
log.Info("query models path error.")
return nil, err
}
} else {
var versionName = "V0001"
models, err := storage.GetObsListObject(job.ID, "output/", parentDir, versionName)


+ 65
- 0
routers/card_request/card_request.go View File

@@ -1,7 +1,10 @@
package card_request

import (
"code.gitea.io/gitea/routers/response"
"code.gitea.io/gitea/services/cloudbrain/resource"
"net/http"
"strconv"
"strings"
"time"

@@ -52,6 +55,67 @@ func GetCardRequestList(ctx *context.Context) {
getRequestShowList(ctx, opts, false)
}

func GetResourceList(ctx *context.Context) {
page := ctx.QueryInt("page")
pageSize := ctx.QueryInt("pageSize")
r := ctx.QueryStrings("resource")
accCardType := ctx.Query("accCardType")
accCardNum := ctx.QueryInt("accCardNum")
excludeAccCardNumStr := ctx.Query("excludeAccCardNums")
centerCode := ctx.Query("centerCode")
minPrice := ctx.QueryInt("minPrice")
maxPrice := ctx.QueryInt("maxPrice")
if page < 1 {
page = 1
}
if pageSize < 1 {
pageSize = 15
}
excludeAccCardNums := make([]int, 0)
if excludeAccCardNumStr != "" {
numStrArray := strings.Split(excludeAccCardNumStr, "|")
for _, s := range numStrArray {
if s == "" {
continue
}
n, err := strconv.Atoi(s)
if err == nil {
excludeAccCardNums = append(excludeAccCardNums, n)
}
}
}
opts := models.GetResourceListOpts{
ListOptions: models.ListOptions{
Page: page,
PageSize: pageSize,
},
Resource: r,
AccCardType: accCardType,
AccCardNum: accCardNum,
ExcludeAccCardNums: excludeAccCardNums,
AICenterCode: centerCode,
MinPrice: minPrice,
MaxPrice: maxPrice,
}
res, total, err := resource.GetResourceListPaging(opts)
if err != nil {
log.Error("GetResourceList err.opts=%+v,%v", opts, err)
ctx.JSON(http.StatusOK, response.OuterResponseError(err))
return
}
if res != nil {
for i := 0; i < len(res); i++ {
res[i].Tr(ctx.Language())
}
}
resultMap := make(map[string]interface{})
resultMap["list"] = res
resultMap["total"] = total
resultMap["page"] = page
resultMap["pageSize"] = pageSize
ctx.JSON(http.StatusOK, response.OuterSuccessWithData(resultMap))
}

func GetMyCardRequestList(ctx *context.Context) {

page := ctx.QueryInt("page")
@@ -241,6 +305,7 @@ func getRequestShowList(ctx *context.Context, opts *models.CardRequestOptions, c
customShow.Review = v.Review
customShow.PhoneNumber = v.PhoneNumber
customShow.EmailAddress = v.EmailAddress
customShow.Wechat = v.Wechat
customShow.Contact = v.Contact
customShow.Specs = v.Specs
customShow.Org = v.Org


+ 3
- 0
routers/init.go View File

@@ -21,6 +21,7 @@ import (
"code.gitea.io/gitea/modules/eventsource"
"code.gitea.io/gitea/modules/modelappservice"

userlogin "code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/highlight"
code_indexer "code.gitea.io/gitea/modules/indexer/code"
@@ -78,6 +79,8 @@ func NewServices() {
log.Info("labelmsg.Init() succeed.")
modelappservice.Init()
log.Info("modelappservice.Init() succeed.")
userlogin.UserActionChannelInit()
log.Info("UserActionChannelInit succeed.")
InitESClient()
log.Info("ES Client succeed.")
}


+ 3
- 0
routers/repo/ai_model_convert.go View File

@@ -1109,6 +1109,9 @@ func ModelConvertDownloadModel(ctx *context.Context) {
jobName := ctx.Query("jobName")
if job.IsGpuTrainTask() {
filePath := "jobs/" + jobName + "/model/" + parentDir
if !strings.HasSuffix(filePath, fileName) {
filePath += fileName
}
url, err := storage.Attachments.PresignedGetURL(filePath, fileName)
if err != nil {
log.Error("PresignedGetURL failed: %v", err.Error(), ctx.Data["msgID"])


+ 1
- 1
routers/repo/ai_model_manage.go View File

@@ -156,7 +156,7 @@ func asyncToCopyModel(aiTask *models.Cloudbrain, id string, modelSelectedFile st
updateStatus(id, modelSize, STATUS_FINISHED, modelPath, "")
insertModelFile(id)
}
} else if aiTask.ComputeResource == models.GPUResource || aiTask.ComputeResource == models.GCUResource {
} else if aiTask.ComputeResource == models.GPUResource || aiTask.ComputeResource == models.GCUResource || aiTask.ComputeResource == models.ILUVATAR {

modelPath, modelSize, err := downloadModelFromCloudBrainOne(aiTask.JobName, "", modelSelectedFile, destKeyNamePrefix)
if err != nil {


+ 96
- 18
routers/repo/cloudbrain.go View File

@@ -1227,6 +1227,13 @@ func CloudBrainImageEditPost(ctx *context.Context, form auth.EditImageCloudBrain
}

image.Description = form.Description
image.Framework = form.Framework
image.FrameworkVersion = form.FrameworkVersion
image.CudaVersion = form.CudaVersion
image.PythonVersion = form.PythonVersion
image.OperationSystem = form.OperationSystem
image.OperationSystemVersion = form.OperationSystemVersion
image.ThirdPackages = form.ThirdPackages

err = models.WithTx(func(ctx models.DBContext) error {
if err := models.UpdateLocalImage(image); err != nil {
@@ -1255,7 +1262,15 @@ func CloudBrainImageDelete(ctx *context.Context) {
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.image_not_exist")))
return
}

image, err := models.GetImageByID(id)
if err != nil {
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.image_not_exist")))
return
}
err = grampus.DeleteImage(image)
if err != nil {
log.Info("Delete remote delete failed.error=" + err.Error())
}
err = models.DeleteLocalImage(id)
if err != nil {
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.image_delete_fail")))
@@ -1298,12 +1313,20 @@ func CloudBrainAdminCommitImage(ctx *context.Context, form auth.CommitAdminImage
ImageDescription: form.Description,
ImageTag: form.Tag,
},
IsPrivate: form.IsPrivate,
CloudBrainType: form.Type,
Topics: validTopics,
UID: ctx.User.ID,
Type: models.GetRecommondType(form.IsRecommend),
Place: form.Place,
IsPrivate: form.IsPrivate,
CloudBrainType: form.Type,
Topics: validTopics,
UID: ctx.User.ID,
Type: models.GetRecommondType(form.IsRecommend),
Place: form.Place,
Framework: form.Framework,
FrameworkVersion: form.FrameworkVersion,
CudaVersion: form.CudaVersion,
PythonVersion: form.PythonVersion,
OperationSystem: form.OperationSystem,
OperationSystemVersion: form.OperationSystemVersion,
ThirdPackages: form.ThirdPackages,
ComputeResource: form.ComputeResource,
}, ctx.User)
if err != nil {
log.Error("CommitImagefailed")
@@ -1347,10 +1370,18 @@ func CloudBrainCommitImage(ctx *context.Context, form auth.CommitImageCloudBrain
ImageDescription: form.Description,
ImageTag: form.Tag,
},
IsPrivate: form.IsPrivate,
CloudBrainType: form.Type,
Topics: validTopics,
UID: ctx.User.ID,
IsPrivate: form.IsPrivate,
CloudBrainType: form.Type,
Topics: validTopics,
UID: ctx.User.ID,
Framework: form.Framework,
FrameworkVersion: form.FrameworkVersion,
CudaVersion: form.CudaVersion,
PythonVersion: form.PythonVersion,
OperationSystem: form.OperationSystem,
OperationSystemVersion: form.OperationSystemVersion,
ThirdPackages: form.ThirdPackages,
ComputeResource: form.ComputeResource,
}, ctx.User)
if err != nil {
log.Error("CommitImage(%s) failed:%v", ctx.Cloudbrain.JobName, err.Error(), ctx.Data["msgID"])
@@ -1737,6 +1768,26 @@ func GetImages(ctx *context.Context, opts *models.SearchImageOptions) {
}

opts.SearchOrderBy = orderBy
opts.Framework = ctx.Query("framework")
opts.FrameworkVersion = ctx.Query("frameworkVersion")
opts.CudaVersion = ctx.Query("cuda")
opts.PythonVersion = ctx.Query("python")
opts.OperationSystem = ctx.Query("os")
opts.OperationSystemVersion = ctx.Query("osVersion")
opts.ThirdPackages = ctx.Query("thirdParty")

if ctx.QueryInt64("spec") > 0 {
spec, err := models.FindSpecs(models.FindSpecsOptions{
SpecId: ctx.QueryInt64("spec"),
})
if err == nil && len(spec) > 0 {
opts.AiCenterId = spec[0].AiCenterCode
}
}
computeResource := ctx.Query("computeResource")
if computeResource != "" {
opts.ComputeResource = computeResource
}

imageList, total, err := models.SearchImage(opts)
if err != nil {
@@ -1746,6 +1797,11 @@ func GetImages(ctx *context.Context, opts *models.SearchImageOptions) {
Images: []*models.Image{},
})
} else {
for _, tmp := range imageList {
if tmp.Place == "" {
tmp.Place = getImageUrl(tmp, opts.AiCenterId)
}
}
ctx.JSON(http.StatusOK, models.ImagesPageResult{
Count: total,
Images: imageList,
@@ -1753,6 +1809,21 @@ func GetImages(ctx *context.Context, opts *models.SearchImageOptions) {
}
}

func getImageUrl(image *models.Image, aiCenterID string) string {
if image.AiCenterImages != nil {
for _, tmp := range image.AiCenterImages {
if aiCenterID == "" {
return tmp.ImageUrl
}
if tmp.AiCenterId == aiCenterID {
log.Info("tmp.url=" + tmp.ImageUrl)
return tmp.ImageUrl
}
}
}
return ""
}

func getUID(ctx *context.Context) int64 {
var uid int64 = -1
if ctx.IsSigned {
@@ -1764,13 +1835,20 @@ func getUID(ctx *context.Context) int64 {
func GetAllImages(ctx *context.Context) {
uid := getUID(ctx)
opts := models.SearchImageOptions{
UID: uid,
Keyword: ctx.Query("q"),
ApplyStatus: ctx.QueryInt("apply"),
Topics: ctx.Query("topic"),
IncludeOfficialOnly: ctx.QueryBool("recommend"),
CloudbrainType: ctx.QueryInt("cloudbrainType"),
Status: -1,
UID: uid,
Keyword: ctx.Query("q"),
ApplyStatus: ctx.QueryInt("apply"),
Topics: ctx.Query("topic"),
IncludeOfficialOnly: ctx.QueryBool("recommend"),
CloudbrainType: ctx.QueryInt("cloudbrainType"),
Status: -1,
Framework: ctx.Query("framework"),
FrameworkVersion: ctx.Query("frameworkVersion"),
CudaVersion: ctx.Query("cuda"),
PythonVersion: ctx.Query("python"),
OperationSystem: ctx.Query("os"),
OperationSystemVersion: ctx.Query("osVersion"),
ThirdPackages: ctx.Query("thirdParty"),
}

if ctx.Query("private") != "" {


+ 1
- 1
routers/repo/dataset.go View File

@@ -683,7 +683,7 @@ func asyncToExportDataset(dataset *models.Dataset, storeType int, modelSelectedF
id := uuid.String()
fileName := getFileName(shortFile)
log.Info("shortSrcFile=" + shortFile + " fileName=" + fileName)
if aiTask.ComputeResource == models.GPUResource || aiTask.ComputeResource == models.GCUResource {
if aiTask.ComputeResource == models.GPUResource || aiTask.ComputeResource == models.GCUResource || aiTask.ComputeResource == models.ILUVATAR {
size := getFileSizeFromMinio(aiTask.JobName, shortFile)
if isExistInAttachment(fileName, size, dataset, storeType) {
msgMap[shortFile] = -2


+ 38
- 51
routers/repo/grampus.go View File

@@ -31,6 +31,7 @@ import (
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/grampus"
"code.gitea.io/gitea/modules/modelarts"

// "code.gitea.io/gitea/modules/notification"
// "code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
@@ -66,86 +67,57 @@ const (
tplGrampusNotebookMLUNew base.TplName = "repo/grampus/notebook/mlu/new"
tplGrampusTrainJobMLUNew base.TplName = "repo/grampus/trainjob/mlu/new"

//IluvatarGPGPU
tplGrampusTrainJobIluvatarGPGPUNew base.TplName = "repo/grampus/trainjob/iluvatar-gpgpu/new"

//C2NET notebook
tplGrampusNotebookNew base.TplName = "repo/grampus/notebook/new"
tplGrampusNotebookNew base.TplName = "repo/grampus/notebook/new"

// Inference job
tplGrampusInferenceNew base.TplName = "repo/grampus/inference/new"
tplGrampusInferenceShow base.TplName = "repo/grampus/inference/show"
tplGrampusInferenceNew base.TplName = "repo/grampus/inference/new"
tplGrampusInferenceShow base.TplName = "repo/grampus/inference/show"
)

func GrampusInferenceNew(ctx *context.Context) {
ctx.Data["PageIsCloudBrain"] = true
ctx.HTML(http.StatusOK, tplGrampusInferenceNew)
ctx.HTML(http.StatusOK, tplGrampusInferenceNew)
}

func GrampusInferenceShow(ctx *context.Context) {
ctx.Data["PageIsCloudBrain"] = true
ctx.HTML(http.StatusOK, tplGrampusInferenceShow)
ctx.HTML(http.StatusOK, tplGrampusInferenceShow)
}

func GrampusNotebookNew(ctx *context.Context) {
ctx.Data["PageIsCloudBrain"] = true
ctx.HTML(http.StatusOK, tplGrampusNotebookNew)
// ctx.Data["IsCreate"] = true
// ctx.Data["PageIsCloudBrain"] = true
// notebookType := ctx.QueryInt("type")
// processType := grampus.ProcessorTypeGPU
// if notebookType == 1 {
// processType = grampus.ProcessorTypeNPU
// } else if notebookType == 2 {
// processType = grampus.ProcessorTypeGCU
// } else if notebookType == 3 {
// processType = grampus.ProcessorTypeMLU
// ctx.HTML(http.StatusOK, tplGrampusNotebookMLUNew)
// return
// }
// err := grampusNotebookNewDataPrepare(ctx, processType)
// if err != nil {
// ctx.ServerError("get new notebook-job info failed", err)
// return
// }
// if processType == grampus.ProcessorTypeGPU {
// ctx.HTML(http.StatusOK, tplGrampusNotebookGPUNew)
// } else if processType == grampus.ProcessorTypeNPU {
// ctx.HTML(http.StatusOK, tplGrampusNotebookNPUNew)
// } else if processType == grampus.ProcessorTypeGCU {
// ctx.HTML(http.StatusOK, tplGrampusNotebookGCUNew)
// }
}

func GrampusTrainJobGPUNew(ctx *context.Context) {
ctx.Data["IsCreate"] = true
err := grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
if err != nil {
ctx.ServerError("get new train-job info failed", err)
return
}
ctx.Data["PageIsCloudBrain"] = true

ctx.HTML(http.StatusOK, tplGrampusTrainJobGPUNew)
}

func GrampusTrainJobNPUNew(ctx *context.Context) {
ctx.Data["IsCreate"] = true
err := grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
if err != nil {
ctx.ServerError("get new train-job info failed", err)
return
}
ctx.Data["PageIsCloudBrain"] = true
ctx.HTML(200, tplGrampusTrainJobNPUNew)
}

func GrampusTrainJobGCUNew(ctx *context.Context) {
ctx.Data["IsCreate"] = true
err := grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGCU)
if err != nil {
ctx.ServerError("get new train-job info failed", err)
return
}

ctx.Data["PageIsCloudBrain"] = true
ctx.HTML(http.StatusOK, tplGrampusTrainJobGCUNew)
}

func GrampusTrainJobIluvatarGPGPUNew(ctx *context.Context) {
ctx.Data["IsCreate"] = true
ctx.Data["PageIsCloudBrain"] = true
ctx.HTML(http.StatusOK, tplGrampusTrainJobIluvatarGPGPUNew)
}

func GrampusNotebookCreate(ctx *context.Context, form auth.CreateGrampusNotebookForm) {
ctx.Data["IsCreate"] = true
displayJobName := form.DisplayJobName
@@ -1990,13 +1962,20 @@ func GrampusNotebookRestart(ctx *context.Context) {
}
cloudbrainTask.GrampusNotebookRestart(ctx)
}

func getImageComputeResource(processType string) string {
tail := strings.LastIndex(processType, "/")
if tail > 0 {
return strings.ToUpper(processType[tail+1:])
}
return strings.ToUpper(processType)
}
func GrampusCommitImageShow(ctx *context.Context) {
ctx.Data["PageIsCloudBrain"] = true
ctx.Data["Type"] = ctx.Cloudbrain.Type
if ctx.Cloudbrain.ComputeResource == models.NPUResource {
ctx.Error(http.StatusBadRequest, "unsupported compute resource type")
return
}
ctx.Data["ComputeResource"] = getImageComputeResource(ctx.Cloudbrain.ComputeResource)
ctx.HTML(200, tplGrampusNotebookGPUImageShow)
}
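
getImageComputeResource above upper-cases the task's compute resource and, should the stored value ever carry a slash-separated prefix, keeps only the suffix; the prefixed form is an assumption about possible inputs, plain values pass through unchanged. A compact illustration:

package main

import (
	"fmt"
	"strings"
)

// imageComputeResource mirrors getImageComputeResource from this file.
func imageComputeResource(processType string) string {
	if i := strings.LastIndex(processType, "/"); i > 0 {
		return strings.ToUpper(processType[i+1:])
	}
	return strings.ToUpper(processType)
}

func main() {
	fmt.Println(imageComputeResource("gpu"))            // GPU
	fmt.Println(imageComputeResource("iluvatar/gpgpu")) // GPGPU (hypothetical prefixed input)
}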

@@ -2018,16 +1997,24 @@ func GrampusCommitImage(ctx *context.Context, form auth.CommitImageGrampusForm)
return
}

err := grampus.CommitImage(ctx.Cloudbrain.JobID, models.CommitGrampusImageParams{
err := grampus.CommitImage(ctx.Cloudbrain.JobID, ctx.Cloudbrain.ComputeResource, ctx.Cloudbrain.GetAiCenter(), models.CommitGrampusImageParams{
CommitImageGrampusParams: models.CommitImageGrampusParams{
ImageName: setting.Grampus.GPUImageCommonName,
Description: form.Description,
ImageVersion: form.Tag,
TaskName: "task0",
}, IsPrivate: form.IsPrivate,
CloudBrainType: form.Type,
Topics: validTopics,
UID: ctx.User.ID,
CloudBrainType: form.Type,
Topics: validTopics,
UID: ctx.User.ID,
Framework: form.Framework,
FrameworkVersion: form.FrameworkVersion,
PythonVersion: form.PythonVersion,
CudaVersion: form.CudaVersion,
OperationSystem: form.OperationSystem,
OperationSystemVersion: form.OperationSystemVersion,
ThirdPackages: form.ThirdPackages,
ComputeResource: form.ComputeResource,
}, ctx.User)

if err != nil {


+222 -174  routers/repo/repo_statistic.go

@@ -2,6 +2,7 @@ package repo

import (
"errors"
"sync"
"time"

"code.gitea.io/gitea/models"
@@ -12,8 +13,23 @@ import (
"code.gitea.io/gitea/services/mailer"

"gitea.com/macaron/macaron"
"github.com/Jeffail/tunny"
)

var statisticMutex sync.Mutex
var statisticWg sync.WaitGroup

type StatisticInput struct {
Date string
Repo *models.Repository
T time.Time
ErrorProjects []string
ReposRadar []*models.RepoStatistic
IsInitMinMaxRadar bool
MinRepoRadar models.RepoStatistic
MaxRepoRadar models.RepoStatistic
}

func StatisticAuto() {
go RepoStatisticAuto()
go TimingCountData()
@@ -52,136 +68,196 @@ func RepoStatisticDaily(date string) {
isInitMinMaxRadar := false

var error_projects = make([]string, 0)
for _, repo := range repos {
projectName := getDistinctProjectName(repo)
log.Info("start statistic: %s", projectName)
var numDevMonths, numWikiViews, numContributor, numKeyContributor, numCommitsGrowth, numCommitLinesGrowth, numContributorsGrowth, numCommits int64
repoGitStat, err := models.GetRepoKPIStats(repo)
if err != nil {
log.Error("GetRepoKPIStats failed: %s", projectName)
} else {
numDevMonths = repoGitStat.DevelopAge
numKeyContributor = repoGitStat.KeyContributors
numWikiViews = repoGitStat.WikiPages
numContributor = repoGitStat.Contributors
numCommitsGrowth = repoGitStat.CommitsAdded
numCommitLinesGrowth = repoGitStat.CommitLinesModified
numContributorsGrowth = repoGitStat.ContributorsAdded
numCommits = repoGitStat.Commits

pool := tunny.NewFunc(4, statisticOneRepo)
defer pool.Close()
statisticWg.Add(len(repos))
for _, repo := range repos {
input := &StatisticInput{
Date: date,
Repo: repo,
T: t,
ErrorProjects: error_projects,
ReposRadar: reposRadar,
IsInitMinMaxRadar: isInitMinMaxRadar,
MinRepoRadar: minRepoRadar,
MaxRepoRadar: maxRepoRadar,
}
pool.Process(input)
}
statisticWg.Wait()

var issueFixedRate float32
if repo.NumIssues != 0 {
issueFixedRate = float32(repo.NumClosedIssues) / float32(repo.NumIssues)
if len(error_projects) > 0 {
mailer.SendWarnNotifyMail(setting.Warn_Notify_Mails, warnEmailMessage)
}

//radar map
log.Info("begin statistic radar")
for _, radarInit := range reposRadar {
if radarInit.IsMirror && setting.RadarMap.IgnoreMirrorRepo {
radarInit.Impact = 0
radarInit.Completeness = 0
radarInit.Liveness = 0
radarInit.ProjectHealth = 0
radarInit.TeamHealth = 0
radarInit.Growth = 0
radarInit.RadarTotal = 0
} else {
issueFixedRate = float32(setting.RadarMap.ProjectHealth0IssueCloseRatio)
radarInit.Impact = normalization.Normalization(radarInit.Impact, minRepoRadar.Impact, maxRepoRadar.Impact)
radarInit.Completeness = normalization.Normalization(radarInit.Completeness, minRepoRadar.Completeness, maxRepoRadar.Completeness)
radarInit.Liveness = normalization.Normalization(radarInit.Liveness, minRepoRadar.Liveness, maxRepoRadar.Liveness)
radarInit.ProjectHealth = normalization.Normalization(radarInit.ProjectHealth, minRepoRadar.ProjectHealth, maxRepoRadar.ProjectHealth)
radarInit.TeamHealth = normalization.Normalization(radarInit.TeamHealth, minRepoRadar.TeamHealth, maxRepoRadar.TeamHealth)
radarInit.Growth = normalization.Normalization(radarInit.Growth, minRepoRadar.Growth, maxRepoRadar.Growth)
radarInit.RadarTotal = normalization.GetRadarValue(radarInit.Impact, radarInit.Completeness, radarInit.Liveness, radarInit.ProjectHealth, radarInit.TeamHealth, radarInit.Growth)
}
models.UpdateRepoStat(radarInit)
}

var numVersions int64
numVersions, err = models.GetReleaseCountByRepoID(repo.ID, models.FindReleasesOptions{})
if err != nil {
log.Error("GetReleaseCountByRepoID failed(%s): %v", projectName, err)
}
log.Info("finish statistic: radar")

var datasetSize int64
datasetSize, err = getDatasetSize(repo)
if err != nil {
log.Error("getDatasetSize failed(%s): %v", projectName, err)
}
}

var numComments int64
numComments, err = models.GetCommentCountByRepoID(repo.ID)
if err != nil {
log.Error("GetCommentCountByRepoID failed(%s): %v", projectName, err)
}
func statisticOneRepo(input interface{}) interface{} {
defer statisticWg.Done()

beginTime, endTime := getStatTime(date)
var numVisits int
numVisits, err = repository.AppointProjectView(repo.OwnerName, repo.Name, beginTime, endTime)
if err != nil {
log.Error("AppointProjectView failed(%s): %v", projectName, err)
}
statisticInput := input.(*StatisticInput)
repo := statisticInput.Repo
projectName := getDistinctProjectName(repo)
log.Info("start statistic: %s", projectName)
var numDevMonths, numWikiViews, numContributor, numKeyContributor, numCommitsGrowth, numCommitLinesGrowth, numContributorsGrowth, numCommits int64
repoGitStat, err := models.GetRepoKPIStats(repo)
if err != nil {
log.Error("GetRepoKPIStats failed: %s", projectName)
} else {
numDevMonths = repoGitStat.DevelopAge
numKeyContributor = repoGitStat.KeyContributors
numWikiViews = repoGitStat.WikiPages
numContributor = repoGitStat.Contributors
numCommitsGrowth = repoGitStat.CommitsAdded
numCommitLinesGrowth = repoGitStat.CommitLinesModified
numContributorsGrowth = repoGitStat.ContributorsAdded
numCommits = repoGitStat.Commits

repoStat := models.RepoStatistic{
RepoID: repo.ID,
Date: date,
Name: repo.Name,
Alias: repo.Alias,
IsPrivate: repo.IsPrivate,
IsMirror: repo.IsMirror,
IsFork: repo.IsFork,
RepoCreatedUnix: repo.CreatedUnix,
OwnerName: repo.OwnerName,
NumWatches: int64(repo.NumWatches),
NumStars: int64(repo.NumStars),
NumForks: int64(repo.NumForks),
NumDownloads: repo.CloneCnt,
NumComments: numComments,
NumVisits: int64(numVisits),
NumClosedIssues: int64(repo.NumClosedIssues),
NumVersions: numVersions,
NumDevMonths: numDevMonths,
RepoSize: repo.Size,
DatasetSize: datasetSize,
NumModels: repo.ModelCnt,
NumWikiViews: numWikiViews,
NumCommits: numCommits,
NumIssues: int64(repo.NumIssues),
NumPulls: int64(repo.NumPulls),
IssueFixedRate: issueFixedRate,
NumContributor: numContributor,
NumKeyContributor: numKeyContributor,
NumCommitsGrowth: numCommitsGrowth,
NumCommitLinesGrowth: numCommitLinesGrowth,
NumContributorsGrowth: numContributorsGrowth,
NumCloudbrain: repo.AiTaskCnt,
NumDatasetFile: repo.DatasetCnt,
NumModelConvert: models.QueryModelConvertCountByRepoID(repo.ID),
}
}

dayBeforeDate := t.AddDate(0, 0, -1).Format("2006-01-02")
repoStatisticsBefore, err := models.GetRepoStatisticByDate(dayBeforeDate, repo.ID)
var issueFixedRate float32
if repo.NumIssues != 0 {
issueFixedRate = float32(repo.NumClosedIssues) / float32(repo.NumIssues)
} else {
issueFixedRate = float32(setting.RadarMap.ProjectHealth0IssueCloseRatio)
}

if err != nil {
log.Error("get data of day before the date failed ", err)
} else {
if len(repoStatisticsBefore) > 0 {
repoStatisticBefore := repoStatisticsBefore[0]
repoStat.NumWatchesAdded = repoStat.NumWatches - repoStatisticBefore.NumWatches
repoStat.NumStarsAdded = repoStat.NumStars - repoStatisticBefore.NumStars
repoStat.NumForksAdded = repoStat.NumForks - repoStatisticBefore.NumForks
repoStat.NumDownloadsAdded = repoStat.NumDownloads - repoStatisticBefore.NumDownloads
repoStat.NumCommentsAdded = repoStat.NumComments - repoStatisticBefore.NumComments
repoStat.NumClosedIssuesAdded = repoStat.NumClosedIssues - repoStatisticBefore.NumClosedIssues
repoStat.NumCommitsAdded = repoStat.NumCommits - repoStatisticBefore.NumCommits
repoStat.NumIssuesAdded = repoStat.NumIssues - repoStatisticBefore.NumIssues
repoStat.NumPullsAdded = repoStat.NumPulls - repoStatisticBefore.NumPulls
repoStat.NumContributorAdded = repoStat.NumContributor - repoStatisticBefore.NumContributor
repoStat.NumModelsAdded = repoStat.NumModels - repoStatisticBefore.NumModels
repoStat.NumCloudbrainAdded = repoStat.NumCloudbrain - repoStatisticBefore.NumCloudbrain
repoStat.NumModelConvertAdded = repoStat.NumModelConvert - repoStatisticBefore.NumModelConvert
repoStat.NumDatasetFileAdded = repoStat.NumDatasetFile - repoStatisticBefore.NumDatasetFile
}
}
day4MonthsAgo := t.AddDate(0, -4, 0)
repoStatisticFourMonthsAgo, err := models.GetOneRepoStatisticBeforeTime(day4MonthsAgo)
if err != nil {
log.Error("Get data of 4 moth ago failed.", err)
} else {
repoStat.NumCommentsGrowth = repoStat.NumComments - repoStatisticFourMonthsAgo.NumComments
repoStat.NumIssuesGrowth = repoStat.NumIssues - repoStatisticFourMonthsAgo.NumIssues
}
var numVersions int64
numVersions, err = models.GetReleaseCountByRepoID(repo.ID, models.FindReleasesOptions{})
if err != nil {
log.Error("GetReleaseCountByRepoID failed(%s): %v", projectName, err)
}

var datasetSize int64
datasetSize, err = getDatasetSize(repo)
if err != nil {
log.Error("getDatasetSize failed(%s): %v", projectName, err)
}

models.SyncStatDataToRepo(repo)
var numComments int64
numComments, err = models.GetCommentCountByRepoID(repo.ID)
if err != nil {
log.Error("GetCommentCountByRepoID failed(%s): %v", projectName, err)
}

if _, err = models.InsertRepoStat(&repoStat); err != nil {
log.Error("InsertRepoStat failed(%s): %v", projectName, err)
log.Error("failed statistic: %s", projectName)
error_projects = append(error_projects, projectName)
beginTime, endTime := getStatTime(statisticInput.Date)
var numVisits int
numVisits, err = repository.AppointProjectView(repo.OwnerName, repo.Name, beginTime, endTime)
if err != nil {
log.Error("AppointProjectView failed(%s): %v", projectName, err)
}

continue
repoStat := models.RepoStatistic{
RepoID: repo.ID,
Date: statisticInput.Date,
Name: repo.Name,
Alias: repo.Alias,
IsPrivate: repo.IsPrivate,
IsMirror: repo.IsMirror,
IsFork: repo.IsFork,
RepoCreatedUnix: repo.CreatedUnix,
OwnerName: repo.OwnerName,
NumWatches: int64(repo.NumWatches),
NumStars: int64(repo.NumStars),
NumForks: int64(repo.NumForks),
NumDownloads: repo.CloneCnt,
NumComments: numComments,
NumVisits: int64(numVisits),
NumClosedIssues: int64(repo.NumClosedIssues),
NumVersions: numVersions,
NumDevMonths: numDevMonths,
RepoSize: repo.Size,
DatasetSize: datasetSize,
NumModels: repo.ModelCnt,
NumWikiViews: numWikiViews,
NumCommits: numCommits,
NumIssues: int64(repo.NumIssues),
NumPulls: int64(repo.NumPulls),
IssueFixedRate: issueFixedRate,
NumContributor: numContributor,
NumKeyContributor: numKeyContributor,
NumCommitsGrowth: numCommitsGrowth,
NumCommitLinesGrowth: numCommitLinesGrowth,
NumContributorsGrowth: numContributorsGrowth,
NumCloudbrain: repo.AiTaskCnt,
NumDatasetFile: repo.DatasetCnt,
NumModelConvert: models.QueryModelConvertCountByRepoID(repo.ID),
}

dayBeforeDate := statisticInput.T.AddDate(0, 0, -1).Format("2006-01-02")
repoStatisticsBefore, err := models.GetRepoStatisticByDate(dayBeforeDate, repo.ID)

if err != nil {
log.Error("get data of day before the date failed ", err)
} else {
if len(repoStatisticsBefore) > 0 {
repoStatisticBefore := repoStatisticsBefore[0]
repoStat.NumWatchesAdded = repoStat.NumWatches - repoStatisticBefore.NumWatches
repoStat.NumStarsAdded = repoStat.NumStars - repoStatisticBefore.NumStars
repoStat.NumForksAdded = repoStat.NumForks - repoStatisticBefore.NumForks
repoStat.NumDownloadsAdded = repoStat.NumDownloads - repoStatisticBefore.NumDownloads
repoStat.NumCommentsAdded = repoStat.NumComments - repoStatisticBefore.NumComments
repoStat.NumClosedIssuesAdded = repoStat.NumClosedIssues - repoStatisticBefore.NumClosedIssues
repoStat.NumCommitsAdded = repoStat.NumCommits - repoStatisticBefore.NumCommits
repoStat.NumIssuesAdded = repoStat.NumIssues - repoStatisticBefore.NumIssues
repoStat.NumPullsAdded = repoStat.NumPulls - repoStatisticBefore.NumPulls
repoStat.NumContributorAdded = repoStat.NumContributor - repoStatisticBefore.NumContributor
repoStat.NumModelsAdded = repoStat.NumModels - repoStatisticBefore.NumModels
repoStat.NumCloudbrainAdded = repoStat.NumCloudbrain - repoStatisticBefore.NumCloudbrain
repoStat.NumModelConvertAdded = repoStat.NumModelConvert - repoStatisticBefore.NumModelConvert
repoStat.NumDatasetFileAdded = repoStat.NumDatasetFile - repoStatisticBefore.NumDatasetFile
}
}
day4MonthsAgo := statisticInput.T.AddDate(0, -4, 0)
repoStatisticFourMonthsAgo, err := models.GetOneRepoStatisticBeforeTime(day4MonthsAgo)
if err != nil {
log.Error("Get data of 4 moth ago failed.", err)
} else {
repoStat.NumCommentsGrowth = repoStat.NumComments - repoStatisticFourMonthsAgo.NumComments
repoStat.NumIssuesGrowth = repoStat.NumIssues - repoStatisticFourMonthsAgo.NumIssues
}

models.SyncStatDataToRepo(repo)

if _, err = models.InsertRepoStat(&repoStat); err != nil {
log.Error("InsertRepoStat failed(%s): %v", projectName, err)
log.Error("failed statistic: %s", projectName)
statisticMutex.Lock()
{
statisticInput.ErrorProjects = append(statisticInput.ErrorProjects, projectName)
}
statisticMutex.Unlock()
return nil
}

statisticMutex.Lock()
{

tempRepoStat := models.RepoStatistic{
RepoID: repoStat.RepoID,
@@ -195,102 +271,74 @@ func RepoStatisticDaily(date string) {
Growth: normalization.GetRepoGrowthInitValue(repoStat.NumCommitLinesGrowth, repoStat.NumIssuesGrowth, repoStat.NumCommitsGrowth, repoStat.NumContributorsGrowth, repoStat.NumCommentsGrowth),
}

reposRadar = append(reposRadar, &tempRepoStat)
statisticInput.ReposRadar = append(statisticInput.ReposRadar, &tempRepoStat)

if !isInitMinMaxRadar {
if !statisticInput.IsInitMinMaxRadar {

if !setting.RadarMap.IgnoreMirrorRepo || (setting.RadarMap.IgnoreMirrorRepo && !tempRepoStat.IsMirror) {
minRepoRadar = tempRepoStat
maxRepoRadar = tempRepoStat
isInitMinMaxRadar = true
statisticInput.MinRepoRadar = tempRepoStat
statisticInput.MaxRepoRadar = tempRepoStat
statisticInput.IsInitMinMaxRadar = true
}

} else {
if !setting.RadarMap.IgnoreMirrorRepo || (setting.RadarMap.IgnoreMirrorRepo && !tempRepoStat.IsMirror) {
if tempRepoStat.Impact < minRepoRadar.Impact {
minRepoRadar.Impact = tempRepoStat.Impact
if tempRepoStat.Impact < statisticInput.MinRepoRadar.Impact {
statisticInput.MinRepoRadar.Impact = tempRepoStat.Impact
}

if tempRepoStat.Impact > maxRepoRadar.Impact {
maxRepoRadar.Impact = tempRepoStat.Impact
if tempRepoStat.Impact > statisticInput.MaxRepoRadar.Impact {
statisticInput.MaxRepoRadar.Impact = tempRepoStat.Impact
}

if tempRepoStat.Completeness < minRepoRadar.Completeness {
minRepoRadar.Completeness = tempRepoStat.Completeness
if tempRepoStat.Completeness < statisticInput.MinRepoRadar.Completeness {
statisticInput.MinRepoRadar.Completeness = tempRepoStat.Completeness
}

if tempRepoStat.Completeness > maxRepoRadar.Completeness {
maxRepoRadar.Completeness = tempRepoStat.Completeness
if tempRepoStat.Completeness > statisticInput.MaxRepoRadar.Completeness {
statisticInput.MaxRepoRadar.Completeness = tempRepoStat.Completeness
}

if tempRepoStat.Liveness < minRepoRadar.Liveness {
minRepoRadar.Liveness = tempRepoStat.Liveness
if tempRepoStat.Liveness < statisticInput.MinRepoRadar.Liveness {
statisticInput.MinRepoRadar.Liveness = tempRepoStat.Liveness
}

if tempRepoStat.Liveness > maxRepoRadar.Liveness {
maxRepoRadar.Liveness = tempRepoStat.Liveness
if tempRepoStat.Liveness > statisticInput.MaxRepoRadar.Liveness {
statisticInput.MaxRepoRadar.Liveness = tempRepoStat.Liveness
}

if tempRepoStat.ProjectHealth < minRepoRadar.ProjectHealth {
minRepoRadar.ProjectHealth = tempRepoStat.ProjectHealth
if tempRepoStat.ProjectHealth < statisticInput.MinRepoRadar.ProjectHealth {
statisticInput.MinRepoRadar.ProjectHealth = tempRepoStat.ProjectHealth
}

if tempRepoStat.ProjectHealth > maxRepoRadar.ProjectHealth {
maxRepoRadar.ProjectHealth = tempRepoStat.ProjectHealth
if tempRepoStat.ProjectHealth > statisticInput.MaxRepoRadar.ProjectHealth {
statisticInput.MaxRepoRadar.ProjectHealth = tempRepoStat.ProjectHealth
}

if tempRepoStat.TeamHealth < minRepoRadar.TeamHealth {
minRepoRadar.TeamHealth = tempRepoStat.TeamHealth
if tempRepoStat.TeamHealth < statisticInput.MinRepoRadar.TeamHealth {
statisticInput.MinRepoRadar.TeamHealth = tempRepoStat.TeamHealth
}

if tempRepoStat.TeamHealth > maxRepoRadar.TeamHealth {
maxRepoRadar.TeamHealth = tempRepoStat.TeamHealth
if tempRepoStat.TeamHealth > statisticInput.MaxRepoRadar.TeamHealth {
statisticInput.MaxRepoRadar.TeamHealth = tempRepoStat.TeamHealth
}

if tempRepoStat.Growth < minRepoRadar.Growth {
minRepoRadar.Growth = tempRepoStat.Growth
if tempRepoStat.Growth < statisticInput.MinRepoRadar.Growth {
statisticInput.MinRepoRadar.Growth = tempRepoStat.Growth
}

if tempRepoStat.Growth > maxRepoRadar.Growth {
maxRepoRadar.Growth = tempRepoStat.Growth
if tempRepoStat.Growth > statisticInput.MaxRepoRadar.Growth {
statisticInput.MaxRepoRadar.Growth = tempRepoStat.Growth
}

}

}

log.Info("finish statistic: %s", getDistinctProjectName(repo))
}
statisticMutex.Unlock()

if len(error_projects) > 0 {
mailer.SendWarnNotifyMail(setting.Warn_Notify_Mails, warnEmailMessage)
}

//radar map
log.Info("begin statistic radar")
for _, radarInit := range reposRadar {
if radarInit.IsMirror && setting.RadarMap.IgnoreMirrorRepo {
radarInit.Impact = 0
radarInit.Completeness = 0
radarInit.Liveness = 0
radarInit.ProjectHealth = 0
radarInit.TeamHealth = 0
radarInit.Growth = 0
radarInit.RadarTotal = 0
} else {
radarInit.Impact = normalization.Normalization(radarInit.Impact, minRepoRadar.Impact, maxRepoRadar.Impact)
radarInit.Completeness = normalization.Normalization(radarInit.Completeness, minRepoRadar.Completeness, maxRepoRadar.Completeness)
radarInit.Liveness = normalization.Normalization(radarInit.Liveness, minRepoRadar.Liveness, maxRepoRadar.Liveness)
radarInit.ProjectHealth = normalization.Normalization(radarInit.ProjectHealth, minRepoRadar.ProjectHealth, maxRepoRadar.ProjectHealth)
radarInit.TeamHealth = normalization.Normalization(radarInit.TeamHealth, minRepoRadar.TeamHealth, maxRepoRadar.TeamHealth)
radarInit.Growth = normalization.Normalization(radarInit.Growth, minRepoRadar.Growth, maxRepoRadar.Growth)
radarInit.RadarTotal = normalization.GetRadarValue(radarInit.Impact, radarInit.Completeness, radarInit.Liveness, radarInit.ProjectHealth, radarInit.TeamHealth, radarInit.Growth)
}
models.UpdateRepoStat(radarInit)
}

log.Info("finish statistic: radar")

log.Info("finish statistic: %s", getDistinctProjectName(repo))
return nil
}
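
For reference, a minimal sketch (not part of the diff) of the github.com/Jeffail/tunny worker-pool pattern the refactor above adopts: a fixed-size pool built with NewFunc, one Process call per repository, and Close when done. The repository names below are made up, and the real code additionally shares results through statisticWg and statisticMutex.

package main

import (
	"fmt"

	"github.com/Jeffail/tunny"
)

func main() {
	//The worker function receives whatever payload Process was given.
	pool := tunny.NewFunc(4, func(payload interface{}) interface{} {
		repoName := payload.(string)
		//Per-repository statistics work would go here.
		return "statistic done: " + repoName
	})
	defer pool.Close()

	for _, r := range []string{"repoA", "repoB", "repoC"} {
		//Process blocks until a worker has handled the payload and returns its result.
		fmt.Println(pool.Process(r))
	}
}
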

func getDistinctProjectName(repo *models.Repository) string {


+22 -0  routers/repo/user_data_analysis.go

@@ -24,6 +24,11 @@ const (
USER_YEAR = 2023
)

type ProjectsPeriodData struct {
RecordBeginTime string `json:"recordBeginTime"`
LastUpdatedTime string `json:"lastUpdatedTime"`
}

func getUserMetricsExcelHeader(ctx *context.Context) map[string]string {
excelHeader := make([]string, 0)
excelHeader = append(excelHeader, ctx.Tr("user.metrics.date"))
@@ -749,6 +754,7 @@ func TimingCountDataByDate(date string) {

func TimingCountData() {
log.Info("start to time count data")
context.UserActionMapClear()
currentTimeNow := time.Now()
log.Info("current time:" + currentTimeNow.Format("2006-01-02 15:04:05"))
startTime := currentTimeNow.AddDate(0, 0, -1).Format("2006-01-02")
@@ -951,3 +957,19 @@ func QueryUserAnnualReport(ctx *context.Context) {
result := models.QueryUserAnnualReport(ctx.User.ID)
ctx.JSON(http.StatusOK, result)
}

func getRecordBeginTime() (time.Time, error) {
return time.ParseInLocation("2006-01-02", setting.RadarMap.RecordBeginTime, time.Local)
}

func QueryUserCountTimeInfo(ctx *context.Context) {
recordBeginTime, err := getRecordBeginTime()
if err != nil {
recordBeginTime = time.Now()
}
projectsPeriodData := ProjectsPeriodData{
RecordBeginTime: recordBeginTime.Format("2006-01-02"),
LastUpdatedTime: models.GetLastModifyTime(),
}
ctx.JSON(http.StatusOK, projectsPeriodData)
}

+35 -0  routers/resources/acc_card.go

@@ -0,0 +1,35 @@
package resources

import (
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/routers/response"
"code.gitea.io/gitea/services/cloudbrain/resource"
"net/http"
)

func GetAccCardList(ctx *context.Context) {
list, err := resource.GetAccCardList()
if err != nil {
log.Error("GetAccCardList error.%v", err)
ctx.JSON(http.StatusOK, response.OuterResponseError(err))
return
}

m := map[string]interface{}{"list": list}
ctx.JSON(http.StatusOK, response.OuterSuccessWithData(m))
}

func GetAvailableAICenterList(ctx *context.Context) {
list, err := resource.GetAvailableAICenter()
if err != nil {
log.Error("GetAvailableAICenterList error.%v", err)
ctx.JSON(http.StatusOK, response.OuterResponseError(err))
return
}
for i := 0; i < len(list); i++ {
list[i].Tr(ctx.Language())
}
m := map[string]interface{}{"list": list}
ctx.JSON(http.StatusOK, response.OuterSuccessWithData(m))
}

+17 -3  routers/routes/routes.go

@@ -196,6 +196,12 @@ func NewMacaron() *macaron.Macaron {
ExpiresAfter: setting.StaticCacheTime,
},
))
m.Use(public.CustomJson(
&public.Options{
SkipLogging: setting.DisableRouterLog,
ExpiresAfter: 0,
},
))
m.Use(public.StaticHandler(
setting.AvatarUploadPath,
&public.Options{
@@ -363,7 +369,7 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Post("/all/search/", routers.Search)
m.Get("/all/search/", routers.EmptySearch)
m.Get("/all/dosearch/", routers.SearchApi)
m.Post("/user/login/kanban", user.SignInPostAPI)
m.Post("/user/login/kanban", user.SignInPostAPI, reqSignOut)
m.Get("/home/term", routers.HomeTerm)
m.Get("/home/annual_privacy", routers.HomeAnnual)
m.Get("/home/model_privacy", routers.HomeWenxin)
@@ -445,7 +451,7 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Group("/card_request", func() {
m.Get("/creation/required", card_request.GetCreationInfo)
m.Get("/list", card_request.GetCardRequestList)
m.Get("/resource/list", card_request.GetResourceList)
}, ignSignIn)

m.Group("/card_request", func() {
@@ -453,6 +459,8 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/my_list", card_request.GetMyCardRequestList)
m.Get("/admin_list", operationReq, card_request.GetAdminCardRequestList)
m.Get("/specification/list", operationReq, admin.GetAllResourceSpecificationList)
m.Get("/resources/queue/centers", operationReq, admin.GetResourceAiCenters)
m.Get("/resources/queue/codes", operationReq, admin.GetResourceQueueCodes)
m.Put("/update/:id", binding.Bind(structs.CardReq{}), card_request.UpdateCardRequest)
m.Put("/admin/update/:id", operationReq, bindIgnErr(structs.CardReq{}), card_request.UpdateCardRequestAndSpec)

@@ -536,7 +544,6 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Post("/unbind", authentication.UnbindWechat)
m.Get("/bind", authentication.GetBindPage)
}, reqSignIn)

m.Group("/user/settings", func() {
m.Get("", userSetting.Profile)
m.Post("", bindIgnErr(auth.UpdateProfileForm{}), userSetting.ProfilePost)
@@ -579,6 +586,7 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Post("/delete", userSetting.DeleteOAuth2Application)
m.Post("/revoke", userSetting.RevokeOAuth2Grant)
})

m.Combo("/applications").Get(userSetting.Applications).
Post(bindIgnErr(auth.NewAccessTokenForm{}), userSetting.ApplicationsPost)
m.Post("/applications/delete", userSetting.DeleteApplication)
@@ -740,6 +748,9 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Post("/add", binding.Bind(models.ResourceSceneReq{}), admin.AddResourceScene)
m.Post("/update/:id", binding.BindIgnErr(models.ResourceSceneReq{}), admin.UpdateResourceScene)
})
m.Group("/image", func() {
m.Post("/sync", admin.SyncGrampusImage)
})
})
m.Group("/ai_model", func() {
m.Post("/update_version", repo.UpdateAllModelMeta)
@@ -1392,6 +1403,9 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.GrampusTrainJobGCUNew)
m.Post("/create", reqWechatBind, reqRepoCloudBrainWriter, bindIgnErr(auth.CreateGrampusTrainJobForm{}), context.PointAccount(), repo.GrampusTrainJobGcuCreate)
})
m.Group("/iluvatar-gpgpu", func() {
m.Get("/create", reqWechatBind, reqRepoCloudBrainWriter, context.PointAccount(), repo.GrampusTrainJobIluvatarGPGPUNew)
})
})

m.Group("/inference-job", func() {


+2 -2  routers/user/auth.go

@@ -306,7 +306,7 @@ func SignInPostAPI(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("sign_in")
UserName := ctx.Query("UserName")
Password := ctx.Query("Password")
log.Info("0000000")
log.Info("u=" + UserName)
orderedOAuth2Names, oauth2Providers, err := models.GetActiveOAuth2Providers()
if err != nil {
ctx.ServerError("UserSignIn", err)
@@ -327,6 +327,7 @@ func SignInPostAPI(ctx *context.Context) {
}
u, err := models.UserSignIn(UserName, Password)
if err != nil {
log.Info("login failed.UserName=" + UserName + " Password=" + Password)
ctx.ServerError("UserSignIn", err)
return
}
@@ -786,7 +787,6 @@ func handleSignInFullNotRedirect(ctx *context.Context, u *models.User, remember
}

ctx.SetCookie("lang", u.Language, nil, setting.AppSubURL, setting.SessionConfig.Domain, setting.SessionConfig.Secure, true)

// Clear whatever CSRF has right now, force to generate a new one
ctx.SetCookie(setting.CSRFCookieName, "", -1, setting.AppSubURL, setting.SessionConfig.Domain, setting.SessionConfig.Secure, true)



+17 -3  services/ai_task_service/cluster/c2net.go

@@ -91,14 +91,14 @@ func (c C2NetClusterAdapter) GetNotebookImages(req entity.GetImageReq, centerId
return r, false, nil
}

func hasIntersection(imageCenterInfos []models.AICenterImage, centerId ...string) bool {
func hasIntersection(imageCenterInfos []models.AiCenterImage, centerId ...string) bool {
if len(centerId) == 0 || len(imageCenterInfos) == 0 {
//If no centerId was passed in, or the queried image carries no available-center info, skip the check and return true directly
return true
}
for _, aicenterImage := range imageCenterInfos {
for _, centerCode := range centerId {
if aicenterImage.AICenterID == centerCode {
if aicenterImage.AiCenterId == centerCode {
return true
}
}
@@ -294,6 +294,7 @@ func getGrampusAvailableCenterIds(queues []models.ResourceQueue, imageId string,
}

processType := computeSource.FullName
log.Info("processType=" + computeSource.FullName + " jobType=" + string(jobType))
images, err := grampus.GetImages(processType, string(jobType))
if err != nil {
log.Warn("can not get image info from grampus", err)
@@ -303,11 +304,24 @@ func getGrampusAvailableCenterIds(queues []models.ResourceQueue, imageId string,
for _, image := range images.Infos {
if image.ID == imageId {
for _, centerInfo := range image.AICenterImage {
imageCenterIds = append(imageCenterIds, centerInfo.AICenterID)
imageCenterIds = append(imageCenterIds, centerInfo.AiCenterId)
}
break
}
}
images, err = grampus.GetUserImages(processType, string(jobType))
if err == nil {
for _, image := range images.Infos {
if image.ID == imageId {
for _, centerInfo := range image.AICenterImage {
imageCenterIds = append(imageCenterIds, centerInfo.AiCenterId)
}
break
}
}
} else {
log.Warn("can not get user image info from grampus", err)
}
if len(imageCenterIds) == 0 {
return []string{}, errors.New("image not available")
}


+3 -1  services/ai_task_service/container_builder/code_builder.go

@@ -54,7 +54,9 @@ func (b *CodeBuilder) Build(ctx *context.CreationContext) ([]entity.ContainerDat
log.Error("downloadZipCode failed, server timed out: %s (%v)", repo.FullName(), err)
return nil, response.LOAD_CODE_FAILED
}

if opts.VolumeFolder && storageTypes[0] == entity.OBS {
uploader.MKDIR(remoteDir)
}
if err := uploader.UploadDir(codeLocalPath, remoteDir); err != nil {
log.Error("Failed to UploadDir: %s (%v)", repo.FullName(), err)
return nil, response.LOAD_CODE_FAILED


+6 -1  services/ai_task_service/container_builder/dataset_builder.go

@@ -29,7 +29,12 @@ func (b *DatasetBuilder) Build(ctx *context.CreationContext) ([]entity.Container
var data []entity.ContainerData

//For C2Net GPU debug tasks the dataset folder must also be mounted, so that files under dataset are not packed into the image when an image is committed
if ctx.Request.Cluster == entity.C2Net && (ctx.Request.JobType == models.JobTypeDebug || ctx.Request.JobType == models.JobTypeTrain) && ctx.Request.ComputeSource.Name == models.GPU {
if ctx.Request.Cluster == entity.C2Net && (ctx.Request.JobType == models.JobTypeDebug || ctx.Request.JobType == models.JobTypeTrain) &&
(ctx.Request.ComputeSource.Name == models.GPU ||
ctx.Request.ComputeSource.Name == models.MLU ||
ctx.Request.ComputeSource.Name == models.GCU ||
ctx.Request.ComputeSource.Name == models.ILUVATAR ||
ctx.Request.ComputeSource.Name == models.METAX) {
log.Info("mount dataset directory.")
jobName := ctx.Request.JobName
storageTypes := b.Opts.AcceptStorageType


+20 -20  services/ai_task_service/container_builder/pre_model_builder.go

@@ -30,25 +30,25 @@ func (b *PretrainModelBuilder) SetOpts(opts *entity.ContainerBuildOpts) {
func (b *PretrainModelBuilder) Build(ctx *context.CreationContext) ([]entity.ContainerData, *response.BizError) {
form := ctx.Request
var preTrainModelEntity []entity.ContainerData
if ctx.Request.Cluster == entity.C2Net && (ctx.Request.JobType == models.JobTypeDebug || ctx.Request.JobType == models.JobTypeTrain) && ctx.Request.ComputeSource.Name == models.GPU {
//Mount a folder so that the pretrainmodel directory inside the container is not packed when committing an image
uploader := storage_helper.SelectStorageHelperFromStorageType(entity.OBS)
objectKey := path.Join(uploader.GetJobDefaultObjectKeyPrefix(form.JobName), "pretrain_model_mount")
uploader.MKDIR(objectKey, "pretrain model folder")
preTrainModelEntity = append(preTrainModelEntity, entity.ContainerData{
Name: "pretrain_model_mount",
Bucket: uploader.GetBucket(),
EndPoint: uploader.GetEndpoint(),
ObjectKey: objectKey + "/",
ReadOnly: false,
ContainerPath: b.Opts.ContainerPath,
RealPath: uploader.GetRealPath(objectKey),
S3DownloadUrl: uploader.GetS3DownloadUrl(objectKey),
IsDir: true,
IsOverwrite: true,
IsNeedUnzip: false,
})
}
//if ctx.Request.Cluster == entity.C2Net && (ctx.Request.JobType == models.JobTypeDebug || ctx.Request.JobType == models.JobTypeTrain) && ctx.Request.ComputeSource.Name == models.GPU {
// //Mount a folder so that the pretrainmodel directory inside the container is not packed when committing an image
// uploader := storage_helper.SelectStorageHelperFromStorageType(entity.OBS)
// objectKey := path.Join(uploader.GetJobDefaultObjectKeyPrefix(form.JobName), "pretrain_model_mount")
// uploader.MKDIR(objectKey, "pretrain model folder")
// preTrainModelEntity = append(preTrainModelEntity, entity.ContainerData{
// Name: "pretrain_model_mount",
// Bucket: uploader.GetBucket(),
// EndPoint: uploader.GetEndpoint(),
// ObjectKey: objectKey + "/",
// ReadOnly: false,
// ContainerPath: b.Opts.ContainerPath,
// RealPath: uploader.GetRealPath(objectKey),
// S3DownloadUrl: uploader.GetS3DownloadUrl(objectKey),
// IsDir: true,
// IsOverwrite: true,
// IsNeedUnzip: false,
// })
//}

if b.Opts.Disable {
return preTrainModelEntity, nil
@@ -123,7 +123,7 @@ func (b *PretrainModelBuilder) buildModelData(m *models.AiModelManage, jobName s
Bucket: uploader.GetBucket(),
EndPoint: uploader.GetEndpoint(),
ObjectKey: preTrainModelPath,
ReadOnly: false,
ReadOnly: true,
ContainerPath: path.Join(b.Opts.ContainerPath, m.Name),
RealPath: uploader.GetRealPath(preTrainModelPath),
S3DownloadUrl: uploader.GetS3DownloadUrl(preTrainModelPath),


+25 -1  services/ai_task_service/task/grampus_notebook_task.go

@@ -75,7 +75,7 @@ func GetGrampusNoteBookConfig(opts entity.AITaskConfigKey) *entity.AITaskBaseCon
}
}

if opts.ComputeSource == models.NPU || opts.ComputeSource == models.DCU {
if opts.ComputeSource == models.NPU {
config = &entity.AITaskBaseConfig{
ContainerSteps: map[entity.ContainerDataType]*entity.ContainerBuildOpts{
entity.ContainerCode: {
@@ -97,6 +97,29 @@ func GetGrampusNoteBookConfig(opts entity.AITaskConfigKey) *entity.AITaskBaseCon
}
}

if opts.ComputeSource == models.DCU {
config = &entity.AITaskBaseConfig{
ContainerSteps: map[entity.ContainerDataType]*entity.ContainerBuildOpts{
entity.ContainerCode: {
ContainerPath: codePath,
ReadOnly: false,
AcceptStorageType: []entity.StorageType{entity.OBS},
VolumeFolder: true,
},
entity.ContainerDataset: {
ContainerPath: datasetPath,
ReadOnly: true,
AcceptStorageType: []entity.StorageType{entity.MINIO, entity.OBS},
},
entity.ContainerPreTrainModel: {
ContainerPath: pretrainModelPath,
ReadOnly: true,
AcceptStorageType: []entity.StorageType{entity.MINIO, entity.OBS},
},
},
}
}

if opts.ComputeSource == models.GCU {
config = &entity.AITaskBaseConfig{
ContainerSteps: map[entity.ContainerDataType]*entity.ContainerBuildOpts{
@@ -104,6 +127,7 @@ func GetGrampusNoteBookConfig(opts entity.AITaskConfigKey) *entity.AITaskBaseCon
ContainerPath: codePath,
ReadOnly: false,
AcceptStorageType: []entity.StorageType{entity.MINIO, entity.OBS},
VolumeFolder: true,
},
entity.ContainerDataset: {
ContainerPath: datasetPath,


+2 -0  services/ai_task_service/task/grampus_train_task.go

@@ -96,6 +96,8 @@ func GetGrampusTrainTaskConfig(opts entity.AITaskConfigKey) *entity.AITaskBaseCo
config.ActionType = models.ActionCreateGrampusGPUTrainTask
case models.GCU:
config.ActionType = models.ActionCreateGrampusGCUTrainTask
case models.ILUVATAR:
config.ActionType = models.ActionCreateGrampusILUVATARTrainTask
}
config.IsActionUseJobId = true
return config


+3 -3  services/ai_task_service/task/task_base.go

@@ -289,7 +289,7 @@ func (g DefaultAITaskTemplate) Update(cloudbrainId int64) *response.BizError {
//Second, the task has stayed in PREPARING longer than the configured wait time, which means async creation took too long or an unknown error occurred
if cloudbrain.NeedActiveStop() {
log.Info("AI task should active stop.cloudbrainId=%d", cloudbrainId)
if g.JobType == models.JobTypeDebug {
if g.JobType == models.JobTypeDebug || g.JobType == models.JobTypeOnlineInference {
err = StopAITaskByJobNameFromRemote(cloudbrain, c.QueryNoteBookByJobName, c.StopNoteBook)
} else {
err = StopAITaskByJobNameFromRemote(cloudbrain, c.QueryTrainJobByJobName, c.StopTrainJob)
@@ -307,7 +307,7 @@ func (g DefaultAITaskTemplate) Update(cloudbrainId int64) *response.BizError {
log.Info("AI task is preparing.No need to update from remote.cloudbrainId=%d", cloudbrainId)
return nil
}
if g.JobType == models.JobTypeDebug {
if g.JobType == models.JobTypeDebug || g.JobType == models.JobTypeOnlineInference {
err = UpdateAITaskFromRemote(cloudbrain, c.QueryNoteBook)
} else {
err = UpdateAITaskFromRemote(cloudbrain, c.QueryTrainJob)
@@ -435,7 +435,7 @@ func (g DefaultAITaskTemplate) GetOperationProfile(cloudbrainId int64) (*entity.
}
var s *entity.OperationProfile
var err error
if g.JobType == models.JobTypeDebug {
if g.JobType == models.JobTypeDebug || g.JobType == models.JobTypeOnlineInference {
s, err = GetOperationProfile(cloudbrainId, c.GetNoteBookOperationProfile)
} else {
s, err = GetOperationProfile(cloudbrainId, c.GetTrainJobOperationProfile)


+41 -18  services/ai_task_service/task/task_creation_info.go

@@ -56,28 +56,15 @@ func GetAITaskCreationInfo(req entity.GetAITaskCreationInfoReq) (*entity.Creatio
//Generate the task name
result.DisplayJobName = t.GetDisplayJobName(req.User.Name)
specsMap := make(map[string][]*structs.SpecificationShow, 0)
//Query resource specs with internet access
if specs, err := t.GetSpecs(entity.GetSpecOpts{
UserId: req.User.ID,
ComputeSource: *req.ComputeSource,
HasInternet: 2, //0 all;1 no internet;2 has internet
}); err == nil {
specsMap["has_internet"] = specs
}
//Query resource specs without internet access
if specs, err := t.GetSpecs(entity.GetSpecOpts{
UserId: req.User.ID,
ComputeSource: *req.ComputeSource,
HasInternet: 1, //0 all;1 no internet;2 has internet
}); err == nil {
specsMap["no_internet"] = specs
}

//Query all resource specs
if specs, err := t.GetSpecs(entity.GetSpecOpts{
UserId: req.User.ID,
ComputeSource: *req.ComputeSource,
}); err == nil {
specsMap["all"] = specs
specsMap["has_internet"] = filterHasInternetSpecs(specs)
specsMap["no_internet"] = filterNoInternetSpecs(specs)
}
result.Specs = specsMap
// 查询镜像列表
@@ -103,6 +90,26 @@ func GetAITaskCreationInfo(req entity.GetAITaskCreationInfoReq) (*entity.Creatio
return result, nil
}

func filterHasInternetSpecs(allSpecs []*structs.SpecificationShow) []*structs.SpecificationShow {
hasInternetSpecs := make([]*structs.SpecificationShow, 0)
for i := 0; i < len(allSpecs); i++ {
if allSpecs[i].HasInternet == int(models.HasInternet) {
hasInternetSpecs = append(hasInternetSpecs, allSpecs[i])
}
}
return hasInternetSpecs
}

func filterNoInternetSpecs(allSpecs []*structs.SpecificationShow) []*structs.SpecificationShow {
noInternetSpecs := make([]*structs.SpecificationShow, 0)
for i := 0; i < len(allSpecs); i++ {
if allSpecs[i].HasInternet == int(models.NoInternet) {
noInternetSpecs = append(noInternetSpecs, allSpecs[i])
}
}
return noInternetSpecs
}

func GetAvailableImageInfoBySpec(req entity.GetAITaskCreationImageInfoReq) (*entity.ImageRequiredInfo, *response.BizError) {
result := &entity.ImageRequiredInfo{}
t, err := GetAITaskTemplate(req.JobType, req.ClusterType)
@@ -115,9 +122,25 @@ func GetAvailableImageInfoBySpec(req entity.GetAITaskCreationImageInfoReq) (*ent
UserId: req.UserID,
JobType: req.JobType,
})

//load from db
reImages := make([]entity.ClusterImage, 0, 10)
result.Images = reImages
log.Info("start to query db,params=" + req.ComputeSource.FullName + " 2=" + req.ComputeSource.Name)
imageList, dbErr := models.GetImageByComputeResource(req.ComputeSource.Name)
if dbErr != nil {
log.Info("query error" + dbErr.Error())
} else {
for _, tmpImage := range imageList {
clusterImage := entity.ClusterImage{
ImageId: tmpImage.ImageID,
ImageName: tmpImage.Tag,
ImageUrl: tmpImage.Place,
}
result.Images = append(result.Images, clusterImage)
}
}
if images, canUseAll, err := t.GetImages(*req.ComputeSource, centerIds...); err == nil {
result.Images = images
result.Images = append(result.Images, images...)
result.CanUseAllImages = canUseAll
}
return result, nil


+5 -2  services/card_request/card_request.go

@@ -21,9 +21,9 @@ func GetCreationInfo() (map[string][]string, error) {

for _, xpuInfo := range xpuInfoBase {
if _, ok := xpuInfoMap[xpuInfo.ResourceType]; ok {
xpuInfoMap[xpuInfo.ResourceType] = append(xpuInfoMap[xpuInfo.ResourceType], xpuInfo.CardTypeShow)
xpuInfoMap[xpuInfo.ResourceType] = append(xpuInfoMap[xpuInfo.ResourceType], xpuInfo.CardType)
} else {
xpuInfoMap[xpuInfo.ResourceType] = []string{xpuInfo.CardTypeShow}
xpuInfoMap[xpuInfo.ResourceType] = []string{xpuInfo.CardType}
}
}
return xpuInfoMap, nil
@@ -53,6 +53,7 @@ func UpdateCardRequestAdmin(cardReq api.CardReq) error {
DiskCapacity: cardReq.DiskCapacity,
Contact: cardReq.Contact,
PhoneNumber: cardReq.PhoneNumber,
Wechat: cardReq.Wechat,
BeginDate: cardReq.BeginDate,
EndDate: cardReq.EndDate,
Description: cardReq.Description,
@@ -91,6 +92,7 @@ func UpdateCardRequest(cardReq api.CardReq, request *models.CardRequest) error {
request.Contact = cardReq.Contact
request.EmailAddress = cardReq.EmailAddress
request.PhoneNumber = cardReq.PhoneNumber
request.Wechat = cardReq.Wechat

beginTime, err := time.Parse(DATE_LAYOUT, cardReq.BeginDate)
if err != nil {
@@ -121,6 +123,7 @@ func CreateCardRequest(cardReq api.CardReq, uid int64) error {
DiskCapacity: cardReq.DiskCapacity,
Contact: cardReq.Contact,
PhoneNumber: cardReq.PhoneNumber,
Wechat: cardReq.Wechat,
BeginDate: cardReq.BeginDate,
EndDate: cardReq.EndDate,
Description: cardReq.Description,


+69 -2  services/cloudbrain/resource/resource_queue.go

@@ -1,10 +1,12 @@
package resource

import (
"fmt"
"strings"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/grampus"
"code.gitea.io/gitea/modules/log"
"fmt"
)

func AddResourceQueue(req models.ResourceQueueReq) error {
@@ -21,7 +23,7 @@ func UpdateResourceQueue(queueId int64, req models.ResourceQueueReq) error {
QueueName: req.QueueName,
Remark: req.Remark,
HasInternet: req.HasInternet,
}); err != nil {
}, req.IsAvailable); err != nil {
return err
}
return nil
@@ -54,6 +56,69 @@ func GetResourceAiCenters() ([]models.ResourceAiCenterRes, error) {
return r, nil
}

func filterGrampusImage(all []models.GrampusImage) []models.GrampusImage {
result := make([]models.GrampusImage, 0)
for _, tmp := range all {
computeResource := getComputeResourceByProcessType(tmp.ProcessorType)
if computeResource == "GCU" || computeResource == "MLU" || computeResource == "ILUVATAR-GPGPU" || computeResource == "METAX-GPGPU" {
result = append(result, tmp)
}
}
return result
}

func getComputeResourceByProcessType(processType string) string {
tail := strings.LastIndex(processType, "/")
if tail > 0 {
return strings.ToUpper(processType[tail+1:])
}
return strings.ToUpper(processType)
}

func SyncGrampusImage(doerId int64) error {
allImage, err := grampus.GetAllBaseImages()
if err == nil {
log.Info("Query image from grampus, length=" + fmt.Sprint(len(allImage.Infos)))
dbAllImage, err := models.GetGrampusAllBaseImage()
if err == nil {
log.Info("start to deal db image update.")
filterGrampusImageList := filterGrampusImage(allImage.Infos)
log.Info("sync grampus image length=." + fmt.Sprint(len(filterGrampusImageList)))
grampusMap := make(map[string]models.GrampusImage, 0)
for _, image := range filterGrampusImageList {
grampusMap[image.ID] = image
}
dbMap := make(map[string]*models.Image, 0)
for _, image := range dbAllImage {
dbMap[image.HuJingId] = image
}
insertList := make([]models.GrampusImage, 0)
updateList := make([]models.GrampusImage, 0)
updateDbList := make([]*models.Image, 0)
for _, image := range filterGrampusImageList {
if dbImage, ok := dbMap[image.ID]; !ok {
insertList = append(insertList, image)
} else {
updateList = append(updateList, image)
updateDbList = append(updateDbList, dbImage)
}
}
deleteList := make([]*models.Image, 0)
for _, image := range dbAllImage {
if _, ok := grampusMap[image.HuJingId]; !ok {
deleteList = append(deleteList, image)
}
}
models.SyncGrampusAllBaseImageToDb(insertList, updateList, updateDbList, deleteList, doerId)
} else {
log.Error("failed query image db.error=" + err.Error())
}
} else {
log.Error("failed query image grampus.error=" + err.Error())
}
return err
}
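
For reference, a minimal sketch (not part of the diff) of the map-based reconcile pattern SyncGrampusImage follows above: index both sides by ID, then derive insert, update and delete lists. The types and field names below are illustrative, not the repository's.

package main

import "fmt"

type remoteImage struct{ ID string }
type localImage struct{ HuJingId string }

func reconcile(remote []remoteImage, local []localImage) (inserts []remoteImage, updates []remoteImage, deletes []localImage) {
	remoteByID := make(map[string]remoteImage, len(remote))
	for _, r := range remote {
		remoteByID[r.ID] = r
	}
	localByID := make(map[string]localImage, len(local))
	for _, l := range local {
		localByID[l.HuJingId] = l
	}
	for _, r := range remote {
		if _, ok := localByID[r.ID]; ok {
			updates = append(updates, r) //present on both sides: refresh the local copy
		} else {
			inserts = append(inserts, r) //only remote: create locally
		}
	}
	for _, l := range local {
		if _, ok := remoteByID[l.HuJingId]; !ok {
			deletes = append(deletes, l) //only local: remove
		}
	}
	return
}

func main() {
	ins, ups, dels := reconcile(
		[]remoteImage{{ID: "a"}, {ID: "b"}},
		[]localImage{{HuJingId: "b"}, {HuJingId: "c"}},
	)
	fmt.Println(len(ins), len(ups), len(dels)) //prints: 1 1 1
}
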

func SyncGrampusQueue(doerId int64) error {
r, err := grampus.GetResourceQueue()
if err != nil {
@@ -94,6 +159,7 @@ func SyncGrampusQueue(doerId int64) error {
QueueCode: queue.QueueCode,
QueueName: queue.QueueName,
QueueType: queue.QueueType,
IsAvailable: true,
})
} else {
existIds = append(existIds, oldQueue.ID)
@@ -106,6 +172,7 @@ func SyncGrampusQueue(doerId int64) error {
HasInternet: hasInternet,
QueueName: queue.QueueName,
QueueType: queue.QueueType,
IsAvailable: true,
})
}



+19 -2  services/cloudbrain/resource/resource_specification.go

@@ -155,9 +155,14 @@ func GetAllResourceSpecification(opts models.SearchResourceSpecificationOptions)
if err != nil {
return nil, err
}

exclusiveMap := models.FindQueuesExclusiveMap()
res := make([]*models.ResourceSpecInfo, len(r))
for i := 0; i < len(r); i++ {
res[i] = r[i].ConvertToResourceSpecInfo()
if _, exists := exclusiveMap[res[i].QueueId]; exists {
res[i].IsQueueExclusive = true
}
}
return res, nil
}
@@ -205,7 +210,7 @@ func ResourceSpecOnShelf(doerId int64, id int64, unitPrice int) *response.BizErr
if spec == nil {
return response.SPECIFICATION_NOT_EXIST
}
if q, err := models.GetResourceQueue(&models.ResourceQueue{ID: spec.QueueId}); err != nil || q == nil {
if q, err := models.GetResourceQueue(&models.ResourceQueue{ID: spec.QueueId}); err != nil || q == nil || !q.IsAvailable {
return response.RESOURCE_QUEUE_NOT_AVAILABLE
}
if !spec.IsAvailable {
@@ -329,7 +334,7 @@ func InsertCloudbrainSpec(cloudbrainId int64, s *models.Specification) error {
Cluster: s.Cluster,
AiCenterCode: s.AiCenterCode,
AiCenterName: s.AiCenterName,
IsExclusive: s.IsExclusive,
IsExclusive: s.IsSpecExclusive(),
ExclusiveOrg: s.ExclusiveOrg,
}
_, err := models.InsertCloudbrainSpec(c)
@@ -695,3 +700,15 @@ func InitQueueAndSpec(opt models.FindSpecsOptions, aiCenterName string, remark s
IsAvailable: true,
})
}

func GetResourceListPaging(opts models.GetResourceListOpts) ([]*models.ResourceInfo4CardRequest, int64, error) {
return models.GetResourceListPaging(opts)
}

func GetAccCardList() ([]models.AccCardInfo, error) {
return models.GetAccCardList()
}

func GetAvailableAICenter() ([]*models.ResourceAiCenterRes, error) {
return models.GetAvailableResourceAiCenters()
}

+1 -1  services/socketwrap/clientManager.go

@@ -10,7 +10,7 @@ import (
"github.com/elliotchance/orderedmap"
)

var opTypes = []int{1, 2, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 35, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51}
var opTypes = []int{1, 2, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 35, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53}

type ClientsManager struct {
Clients *orderedmap.OrderedMap


+10 -148  templates/admin/cloudbrain/imagecommit.tmpl

@@ -1,152 +1,14 @@
<style>
.label_color{
color:#505559 !important;
width: 6% !important;
text-align: center;
}
</style>
{{template "base/head" .}}
<div id="mask">
<div id="loadingPage">
<div class="rect1"></div>
<div class="rect2"></div>
<div class="rect3"></div>
<div class="rect4"></div>
<div class="rect5"></div>
</div>
</div>
<div class="repository">
{{template "repo/header" .}}
<div class="alert"></div>
<div class="ui container">
<div>
<div class="ui negative message" style="display: none;">
</div>
<div class="ui info message" style="display: none;">
</div>
<div class="ui positive message" style="display: none;">
</div>
<h4 class="ui top attached header">
{{.i18n.Tr "repo.submit_image"}}
</h4>
<div class="submit-image-tmplvalue" style="display: none;" data-link="{{$.Link}}" data-edit-page="{{.PageIsAdminImages}}"></div>
<div class="ui attached segment" style="padding: 2em 3em;padding-bottom: 7rem;">
<div class="ui form" id="form_image">
<input type="hidden" name="edit" value="edit">
{{.CsrfTokenHtml}}
<div class="inline field">
<label class="label_color" for="">{{$.i18n.Tr "dataset.dataset_available_clusters"}}</label>
<!-- <div class="ui basic label" style="border: none !important;color:#3291f8;">
<svg class="svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="14" height="14"><path fill="none" d="M0 0h24v24H0z"></path><path d="M4 3h16a1 1 0 0 1 1 1v7H3V4a1 1 0 0 1 1-1zM3 13h18v7a1 1 0 0 1-1 1H4a1 1 0 0 1-1-1v-7zm4 3v2h3v-2H7zM7 6v2h3V6H7z"></path></svg>
CPU/GPU
</div> -->
<div class="ui blue mini menu compact selectcloudbrain" id="adminCommitImage">
<a class="active item" data-type="0">
<svg class="svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="16" height="16">
<path fill="none" d="M0 0h24v24H0z"/>
<path d="M3 2.992C3 2.444 3.445 2 3.993 2h16.014a1 1 0 0 1 .993.992v18.016a.993.993 0 0 1-.993.992H3.993A1 1 0 0 1 3 21.008V2.992zM19 11V4H5v7h14zm0 2H5v7h14v-7zM9 6h6v2H9V6zm0 9h6v2H9v-2z"/>
</svg>
启智CPU/GPU
</a>
<a class="item" data-type="2">
<svg class="svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="16" height="16">
<path fill="none" d="M0 0h24v24H0z"/>
<path d="M3 2.992C3 2.444 3.445 2 3.993 2h16.014a1 1 0 0 1 .993.992v18.016a.993.993 0 0 1-.993.992H3.993A1 1 0 0 1 3 21.008V2.992zM19 11V4H5v7h14zm0 2H5v7h14v-7zM9 6h6v2H9V6zm0 9h6v2H9v-2z"/>
</svg>
智算CPU/GPU
</a>
</div>
<input type="hidden" value="0" name="type">
</div>
<div class="inline required field">
<label class="label_color" for="">{{$.i18n.Tr "repo.images.name"}}</label>
<input type="text" name="tag" required placeholder="{{$.i18n.Tr "repo.images.name_placerholder"}}" style="width: 80%;" maxlength="100">
<span class="tooltips" style="display: block;padding-left: 1.5rem;">
{{.i18n.Tr "repo.images.name_rule50"}}
</span>
</div>
<div class="inline required field">
<label class="label_color" for="">{{$.i18n.Tr "repo.images"}}</label>
<input type="text" name="place" required placeholder="{{$.i18n.Tr "cloudbrain.input_mirror"}}" style="width: 80%;" maxlength="300">
</div>
<div class="inline required field">
<label class="label_color" for="">{{$.i18n.Tr "dataset.description"}}</label>
<textarea style="width: 80%;" required id="description" name="description" rows="3" maxlength="1000" placeholder={{.i18n.Tr "repo.images.descr_placerholder"}} onchange="this.value=this.value.substring(0, 1000)" onkeydown="this.value=this.value.substring(0, 1000)" onkeyup="this.value=this.value.substring(0, 1000)"></textarea>
</div>
<div class="inline field" style="display: flex;align-items: center;">
<label class="label_color" for="">{{$.i18n.Tr "repo.model.manage.label"}}</label>&nbsp;
<div class="ui multiple search selection dropdown" id="dropdown_image" style="width: 80%;">
<input type="hidden" name="topics" value="" required>
<div class="default text" id="default_text">{{.i18n.Tr "repo.repo_label_helpe"}}</div>
<div class="menu" id="course_label_item"></div>
</div>
</div>
<span class="tooltips" style="display: block;padding-left: 1.5rem;margin-top: 0.5rem;margin-bottom: 1rem;">{{.i18n.Tr "repo.image.label_tooltips"}}</span>
<div class="inline fields">
<label class="label_color" for="" style="visibility: hidden;"></label>
<div class="field">
<div class="ui radio checkbox">
<input type="radio" name="isRecommend" checked="checked" value="true">
<label>{{.i18n.Tr "admin.images.recommend"}}</label>
</div>
</div>
<div class="field" style="flex: 0.15;">
<div class="ui radio checkbox" >
<input type="radio" name="isRecommend" value="false">
<label>{{.i18n.Tr "admin.images.unrecommend"}}</label>
</div>
</div>
</div>
<!--
<div class="inline fields">
<label class="label_color" for="" style="visibility: hidden;"></label>
<div class="field">
<div class="ui radio checkbox">
<input type="radio" name="isPrivate" checked="checked" value="false">
<label>{{.i18n.Tr "org.settings.visibility.public"}}</label>
</div>
</div>
<div class="field" style="flex: 0.15;">
<div class="ui radio checkbox" >
<input type="radio" name="isPrivate" value="true">
<label>{{.i18n.Tr "home.show_private"}}</label>
</div>
</div>
<div class="field">
<span class="label_color">{{.i18n.Tr "repo.images.public_tooltips"}}</span>
</div>
</div>
-->
<div class="inline required field">
<label class="label_color" for="" style="visibility: hidden;"></label>
<span style="color: rgb(255, 94, 0);display: inline-flex;"><i class="ri-error-warning-line" style="margin-right: 0.3rem;"></i>{{.i18n.Tr "repo.images.submit_tooltips"}}</span>
</div>
<div class="inline required field">
<label class="label_color" for="" style="visibility: hidden;"></label>
<button class="ui create_image green button" type="button">
{{.i18n.Tr "repo.cloudbrain.commit_image"}}
</button>
<a class="ui button" id="cancel_submit_image">{{.i18n.Tr "repo.cloudbrain.cancel"}}</a>
</div>
</div>
</div>
</div>
</div>
<script>
window._PageType = "submitAdmin";
window._Image = {{.Image}};
window._PageFrom = 'imageAdmin';
window._PageSubmitLink = "{{$.Link}}";
</script>
<link rel="stylesheet" href="{{StaticUrlPrefix}}/css/vp-images-submit.css?v={{MD5 AppVer}}" />
<div id="__vue-root"></div>
<script src="{{StaticUrlPrefix}}/js/vp-images-submit.js?v={{MD5 AppVer}}"></script>
</div>

<!-- 确认模态框 -->
<div>
<div class="ui modal image_confirm_submit">
<div class="header">{{.i18n.Tr "repo.submit_image"}}</div>
<div class="content text red center">
<p><i class="exclamation icon"></i>{{.i18n.Tr "repo.image_overwrite"}}</p>
</div>
<div class="actions">
<button class="ui deny small button">{{.i18n.Tr "cloudbrain.operate_cancel"}}</button>
<button class="ui green small approve button">{{.i18n.Tr "cloudbrain.operate_confirm"}}</button>
</div>
</div>
</div>
{{template "base/footer" .}}
{{template "base/footer" .}}

+3 -24  templates/explore/images.tmpl

@@ -1,26 +1,5 @@
{{template "base/head" .}}
<div class="alert"></div>
<div id="images"></div>
<!-- 确认模态框 -->
<div id="deletemodel">
<div class="ui basic modal images">
<div class="ui icon header">
<i class="trash icon"></i> {{.i18n.Tr "repo.images.delete_task"}}
</div>

<div class="content">
<p>{{.i18n.Tr "repo.images.task_delete_confirm"}}</p>
</div>
<div class="actions">
<div class="ui red basic inverted cancel button">
<i class="remove icon"></i>
{{.i18n.Tr "cloudbrain.operate_cancel"}}
</div>
<div class="ui green basic inverted ok button">
<i class="checkmark icon"></i>
{{.i18n.Tr "cloudbrain.operate_confirm"}}
</div>
</div>
</div>
</div>
<link rel="stylesheet" href="{{StaticUrlPrefix}}/css/vp-images-square.css?v={{MD5 AppVer}}" />
<div id="__vue-root"></div>
<script src="{{StaticUrlPrefix}}/js/vp-images-square.js?v={{MD5 AppVer}}"></script>
{{template "base/footer" .}}

+9 -121  templates/repo/cloudbrain/image/apply.tmpl

@@ -1,126 +1,14 @@
<style>
.label_color{
color:#505559 !important;
width: 6% !important;
text-align: center;
}
.descr-tip-box {
display: inline-block;
border: 1px solid #f2711c;
background-color: rgba(242,113,28,0.05);
width: 80%;
padding: 10px;
}
.descr-tip-head {
color: #888888;
font-size: 14px;
margin-bottom: 4px;
}
.descr-tip-item {
margin: 2px 0;
padding-left: 10px;
color: rgb(242 113 28);
font-size: 12px;
}
</style>
{{template "base/head" .}}
<div id="mask">
<div id="loadingPage">
<div class="rect1"></div>
<div class="rect2"></div>
<div class="rect3"></div>
<div class="rect4"></div>
<div class="rect5"></div>
</div>
</div>
<div class="repository">
{{template "repo/header" .}}
<div class="alert"></div>
<div class="ui container">
<div>
<div class="ui negative message" style="display: none;">
</div>
<div class="ui info message" style="display: none;">
</div>
<div class="ui positive message" style="display: none;">
</div>
<h4 class="ui top attached header">
{{$.i18n.Tr "admin.images.applyrecommendImage"}}
</h4>
<div class="submit-image-tmplvalue" style="display: none;" data-link="/image/{{$.Image.ID}}/apply" data-edit-page="{{.PageFrom}}"></div>
<div class="ui attached segment" style="padding: 2em 3em;padding-bottom: 7rem;">
<div class="ui form" id="form_image">
<input type="hidden" name="edit" value="edit">
{{.CsrfTokenHtml}}
<input type="hidden" name="id" value="{{.Image.ID}}">
<div class="inline field">
<label class="label_color" for="">{{$.i18n.Tr "dataset.dataset_available_clusters"}}</label>
<div class="ui basic label" style="border: none !important;color:#3291f8;">
<svg class="svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="14" height="14"><path fill="none" d="M0 0h24v24H0z"></path><path d="M4 3h16a1 1 0 0 1 1 1v7H3V4a1 1 0 0 1 1-1zM3 13h18v7a1 1 0 0 1-1 1H4a1 1 0 0 1-1-1v-7zm4 3v2h3v-2H7zM7 6v2h3V6H7z"></path></svg>
{{if eq .Image.CloudbrainType 2}}
{{$.i18n.Tr "cloudbrain.resource_cluster_c2net_simple"}} GPU
{{else}}
{{$.i18n.Tr "cloudbrain.resource_cluster_openi_simple"}} GPU
{{end}}
</div>
<input type="hidden" value="{{.Image.CloudbrainType}}" name="type">
</div>
<div class="inline required field">
<label class="label_color" for="">{{$.i18n.Tr "repo.images.name"}}</label>
<input type="hidden" name="tag" value="{{.Image.Tag}}" >
<input disabled value="{{.Image.Tag}}" style="width: 80%;">
<span class="tooltips" style="display: block;padding-left: 1.5rem;">
{{if eq .Image.CloudbrainType 2}}
{{.i18n.Tr "repo.images.name_rule50"}}
{{else}}
{{.i18n.Tr "repo.images.name_rule100"}}
{{end}}
</span>
</div>
<div class="inline required field">
<label class="label_color" for="">{{$.i18n.Tr "dataset.description"}}</label>
<textarea style="width: 80%;" required id="description" value="{{.Image.Description}}" name="description" rows="3" maxlength="1000" placeholder={{.i18n.Tr "repo.images.descr_placerholder"}} onchange="this.value=this.value.substring(0, 1000)" onkeydown="this.value=this.value.substring(0, 1000)" onkeyup="this.value=this.value.substring(0, 1000)">{{.Image.Description}}</textarea>
</div>
<div class="inline field">
<label class="label_color" style="color:transparent !important;">x</label>
<div class="descr-tip-box">
<div class="descr-tip-head">{{$.i18n.Tr "admin.images.descrTip"}}</div>
<div class="descr-tip-item">{{$.i18n.Tr "admin.images.framework"}}:pytorch1.9.1;</div>
<div class="descr-tip-item">CUDA:cuda11;</div>
<div class="descr-tip-item">{{$.i18n.Tr "admin.images.pythonVersion"}}:python 3.7.11;</div>
<div class="descr-tip-item">{{$.i18n.Tr "admin.images.operatingSystem"}}:Ubuntu 20.02;</div>
<div class="descr-tip-item">{{$.i18n.Tr "admin.images.installedSoftwarePackage"}}:numpy1.21.2</div>
</div>
</div>
<div class="inline field" style="display: flex;align-items: center;">
{{$lenTopics := len .Image.Topics}}
{{$subTopics := subOne $lenTopics}}
<label class="label_color" for="">{{$.i18n.Tr "repo.model.manage.label"}}</label>&nbsp;
<div class="ui multiple search selection dropdown" id="dropdown_image" style="width: 80%;">
<input type="hidden" name="topics" value="{{range $k,$v := .Image.Topics}}{{$v}}{{if ne $k $subTopics}},{{end}}{{end}}" required>
{{range .Image.Topics}}
<a class="ui label transition visible" data-value="{{.}}" style="display: inline-block !important;">{{.}}<i class="delete icon"></i></a>
{{end}}
<div class="default text" id="default_text">{{.i18n.Tr "repo.repo_label_helpe"}}</div>
<div class="menu" id="course_label_item"></div>
</div>
</div>
<span class="tooltips" style="display: block;padding-left: 1.5rem;margin-top: 0.5rem;margin-bottom: 1rem;">{{.i18n.Tr "repo.image.label_tooltips"}}</span>
<div class="inline required field" style="padding-top: 2rem;">
<label class="label_color" for="" style="visibility: hidden;"></label>
<button class="ui create_image green button" type="button">
{{$.i18n.Tr "admin.images.submitApply"}}
</button>
<a class="ui button" id="cancel_submit_image">{{.i18n.Tr "repo.cloudbrain.cancel"}}</a>
</div>
</div>
</div>
</div>
</div>
<script>
window._PageType = "apply";
window._Image = {{.Image}};
window._PageFrom = {{.PageFrom}};
window._PageSubmitLink = "/image/{{$.Image.ID}}/apply";
</script>
<link rel="stylesheet" href="{{StaticUrlPrefix}}/css/vp-images-submit.css?v={{MD5 AppVer}}" />
<div id="__vue-root"></div>
<script src="{{StaticUrlPrefix}}/js/vp-images-submit.js?v={{MD5 AppVer}}"></script>
</div>
{{template "base/footer" .}}
<script>
;(function(){
var Images = {{.Image}};
})();
</script>

+ 9
- 114
templates/repo/cloudbrain/image/edit.tmpl View File

@@ -1,119 +1,14 @@
<style>
.label_color{
color:#505559 !important;
width: 6% !important;
text-align: center;
}
</style>
{{template "base/head" .}}
<div id="mask">
<div id="loadingPage">
<div class="rect1"></div>
<div class="rect2"></div>
<div class="rect3"></div>
<div class="rect4"></div>
<div class="rect5"></div>
</div>
</div>
<div class="repository">
{{template "repo/header" .}}
<div class="alert"></div>
<div class="ui container">
<div>
<div class="ui negative message" style="display: none;">
</div>
<div class="ui info message" style="display: none;">
</div>
<div class="ui positive message" style="display: none;">
</div>
<h4 class="ui top attached header">
{{.i18n.Tr "repo.modify_image"}}
</h4>
<div class="submit-image-tmplvalue" style="display: none;" data-link="/image/{{$.Image.ID}}" data-edit-page="{{.PageFrom}}"></div>
<div class="ui attached segment" style="padding: 2em 3em;padding-bottom: 7rem;">
<div class="ui form" id="form_image">
<input type="hidden" name="edit" value="edit">
{{.CsrfTokenHtml}}
<input type="hidden" name="id" value="{{.Image.ID}}">
<div class="inline field">
<label class="label_color" for="">{{$.i18n.Tr "dataset.dataset_available_clusters"}}</label>
<div class="ui basic label" style="border: none !important;color:#3291f8;">
<svg class="svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="14" height="14"><path fill="none" d="M0 0h24v24H0z"></path><path d="M4 3h16a1 1 0 0 1 1 1v7H3V4a1 1 0 0 1 1-1zM3 13h18v7a1 1 0 0 1-1 1H4a1 1 0 0 1-1-1v-7zm4 3v2h3v-2H7zM7 6v2h3V6H7z"></path></svg>
{{if eq .Image.CloudbrainType 2}}
{{$.i18n.Tr "cloudbrain.resource_cluster_c2net_simple"}} GPU
{{else}}
{{$.i18n.Tr "cloudbrain.resource_cluster_openi_simple"}} GPU
{{end}}
</div>
<input type="hidden" value="{{.Image.CloudbrainType}}" name="type">
</div>
<div class="inline required field">
<label class="label_color" for="">{{$.i18n.Tr "repo.images.name"}}</label>
<input type="hidden" name="tag" value="{{.Image.Tag}}" >
<input disabled value="{{.Image.Tag}}" style="width: 80%;">
<span class="tooltips" style="display: block;padding-left: 1.5rem;">
{{if eq .Image.CloudbrainType 2}}
{{.i18n.Tr "repo.images.name_rule50"}}
{{else}}
{{.i18n.Tr "repo.images.name_rule100"}}
{{end}}
</span>
</div>
<div class="inline required field">
<label class="label_color" for="">{{$.i18n.Tr "dataset.description"}}</label>
<textarea style="width: 80%;" required id="description" value="{{.Image.Description}}" name="description" rows="3" maxlength="1000" placeholder={{.i18n.Tr "repo.images.descr_placerholder"}} onchange="this.value=this.value.substring(0, 1000)" onkeydown="this.value=this.value.substring(0, 1000)" onkeyup="this.value=this.value.substring(0, 1000)">{{.Image.Description}}</textarea>
</div>
<div class="inline field" style="display: flex;align-items: center;">
{{$lenTopics := len .Image.Topics}}
{{$subTopics := subOne $lenTopics}}
<label class="label_color" for="">{{$.i18n.Tr "repo.model.manage.label"}}</label>&nbsp;
<div class="ui multiple search selection dropdown" id="dropdown_image" style="width: 80%;">
<input type="hidden" name="topics" value="{{range $k,$v := .Image.Topics}}{{$v}}{{if ne $k $subTopics}},{{end}}{{end}}" required>
{{range .Image.Topics}}
<a class="ui label transition visible" data-value="{{.}}" style="display: inline-block !important;">{{.}}<i class="delete icon"></i></a>
{{end}}
<div class="default text" id="default_text">{{.i18n.Tr "repo.repo_label_helpe"}}</div>
<div class="menu" id="course_label_item"></div>
</div>
</div>
<span class="tooltips" style="display: block;padding-left: 1.5rem;margin-top: 0.5rem;margin-bottom: 1rem;">{{.i18n.Tr "repo.image.label_tooltips"}}</span>
<!--
<div class="inline fields">
<label class="label_color" for="" style="visibility: hidden;"></label>
<div class="field">
<div class="ui radio checkbox">
<input type="radio" name="isPrivate" {{if not .Image.IsPrivate}} checked {{end}} value="false">
<label>{{.i18n.Tr "org.settings.visibility.public"}}</label>
</div>
</div>
<div class="field" style="flex: 0.15;">
<div class="ui radio checkbox" >
<input type="radio" name="isPrivate" {{if .Image.IsPrivate}} checked {{end}} value="true">
<label>{{.i18n.Tr "home.show_private"}}</label>
</div>
</div>
<div class="field">
<span class="label_color">{{.i18n.Tr "repo.images.public_tooltips"}}</span>
</div>
</div>
-->
<div class="inline required field">
<label class="label_color" for="" style="visibility: hidden;"></label>
<span style="color: rgb(255, 94, 0);display: inline-flex;"><i class="ri-error-warning-line" style="margin-right: 0.3rem;"></i>{{.i18n.Tr "repo.images.submit_tooltips"}}</span>
</div>
<div class="inline required field">
<label class="label_color" for="" style="visibility: hidden;"></label>
<button class="ui create_image green button" type="button">
{{.i18n.Tr "explore.save"}}
</button>
<a class="ui button" id="cancel_submit_image">{{.i18n.Tr "repo.cloudbrain.cancel"}}</a>
</div>
</div>
</div>
</div>
</div>
<script>
window._PageType = "edit";
window._Image = {{.Image}};
window._PageFrom = {{.PageFrom}};
window._PageSubmitLink = "/image/{{$.Image.ID}}";
</script>
<link rel="stylesheet" href="{{StaticUrlPrefix}}/css/vp-images-submit.css?v={{MD5 AppVer}}" />
<div id="__vue-root"></div>
<script src="{{StaticUrlPrefix}}/js/vp-images-submit.js?v={{MD5 AppVer}}"></script>
</div>
{{template "base/footer" .}}
<script>
console.log({{.Image}})
</script>

+ 12
- 118
templates/repo/cloudbrain/image/submit.tmpl View File

@@ -1,123 +1,17 @@
<style>
.label_color{
color:#505559 !important;
width: 6% !important;
text-align: center;
}
</style>
{{template "base/head" .}}
<div id="mask">
<div id="loadingPage">
<div class="rect1"></div>
<div class="rect2"></div>
<div class="rect3"></div>
<div class="rect4"></div>
<div class="rect5"></div>
</div>
</div>
<div class="repository">
{{template "repo/header" .}}
<div class="alert"></div>
<div class="ui container">
<div>
<div class="ui negative message" style="display: none;">
</div>
<div class="ui info message" style="display: none;">
</div>
<div class="ui positive message" style="display: none;">
</div>
<h4 class="ui top attached header">
{{.i18n.Tr "repo.submit_image"}}
</h4>
<div class="submit-image-tmplvalue" style="display: none;" data-link="{{$.Link}}" data-repo-link="{{$.RepoLink}}" data-edit-page="submit"></div>
<div class="ui attached segment" style="padding: 2em 3em;padding-bottom: 7rem;">
<div class="ui form" id="form_image">
{{.CsrfTokenHtml}}
<div class="inline field">
<label class="label_color" for="">{{$.i18n.Tr "dataset.dataset_available_clusters"}}</label>
<div class="ui basic label" style="border: none !important;color:#3291f8;">
<svg class="svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="14" height="14"><path fill="none" d="M0 0h24v24H0z"></path><path d="M4 3h16a1 1 0 0 1 1 1v7H3V4a1 1 0 0 1 1-1zM3 13h18v7a1 1 0 0 1-1 1H4a1 1 0 0 1-1-1v-7zm4 3v2h3v-2H7zM7 6v2h3V6H7z"></path></svg>
{{if eq .Type 2}}
{{$.i18n.Tr "cloudbrain.resource_cluster_c2net_simple"}} GPU
{{else}}
{{$.i18n.Tr "cloudbrain.resource_cluster_openi_simple"}} GPU
{{end}}
</div>
<input type="hidden" value="{{.Type}}" name="type">
</div>
<div class="inline required field">
<label class="label_color" for="">{{$.i18n.Tr "repo.images.name"}}</label>
<input type="text" name="tag" required placeholder="{{$.i18n.Tr "repo.images.name_placerholder"}}" style="width: 80%;" maxlength="{{if eq .Type 2}} 50 {{else}} 100 {{end}}">
<span class="tooltips" style="display: block;padding-left: 1.5rem;">
{{if eq .Type 2}}
{{.i18n.Tr "repo.images.name_rule50"}}
{{else}}
{{.i18n.Tr "repo.images.name_rule100"}}
{{end}}
</span>
</div>
<div class="inline required field">
<label class="label_color" for="">{{$.i18n.Tr "dataset.description"}}</label>
<textarea style="width: 80%;" required id="description" name="description" rows="3" maxlength="1000" placeholder={{.i18n.Tr "repo.images.descr_placerholder"}} onchange="this.value=this.value.substring(0, 1000)" onkeydown="this.value=this.value.substring(0, 1000)" onkeyup="this.value=this.value.substring(0, 1000)"></textarea>
</div>
<div class="inline field" style="display: flex;align-items: center;">
<label class="label_color" for="">{{$.i18n.Tr "repo.model.manage.label"}}</label>&nbsp;
<div class="ui multiple search selection dropdown" id="dropdown_image" style="width: 80%;">
<input type="hidden" name="topics" value="" required>
<div class="default text" id="default_text">{{.i18n.Tr "repo.repo_label_helpe"}}</div>
<div class="menu" id="course_label_item"></div>
</div>
</div>
<span class="tooltips" style="display: block;padding-left: 1.5rem;margin-top: 0.5rem;margin-bottom: 1rem;">{{.i18n.Tr "repo.image.label_tooltips"}}</span>
<!--
<div class="inline fields">
<label class="label_color" for="" style="visibility: hidden;"></label>
<div class="field">
<div class="ui radio checkbox">
<input type="radio" name="isPrivate" checked="checked" value="false">
<label>{{.i18n.Tr "org.settings.visibility.public"}}</label>
</div>
</div>
<div class="field" style="flex: 0.15;">
<div class="ui radio checkbox" >
<input type="radio" name="isPrivate" value="true">
<label>{{.i18n.Tr "home.show_private"}}</label>
</div>
</div>
<div class="field">
<span class="label_color">{{.i18n.Tr "repo.images.public_tooltips"}}</span>
</div>
</div>
-->
<div class="inline required field">
<label class="label_color" for="" style="visibility: hidden;"></label>
<span style="color: rgb(255, 94, 0);display: inline-flex;"><i class="ri-error-warning-line" style="margin-right: 0.3rem;"></i>{{.i18n.Tr "repo.images.submit_tooltips"}}</span>
</div>
<div class="inline required field">
<label class="label_color" for="" style="visibility: hidden;"></label>
<button class="ui create_image green button" type="button">
{{.i18n.Tr "repo.cloudbrain.commit_image"}}
</button>
<a class="ui button" id="cancel_submit_image">{{.i18n.Tr "repo.cloudbrain.cancel"}}</a>
</div>
</div>
</div>
</div>
</div>
<script>
window._PageType = "submit";
window._Image = {{.Image}};
window._ImageType = {{.Type}};
window._ImageComputeResource = {{.ComputeResource}};
window._PageFrom = "submit";
window._PageSubmitLink = {{$.Link}};
window._PageRepoLink = {{$.RepoLink}};
</script>
<link rel="stylesheet" href="{{StaticUrlPrefix}}/css/vp-images-submit.css?v={{MD5 AppVer}}" />
<div id="__vue-root"></div>
<script src="{{StaticUrlPrefix}}/js/vp-images-submit.js?v={{MD5 AppVer}}"></script>
</div>

<!-- 确认模态框 -->
<div>
<div class="ui modal image_confirm_submit">
<div class="header">{{.i18n.Tr "repo.submit_image"}}</div>
<div class="content text red center">
<p><i class="exclamation icon"></i>{{.i18n.Tr "repo.image_overwrite"}}</p>
</div>
<div class="actions">
<button class="ui deny small button">{{.i18n.Tr "cloudbrain.operate_cancel"}}</button>
<button class="ui green small approve button">{{.i18n.Tr "cloudbrain.operate_confirm"}}</button>
</div>
</div>
</div>
{{template "base/footer" .}}

+ 1
- 0
templates/repo/grampus/trainjob/iluvatar-gpgpu/new.tmpl View File

@@ -0,0 +1 @@
{{ template "repo/cloudbrain/cloudbraincreate" .}}

+ 0
- 6
templates/reward/point/rule.tmpl View File

@@ -104,12 +104,6 @@
<td class="t-center point">-</td>
<td class="t-center"><span class="typ">累计</span>积分获取上限<span class="limit"> - </span></td>
<td>首次更换头像,获得积分。</td>
</tr>
<tr key="TaskInviteFriendRegister">
<td class="t-center">邀请好友</td>
<td class="t-center point">-</td>
<td class="t-center"><span class="typ">累计</span>积分获取上限<span class="limit"> - </span></td>
<td>邀请好友获得积分。</td>
</tr>
<tr key="TaskInviteFriendRegister">
<td class="t-center">邀请好友</td>


+ 9
- 0
templates/user/dashboard/feeds.tmpl View File

@@ -1,3 +1,4 @@
<script>var Feeds={{.Feeds}};</script>
{{range .Feeds}}
<div class="news">
<div class="ui left">
@@ -191,6 +192,12 @@
{{$.i18n.Tr "action.task_c2ent_mlutrainjob" .GetRepoLink (Printf "%d" .Cloudbrain.ID) .RefName | Str2html}}
{{else}}
{{$.i18n.Tr "action.task_c2ent_mlutrainjob" "" "" "" | Str2html}}<span style="">{{.RefName}}{{$.i18n.Tr "repo.issues.deleted_milestone"}}</span>
{{end}}
{{else if eq .GetOpType 53}}
{{if .Cloudbrain}}
{{$.i18n.Tr "action.task_c2net_gpgpu_iluvatar_trainjob" .GetRepoLink (Printf "%d" .Cloudbrain.ID) .RefName | Str2html}}
{{else}}
{{$.i18n.Tr "action.task_c2net_gpgpu_iluvatar_trainjob" "" "" "" | Str2html}}<span style="">{{.RefName}}{{$.i18n.Tr "repo.issues.deleted_milestone"}}</span>
{{end}}
{{else if eq .GetOpType 45}}
{{$.i18n.Tr "action.task_c2ent_onlineinferjob" .GetRepoLink .Content .RefName | Str2html}}
@@ -261,6 +268,8 @@
<span class="text grey"><i class="ri-haze-2-line icon big"></i></span>
{{else if eq .GetOpType 51}}
<span class="text grey"><i class="ri-haze-2-line icon big"></i></span>
{{else if eq .GetOpType 53}}
<span class="text grey"><i class="ri-voice-recognition-line icon big"></i></span>
{{else if eq .GetOpType 29}}
<span class="text grey"><i class="ri-vip-crown-line icon big"></i></span>
{{else if eq .GetOpType 30}}


+ 0
- 1
vendor/gitea.com/macaron/csrf/csrf.go View File

@@ -222,7 +222,6 @@ func Generate(options ...Options) macaron.Handler {
needsNew = true
}
}

if needsNew {
// FIXME: actionId.
x.Token = GenerateToken(x.Secret, x.ID, "POST")


+ 0
- 1
vendor/gitea.com/macaron/csrf/xsrf.go View File

@@ -90,7 +90,6 @@ func validTokenAtTime(token, key, userID, actionID string, now time.Time) bool {
}

expected := generateTokenAtTime(key, userID, actionID, issueTime)

// Check that the token matches the expected value.
// Use constant time comparison to avoid timing attacks.
return subtle.ConstantTimeCompare([]byte(token), []byte(expected)) == 1


+ 19
- 0
vendor/github.com/Jeffail/tunny/LICENSE View File

@@ -0,0 +1,19 @@
Copyright (c) 2014 Ashley Jeffs

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

+ 134
- 0
vendor/github.com/Jeffail/tunny/README.md View File

@@ -0,0 +1,134 @@
![Tunny](tunny_logo.png "Tunny")

[![godoc for Jeffail/tunny][1]][2]
[![goreportcard for Jeffail/tunny][3]][4]

Tunny is a Golang library for spawning and managing a goroutine pool, allowing
you to limit work coming from any number of goroutines with a synchronous API.

A fixed goroutine pool is helpful when you have work coming from an arbitrary
number of asynchronous sources, but a limited capacity for parallel processing.
For example, when processing jobs from HTTP requests that are CPU heavy you can
create a pool with a size that matches your CPU count.

## Install

``` sh
go get github.com/Jeffail/tunny
```

Or, using dep:

``` sh
dep ensure -add github.com/Jeffail/tunny
```

## Use

For most cases your heavy work can be expressed in a simple `func()`, where you
can use `NewFunc`. Let's see how this looks using the earlier example of CPU-heavy
HTTP requests processed by a pool sized to the CPU count:

``` go
package main

import (
"io/ioutil"
"net/http"
"runtime"

"github.com/Jeffail/tunny"
)

func main() {
numCPUs := runtime.NumCPU()

pool := tunny.NewFunc(numCPUs, func(payload interface{}) interface{} {
var result []byte

// TODO: Something CPU heavy with payload

return result
})
defer pool.Close()

http.HandleFunc("/work", func(w http.ResponseWriter, r *http.Request) {
input, err := ioutil.ReadAll(r.Body)
if err != nil {
http.Error(w, "Internal error", http.StatusInternalServerError)
}
defer r.Body.Close()

// Funnel this work into our pool. This call is synchronous and will
// block until the job is completed.
result := pool.Process(input)

w.Write(result.([]byte))
})

http.ListenAndServe(":8080", nil)
}
```

Tunny also supports timeouts. You can replace the `Process` call above with the
following:

``` go
result, err := pool.ProcessTimed(input, time.Second*5)
if err == tunny.ErrJobTimedOut {
http.Error(w, "Request timed out", http.StatusRequestTimeout)
}
```

You can also use the context from the request (or any other context) to handle timeouts and deadlines. Simply replace the `Process` call with the following:

``` go
result, err := pool.ProcessCtx(r.Context(), input)
if err == context.DeadlineExceeded {
http.Error(w, "Request timed out", http.StatusRequestTimeout)
}
```
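
The pool also exposes `QueueLength()`, which reports how many jobs are currently queued (see `tunny.go` later in this diff). As a rough, illustrative sketch, it can be used for basic load shedding inside the handler above; the threshold of 64 is an arbitrary assumption, not something the library prescribes:

``` go
// Shed load when the backlog is already large (threshold is illustrative only).
if pool.QueueLength() > 64 {
	http.Error(w, "Server busy", http.StatusServiceUnavailable)
	return
}
result := pool.Process(input)
```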

## Changing Pool Size

The size of a Tunny pool can be changed at any time with `SetSize(int)`:

``` go
pool.SetSize(10) // 10 goroutines
pool.SetSize(100) // 100 goroutines
```

This is safe to perform from any goroutine even if others are still processing.

## Goroutines With State

Sometimes each goroutine within a Tunny pool will require its own managed state.
In this case you should implement [`tunny.Worker`][tunny-worker], which includes
calls for terminating, interrupting (in case a job times out and is no longer
needed) and blocking the next job allocation until a condition is met.

When creating a pool using `Worker` types you will need to provide a constructor
function for spawning your custom implementation:

``` go
pool := tunny.New(poolSize, func() Worker {
// TODO: Any per-goroutine state allocation here.
return newCustomWorker()
})
```

This allows Tunny to create and destroy `Worker` types cleanly when the pool
size is changed.
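
For illustration, a minimal sketch of a custom `Worker` is shown below. Only the four methods come from the `Worker` interface (see `tunny.go` later in this diff); the `customWorker` type and its buffer field are hypothetical per-goroutine state:

``` go
// customWorker carries per-goroutine state (here just a reusable scratch buffer).
type customWorker struct {
	buf []byte
}

// Process performs one job synchronously and returns its result.
func (w *customWorker) Process(payload interface{}) interface{} {
	// Reuse w.buf for the heavy work on payload here.
	return payload
}

// BlockUntilReady may block until the worker can accept its next job.
func (w *customWorker) BlockUntilReady() {}

// Interrupt must unblock a Process call whose job has been abandoned (e.g. timed out).
func (w *customWorker) Interrupt() {}

// Terminate releases any held resources when the worker is removed from the pool.
func (w *customWorker) Terminate() { w.buf = nil }
```

Such a worker would be supplied to the pool via the constructor shown above, e.g. `tunny.New(poolSize, func() tunny.Worker { return &customWorker{} })`.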

## Ordering

Backlogged jobs are not guaranteed to be processed in order. Due to the current
implementation of channels and select blocks a stack of backlogged jobs will be
processed as a FIFO queue. However, this behaviour is not part of the spec and
should not be relied upon.

[1]: https://godoc.org/github.com/Jeffail/tunny?status.svg
[2]: http://godoc.org/github.com/Jeffail/tunny
[3]: https://goreportcard.com/badge/github.com/Jeffail/tunny
[4]: https://goreportcard.com/report/Jeffail/tunny
[tunny-worker]: https://godoc.org/github.com/Jeffail/tunny#Worker

+ 309
- 0
vendor/github.com/Jeffail/tunny/tunny.go View File

@@ -0,0 +1,309 @@
// Copyright (c) 2014 Ashley Jeffs
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package tunny

import (
"context"
"errors"
"sync"
"sync/atomic"
"time"
)

//------------------------------------------------------------------------------

// Errors that are used throughout the Tunny API.
var (
ErrPoolNotRunning = errors.New("the pool is not running")
ErrJobNotFunc = errors.New("generic worker not given a func()")
ErrWorkerClosed = errors.New("worker was closed")
ErrJobTimedOut = errors.New("job request timed out")
)

// Worker is an interface representing a Tunny working agent. It will be used to
// block a calling goroutine until ready to process a job, process that job
// synchronously, interrupt its own process call when jobs are abandoned, and
// clean up its resources when being removed from the pool.
//
// Each of these duties are implemented as a single method and can be averted
// when not needed by simply implementing an empty func.
type Worker interface {
// Process will synchronously perform a job and return the result.
Process(interface{}) interface{}

// BlockUntilReady is called before each job is processed and must block the
// calling goroutine until the Worker is ready to process the next job.
BlockUntilReady()

// Interrupt is called when a job is cancelled. The worker is responsible
// for unblocking the Process implementation.
Interrupt()

// Terminate is called when a Worker is removed from the processing pool
// and is responsible for cleaning up any held resources.
Terminate()
}

//------------------------------------------------------------------------------

// closureWorker is a minimal Worker implementation that simply wraps a
// func(interface{}) interface{}
type closureWorker struct {
processor func(interface{}) interface{}
}

func (w *closureWorker) Process(payload interface{}) interface{} {
return w.processor(payload)
}

func (w *closureWorker) BlockUntilReady() {}
func (w *closureWorker) Interrupt() {}
func (w *closureWorker) Terminate() {}

//------------------------------------------------------------------------------

// callbackWorker is a minimal Worker implementation that attempts to cast
// each job into func() and either calls it if successful or returns
// ErrJobNotFunc.
type callbackWorker struct{}

func (w *callbackWorker) Process(payload interface{}) interface{} {
f, ok := payload.(func())
if !ok {
return ErrJobNotFunc
}
f()
return nil
}

func (w *callbackWorker) BlockUntilReady() {}
func (w *callbackWorker) Interrupt() {}
func (w *callbackWorker) Terminate() {}

//------------------------------------------------------------------------------

// Pool is a struct that manages a collection of workers, each with their own
// goroutine. The Pool can initialize, expand, compress and close the workers,
// as well as processing jobs with the workers synchronously.
type Pool struct {
queuedJobs int64

ctor func() Worker
workers []*workerWrapper
reqChan chan workRequest

workerMut sync.Mutex
}

// New creates a new Pool of workers that starts with n workers. You must
// provide a constructor function that creates new Worker types and when you
// change the size of the pool the constructor will be called to create each new
// Worker.
func New(n int, ctor func() Worker) *Pool {
p := &Pool{
ctor: ctor,
reqChan: make(chan workRequest),
}
p.SetSize(n)

return p
}

// NewFunc creates a new Pool of workers where each worker will process using
// the provided func.
func NewFunc(n int, f func(interface{}) interface{}) *Pool {
return New(n, func() Worker {
return &closureWorker{
processor: f,
}
})
}

// NewCallback creates a new Pool of workers where workers cast the job payload
// into a func() and run it, or return ErrJobNotFunc if the cast fails.
func NewCallback(n int) *Pool {
return New(n, func() Worker {
return &callbackWorker{}
})
}

//------------------------------------------------------------------------------

// Process will use the Pool to process a payload and synchronously return the
// result. Process can be called safely by any goroutines, but will panic if the
// Pool has been stopped.
func (p *Pool) Process(payload interface{}) interface{} {
atomic.AddInt64(&p.queuedJobs, 1)

request, open := <-p.reqChan
if !open {
panic(ErrPoolNotRunning)
}

request.jobChan <- payload

payload, open = <-request.retChan
if !open {
panic(ErrWorkerClosed)
}

atomic.AddInt64(&p.queuedJobs, -1)
return payload
}

// ProcessTimed will use the Pool to process a payload and synchronously return
// the result. If the timeout occurs before the job has finished the worker will
// be interrupted and ErrJobTimedOut will be returned. ProcessTimed can be
// called safely by any goroutines.
func (p *Pool) ProcessTimed(
payload interface{},
timeout time.Duration,
) (interface{}, error) {
atomic.AddInt64(&p.queuedJobs, 1)
defer atomic.AddInt64(&p.queuedJobs, -1)

tout := time.NewTimer(timeout)

var request workRequest
var open bool

select {
case request, open = <-p.reqChan:
if !open {
return nil, ErrPoolNotRunning
}
case <-tout.C:
return nil, ErrJobTimedOut
}

select {
case request.jobChan <- payload:
case <-tout.C:
request.interruptFunc()
return nil, ErrJobTimedOut
}

select {
case payload, open = <-request.retChan:
if !open {
return nil, ErrWorkerClosed
}
case <-tout.C:
request.interruptFunc()
return nil, ErrJobTimedOut
}

tout.Stop()
return payload, nil
}

// ProcessCtx will use the Pool to process a payload and synchronously return
// the result. If the context is cancelled before the job has finished, the worker
// will be interrupted and the context's error will be returned. ProcessCtx can be
// called safely by any goroutines.
func (p *Pool) ProcessCtx(ctx context.Context, payload interface{}) (interface{}, error) {
atomic.AddInt64(&p.queuedJobs, 1)
defer atomic.AddInt64(&p.queuedJobs, -1)

var request workRequest
var open bool

select {
case request, open = <-p.reqChan:
if !open {
return nil, ErrPoolNotRunning
}
case <-ctx.Done():
return nil, ctx.Err()
}

select {
case request.jobChan <- payload:
case <-ctx.Done():
request.interruptFunc()
return nil, ctx.Err()
}

select {
case payload, open = <-request.retChan:
if !open {
return nil, ErrWorkerClosed
}
case <-ctx.Done():
request.interruptFunc()
return nil, ctx.Err()
}

return payload, nil
}

// QueueLength returns the current count of pending queued jobs.
func (p *Pool) QueueLength() int64 {
return atomic.LoadInt64(&p.queuedJobs)
}

// SetSize changes the total number of workers in the Pool. This can be called
// by any goroutine at any time unless the Pool has been stopped, in which case
// a panic will occur.
func (p *Pool) SetSize(n int) {
p.workerMut.Lock()
defer p.workerMut.Unlock()

lWorkers := len(p.workers)
if lWorkers == n {
return
}

// Add extra workers if N > len(workers)
for i := lWorkers; i < n; i++ {
p.workers = append(p.workers, newWorkerWrapper(p.reqChan, p.ctor()))
}

// Asynchronously stop all workers > N
for i := n; i < lWorkers; i++ {
p.workers[i].stop()
}

// Synchronously wait for all workers > N to stop
for i := n; i < lWorkers; i++ {
p.workers[i].join()
p.workers[i] = nil
}

// Remove stopped workers from slice
p.workers = p.workers[:n]
}

// GetSize returns the current size of the pool.
func (p *Pool) GetSize() int {
p.workerMut.Lock()
defer p.workerMut.Unlock()

return len(p.workers)
}

// Close will terminate all workers and close the job channel of this Pool.
func (p *Pool) Close() {
p.SetSize(0)
close(p.reqChan)
}

//------------------------------------------------------------------------------

BIN
vendor/github.com/Jeffail/tunny/tunny_logo.png View File

Width: 1400  |  Height: 350  |  Size: 52 KiB

+ 126
- 0
vendor/github.com/Jeffail/tunny/worker.go View File

@@ -0,0 +1,126 @@
// Copyright (c) 2014 Ashley Jeffs
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package tunny

//------------------------------------------------------------------------------

// workRequest is a struct containing context representing a workers intention
// to receive a work payload.
type workRequest struct {
// jobChan is used to send the payload to this worker.
jobChan chan<- interface{}

// retChan is used to read the result from this worker.
retChan <-chan interface{}

// interruptFunc can be called to cancel a running job. When called it is no
// longer necessary to read from retChan.
interruptFunc func()
}

//------------------------------------------------------------------------------

// workerWrapper takes a Worker implementation and wraps it within a goroutine
// and channel arrangement. The workerWrapper is responsible for managing the
// lifetime of both the Worker and the goroutine.
type workerWrapper struct {
worker Worker
interruptChan chan struct{}

// reqChan is NOT owned by this type, it is used to send requests for work.
reqChan chan<- workRequest

// closeChan can be closed in order to cleanly shutdown this worker.
closeChan chan struct{}

// closedChan is closed by the run() goroutine when it exits.
closedChan chan struct{}
}

func newWorkerWrapper(
reqChan chan<- workRequest,
worker Worker,
) *workerWrapper {
w := workerWrapper{
worker: worker,
interruptChan: make(chan struct{}),
reqChan: reqChan,
closeChan: make(chan struct{}),
closedChan: make(chan struct{}),
}

go w.run()

return &w
}

//------------------------------------------------------------------------------

func (w *workerWrapper) interrupt() {
close(w.interruptChan)
w.worker.Interrupt()
}

func (w *workerWrapper) run() {
jobChan, retChan := make(chan interface{}), make(chan interface{})
defer func() {
w.worker.Terminate()
close(retChan)
close(w.closedChan)
}()

for {
// NOTE: Blocking here will prevent the worker from closing down.
w.worker.BlockUntilReady()
select {
case w.reqChan <- workRequest{
jobChan: jobChan,
retChan: retChan,
interruptFunc: w.interrupt,
}:
select {
case payload := <-jobChan:
result := w.worker.Process(payload)
select {
case retChan <- result:
case <-w.interruptChan:
w.interruptChan = make(chan struct{})
}
case _, _ = <-w.interruptChan:
w.interruptChan = make(chan struct{})
}
case <-w.closeChan:
return
}
}
}

//------------------------------------------------------------------------------

func (w *workerWrapper) stop() {
close(w.closeChan)
}

func (w *workerWrapper) join() {
<-w.closedChan
}

//------------------------------------------------------------------------------

+ 0
- 27
vendor/golang.org/x/exp/LICENSE View File

@@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 0
- 22
vendor/golang.org/x/exp/PATENTS View File

@@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

+ 0
- 50
vendor/golang.org/x/exp/constraints/constraints.go View File

@@ -1,50 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package constraints defines a set of useful constraints to be used
// with type parameters.
package constraints

// Signed is a constraint that permits any signed integer type.
// If future releases of Go add new predeclared signed integer types,
// this constraint will be modified to include them.
type Signed interface {
~int | ~int8 | ~int16 | ~int32 | ~int64
}

// Unsigned is a constraint that permits any unsigned integer type.
// If future releases of Go add new predeclared unsigned integer types,
// this constraint will be modified to include them.
type Unsigned interface {
~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
}

// Integer is a constraint that permits any integer type.
// If future releases of Go add new predeclared integer types,
// this constraint will be modified to include them.
type Integer interface {
Signed | Unsigned
}

// Float is a constraint that permits any floating-point type.
// If future releases of Go add new predeclared floating-point types,
// this constraint will be modified to include them.
type Float interface {
~float32 | ~float64
}

// Complex is a constraint that permits any complex numeric type.
// If future releases of Go add new predeclared complex numeric types,
// this constraint will be modified to include them.
type Complex interface {
~complex64 | ~complex128
}

// Ordered is a constraint that permits any ordered type: any type
// that supports the operators < <= >= >.
// If future releases of Go add new ordered types,
// this constraint will be modified to include them.
type Ordered interface {
Integer | Float | ~string
}

+ 0
- 44
vendor/golang.org/x/exp/slices/cmp.go View File

@@ -1,44 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slices

import "golang.org/x/exp/constraints"

// min is a version of the predeclared function from the Go 1.21 release.
func min[T constraints.Ordered](a, b T) T {
if a < b || isNaN(a) {
return a
}
return b
}

// max is a version of the predeclared function from the Go 1.21 release.
func max[T constraints.Ordered](a, b T) T {
if a > b || isNaN(a) {
return a
}
return b
}

// cmpLess is a copy of cmp.Less from the Go 1.21 release.
func cmpLess[T constraints.Ordered](x, y T) bool {
return (isNaN(x) && !isNaN(y)) || x < y
}

// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
func cmpCompare[T constraints.Ordered](x, y T) int {
xNaN := isNaN(x)
yNaN := isNaN(y)
if xNaN && yNaN {
return 0
}
if xNaN || x < y {
return -1
}
if yNaN || x > y {
return +1
}
return 0
}

+ 0
- 499
vendor/golang.org/x/exp/slices/slices.go View File

@@ -1,499 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package slices defines various functions useful with slices of any type.
package slices

import (
"unsafe"

"golang.org/x/exp/constraints"
)

// Equal reports whether two slices are equal: the same length and all
// elements equal. If the lengths are different, Equal returns false.
// Otherwise, the elements are compared in increasing index order, and the
// comparison stops at the first unequal pair.
// Floating point NaNs are not considered equal.
func Equal[S ~[]E, E comparable](s1, s2 S) bool {
if len(s1) != len(s2) {
return false
}
for i := range s1 {
if s1[i] != s2[i] {
return false
}
}
return true
}

// EqualFunc reports whether two slices are equal using an equality
// function on each pair of elements. If the lengths are different,
// EqualFunc returns false. Otherwise, the elements are compared in
// increasing index order, and the comparison stops at the first index
// for which eq returns false.
func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
if len(s1) != len(s2) {
return false
}
for i, v1 := range s1 {
v2 := s2[i]
if !eq(v1, v2) {
return false
}
}
return true
}

// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
// of elements. The elements are compared sequentially, starting at index 0,
// until one element is not equal to the other.
// The result of comparing the first non-matching elements is returned.
// If both slices are equal until one of them ends, the shorter slice is
// considered less than the longer one.
// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
for i, v1 := range s1 {
if i >= len(s2) {
return +1
}
v2 := s2[i]
if c := cmpCompare(v1, v2); c != 0 {
return c
}
}
if len(s1) < len(s2) {
return -1
}
return 0
}

// CompareFunc is like [Compare] but uses a custom comparison function on each
// pair of elements.
// The result is the first non-zero result of cmp; if cmp always
// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
// and +1 if len(s1) > len(s2).
func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
for i, v1 := range s1 {
if i >= len(s2) {
return +1
}
v2 := s2[i]
if c := cmp(v1, v2); c != 0 {
return c
}
}
if len(s1) < len(s2) {
return -1
}
return 0
}

// Index returns the index of the first occurrence of v in s,
// or -1 if not present.
func Index[S ~[]E, E comparable](s S, v E) int {
for i := range s {
if v == s[i] {
return i
}
}
return -1
}

// IndexFunc returns the first index i satisfying f(s[i]),
// or -1 if none do.
func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
for i := range s {
if f(s[i]) {
return i
}
}
return -1
}

// Contains reports whether v is present in s.
func Contains[S ~[]E, E comparable](s S, v E) bool {
return Index(s, v) >= 0
}

// ContainsFunc reports whether at least one
// element e of s satisfies f(e).
func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
return IndexFunc(s, f) >= 0
}

// Insert inserts the values v... into s at index i,
// returning the modified slice.
// The elements at s[i:] are shifted up to make room.
// In the returned slice r, r[i] == v[0],
// and r[i+len(v)] == value originally at r[i].
// Insert panics if i is out of range.
// This function is O(len(s) + len(v)).
func Insert[S ~[]E, E any](s S, i int, v ...E) S {
m := len(v)
if m == 0 {
return s
}
n := len(s)
if i == n {
return append(s, v...)
}
if n+m > cap(s) {
// Use append rather than make so that we bump the size of
// the slice up to the next storage class.
// This is what Grow does but we don't call Grow because
// that might copy the values twice.
s2 := append(s[:i], make(S, n+m-i)...)
copy(s2[i:], v)
copy(s2[i+m:], s[i:])
return s2
}
s = s[:n+m]

// before:
// s: aaaaaaaabbbbccccccccdddd
// ^ ^ ^ ^
// i i+m n n+m
// after:
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
//
// a are the values that don't move in s.
// v are the values copied in from v.
// b and c are the values from s that are shifted up in index.
// d are the values that get overwritten, never to be seen again.

if !overlaps(v, s[i+m:]) {
// Easy case - v does not overlap either the c or d regions.
// (It might be in some of a or b, or elsewhere entirely.)
// The data we copy up doesn't write to v at all, so just do it.

copy(s[i+m:], s[i:])

// Now we have
// s: aaaaaaaabbbbbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// Note the b values are duplicated.

copy(s[i:], v)

// Now we have
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// That's the result we want.
return s
}

// The hard case - v overlaps c or d. We can't just shift up
// the data because we'd move or clobber the values we're trying
// to insert.
// So instead, write v on top of d, then rotate.
copy(s[n:], v)

// Now we have
// s: aaaaaaaabbbbccccccccvvvv
// ^ ^ ^ ^
// i i+m n n+m

rotateRight(s[i:], m)

// Now we have
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// That's the result we want.
return s
}

// Delete removes the elements s[i:j] from s, returning the modified slice.
// Delete panics if s[i:j] is not a valid slice of s.
// Delete is O(len(s)-j), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
// elements contain pointers you might consider zeroing those elements so that
// objects they reference can be garbage collected.
func Delete[S ~[]E, E any](s S, i, j int) S {
_ = s[i:j] // bounds check

return append(s[:i], s[j:]...)
}

// DeleteFunc removes any elements from s for which del returns true,
// returning the modified slice.
// When DeleteFunc removes m elements, it might not modify the elements
// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
// zeroing those elements so that objects they reference can be garbage
// collected.
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
i := IndexFunc(s, del)
if i == -1 {
return s
}
// Don't start copying elements until we find one to delete.
for j := i + 1; j < len(s); j++ {
if v := s[j]; !del(v) {
s[i] = v
i++
}
}
return s[:i]
}

// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
_ = s[i:j] // verify that i:j is a valid subslice

if i == j {
return Insert(s, i, v...)
}
if j == len(s) {
return append(s[:i], v...)
}

tot := len(s[:i]) + len(v) + len(s[j:])
if tot > cap(s) {
// Too big to fit, allocate and copy over.
s2 := append(s[:i], make(S, tot-i)...) // See Insert
copy(s2[i:], v)
copy(s2[i+len(v):], s[j:])
return s2
}

r := s[:tot]

if i+len(v) <= j {
// Easy, as v fits in the deleted portion.
copy(r[i:], v)
if i+len(v) != j {
copy(r[i+len(v):], s[j:])
}
return r
}

// We are expanding (v is bigger than j-i).
// The situation is something like this:
// (example has i=4,j=8,len(s)=16,len(v)=6)
// s: aaaaxxxxbbbbbbbbyy
// ^ ^ ^ ^
// i j len(s) tot
// a: prefix of s
// x: deleted range
// b: more of s
// y: area to expand into

if !overlaps(r[i+len(v):], v) {
// Easy, as v is not clobbered by the first copy.
copy(r[i+len(v):], s[j:])
copy(r[i:], v)
return r
}

// This is a situation where we don't have a single place to which
// we can copy v. Parts of it need to go to two different places.
// We want to copy the prefix of v into y and the suffix into x, then
// rotate |y| spots to the right.
//
// v[2:] v[:2]
// | |
// s: aaaavvvvbbbbbbbbvv
// ^ ^ ^ ^
// i j len(s) tot
//
// If either of those two destinations don't alias v, then we're good.
y := len(v) - (j - i) // length of y portion

if !overlaps(r[i:j], v) {
copy(r[i:j], v[y:])
copy(r[len(s):], v[:y])
rotateRight(r[i:], y)
return r
}
if !overlaps(r[len(s):], v) {
copy(r[len(s):], v[:y])
copy(r[i:j], v[y:])
rotateRight(r[i:], y)
return r
}

// Now we know that v overlaps both x and y.
// That means that the entirety of b is *inside* v.
// So we don't need to preserve b at all; instead we
// can copy v first, then copy the b part of v out of
// v to the right destination.
k := startIdx(v, s[j:])
copy(r[i:], v)
copy(r[i+len(v):], r[i+k:])
return r
}

// Clone returns a copy of the slice.
// The elements are copied using assignment, so this is a shallow clone.
func Clone[S ~[]E, E any](s S) S {
// Preserve nil in case it matters.
if s == nil {
return nil
}
return append(S([]E{}), s...)
}

// Compact replaces consecutive runs of equal elements with a single copy.
// This is like the uniq command found on Unix.
// Compact modifies the contents of the slice s and returns the modified slice,
// which may have a smaller length.
// When Compact discards m elements in total, it might not modify the elements
// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
// zeroing those elements so that objects they reference can be garbage collected.
func Compact[S ~[]E, E comparable](s S) S {
if len(s) < 2 {
return s
}
i := 1
for k := 1; k < len(s); k++ {
if s[k] != s[k-1] {
if i != k {
s[i] = s[k]
}
i++
}
}
return s[:i]
}

// CompactFunc is like [Compact] but uses an equality function to compare elements.
// For runs of elements that compare equal, CompactFunc keeps the first one.
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
if len(s) < 2 {
return s
}
i := 1
for k := 1; k < len(s); k++ {
if !eq(s[k], s[k-1]) {
if i != k {
s[i] = s[k]
}
i++
}
}
return s[:i]
}

// Grow increases the slice's capacity, if necessary, to guarantee space for
// another n elements. After Grow(n), at least n elements can be appended
// to the slice without another allocation. If n is negative or too large to
// allocate the memory, Grow panics.
func Grow[S ~[]E, E any](s S, n int) S {
if n < 0 {
panic("cannot be negative")
}
if n -= cap(s) - len(s); n > 0 {
// TODO(https://go.dev/issue/53888): Make using []E instead of S
// to workaround a compiler bug where the runtime.growslice optimization
// does not take effect. Revert when the compiler is fixed.
s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
}
return s
}

// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
func Clip[S ~[]E, E any](s S) S {
return s[:len(s):len(s)]
}

// Rotation algorithm explanation:
//
// rotate left by 2
// start with
// 0123456789
// split up like this
// 01 234567 89
// swap first 2 and last 2
// 89 234567 01
// join first parts
// 89234567 01
// recursively rotate first left part by 2
// 23456789 01
// join at the end
// 2345678901
//
// rotate left by 8
// start with
// 0123456789
// split up like this
// 01 234567 89
// swap first 2 and last 2
// 89 234567 01
// join last parts
// 89 23456701
// recursively rotate second part left by 6
// 89 01234567
// join at the end
// 8901234567

// TODO: There are other rotate algorithms.
// This algorithm has the desirable property that it moves each element exactly twice.
// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
// The follow-cycles algorithm can be 1-write but it is not very cache friendly.

// rotateLeft rotates b left by n spaces.
// s_final[i] = s_orig[i+r], wrapping around.
func rotateLeft[E any](s []E, r int) {
for r != 0 && r != len(s) {
if r*2 <= len(s) {
swap(s[:r], s[len(s)-r:])
s = s[:len(s)-r]
} else {
swap(s[:len(s)-r], s[r:])
s, r = s[len(s)-r:], r*2-len(s)
}
}
}
func rotateRight[E any](s []E, r int) {
rotateLeft(s, len(s)-r)
}

// swap swaps the contents of x and y. x and y must be equal length and disjoint.
func swap[E any](x, y []E) {
for i := 0; i < len(x); i++ {
x[i], y[i] = y[i], x[i]
}
}

// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
func overlaps[E any](a, b []E) bool {
if len(a) == 0 || len(b) == 0 {
return false
}
elemSize := unsafe.Sizeof(a[0])
if elemSize == 0 {
return false
}
// TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
// Also see crypto/internal/alias/alias.go:AnyOverlap
return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
}

// startIdx returns the index in haystack where the needle starts.
// prerequisite: the needle must be aliased entirely inside the haystack.
func startIdx[E any](haystack, needle []E) int {
p := &needle[0]
for i := range haystack {
if p == &haystack[i] {
return i
}
}
// TODO: what if the overlap is by a non-integral number of Es?
panic("needle not found")
}

// Reverse reverses the elements of the slice in place.
func Reverse[S ~[]E, E any](s S) {
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
}

+ 0
- 195
vendor/golang.org/x/exp/slices/sort.go View File

@@ -1,195 +0,0 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp

package slices

import (
"math/bits"

"golang.org/x/exp/constraints"
)

// Sort sorts a slice of any ordered type in ascending order.
// When sorting floating-point numbers, NaNs are ordered before other values.
func Sort[S ~[]E, E constraints.Ordered](x S) {
n := len(x)
pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
}

// SortFunc sorts the slice x in ascending order as determined by the cmp
// function. This sort is not guaranteed to be stable.
// cmp(a, b) should return a negative number when a < b, a positive number when
// a > b and zero when a == b.
//
// SortFunc requires that cmp is a strict weak ordering.
// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
n := len(x)
pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
}

// SortStableFunc sorts the slice x while keeping the original order of equal
// elements, using cmp to compare elements in the same way as [SortFunc].
func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
stableCmpFunc(x, len(x), cmp)
}

// IsSorted reports whether x is sorted in ascending order.
func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
for i := len(x) - 1; i > 0; i-- {
if cmpLess(x[i], x[i-1]) {
return false
}
}
return true
}

// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
// comparison function as defined by [SortFunc].
func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
for i := len(x) - 1; i > 0; i-- {
if cmp(x[i], x[i-1]) < 0 {
return false
}
}
return true
}

// Min returns the minimal value in x. It panics if x is empty.
// For floating-point numbers, Min propagates NaNs (any NaN value in x
// forces the output to be NaN).
func Min[S ~[]E, E constraints.Ordered](x S) E {
if len(x) < 1 {
panic("slices.Min: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
m = min(m, x[i])
}
return m
}

// MinFunc returns the minimal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one minimal element
// according to the cmp function, MinFunc returns the first one.
func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
if len(x) < 1 {
panic("slices.MinFunc: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
if cmp(x[i], m) < 0 {
m = x[i]
}
}
return m
}

// Max returns the maximal value in x. It panics if x is empty.
// For floating-point E, Max propagates NaNs (any NaN value in x
// forces the output to be NaN).
func Max[S ~[]E, E constraints.Ordered](x S) E {
if len(x) < 1 {
panic("slices.Max: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
m = max(m, x[i])
}
return m
}

// MaxFunc returns the maximal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one maximal element
// according to the cmp function, MaxFunc returns the first one.
func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
if len(x) < 1 {
panic("slices.MaxFunc: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
if cmp(x[i], m) > 0 {
m = x[i]
}
}
return m
}

// BinarySearch searches for target in a sorted slice and returns the position
// where target is found, or the position where target would appear in the
// sort order; it also returns a bool saying whether the target is really found
// in the slice. The slice must be sorted in increasing order.
func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
// Inlining is faster than calling BinarySearchFunc with a lambda.
n := len(x)
// Define x[-1] < target and x[n] >= target.
// Invariant: x[i-1] < target, x[j] >= target.
i, j := 0, n
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if cmpLess(x[h], target) {
i = h + 1 // preserves x[i-1] < target
} else {
j = h // preserves x[j] >= target
}
}
// i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
}

// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
// function. The slice must be sorted in increasing order, where "increasing"
// is defined by cmp. cmp should return 0 if the slice element matches
// the target, a negative number if the slice element precedes the target,
// or a positive number if the slice element follows the target.
// cmp must implement the same ordering as the slice, such that if
// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
n := len(x)
// Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
// Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
i, j := 0, n
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if cmp(x[h], target) < 0 {
i = h + 1 // preserves cmp(x[i - 1], target) < 0
} else {
j = h // preserves cmp(x[j], target) >= 0
}
}
// i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
return i, i < n && cmp(x[i], target) == 0
}

type sortedHint int // hint for pdqsort when choosing the pivot

const (
unknownHint sortedHint = iota
increasingHint
decreasingHint
)

// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
type xorshift uint64

func (r *xorshift) Next() uint64 {
*r ^= *r << 13
*r ^= *r >> 17
*r ^= *r << 5
return uint64(*r)
}

func nextPowerOfTwo(length int) uint {
return 1 << bits.Len(uint(length))
}

// isNaN reports whether x is a NaN without requiring the math package.
// This will always return false if T is not floating-point.
func isNaN[T constraints.Ordered](x T) bool {
return x != x
}

+ 0
- 479
vendor/golang.org/x/exp/slices/zsortanyfunc.go View File

@@ -1,479 +0,0 @@
// Code generated by gen_sort_variants.go; DO NOT EDIT.

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slices

// insertionSortCmpFunc sorts data[a:b] using insertion sort.
func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}

// siftDownCmpFunc implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
child++
}
if !(cmp(data[first+root], data[first+child]) < 0) {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
root = child
}
}

func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
first := a
lo := 0
hi := b - a

// Build heap with greatest element at top.
for i := (hi - 1) / 2; i >= 0; i-- {
siftDownCmpFunc(data, i, hi, first, cmp)
}

// Pop elements, largest first, into end of data.
for i := hi - 1; i >= 0; i-- {
data[first], data[first+i] = data[first+i], data[first]
siftDownCmpFunc(data, lo, i, first, cmp)
}
}

// pdqsortCmpFunc sorts data[a:b].
// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
const maxInsertion = 12

var (
wasBalanced = true // whether the last partitioning was reasonably balanced
wasPartitioned = true // whether the slice was already partitioned
)

for {
length := b - a

if length <= maxInsertion {
insertionSortCmpFunc(data, a, b, cmp)
return
}

// Fall back to heapsort if too many bad choices were made.
if limit == 0 {
heapSortCmpFunc(data, a, b, cmp)
return
}

// If the last partitioning was imbalanced, we need to break patterns.
if !wasBalanced {
breakPatternsCmpFunc(data, a, b, cmp)
limit--
}

pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
if hint == decreasingHint {
reverseRangeCmpFunc(data, a, b, cmp)
// The chosen pivot was pivot-a elements after the start of the array.
// After reversing it is pivot-a elements before the end of the array.
// The idea came from Rust's implementation.
pivot = (b - 1) - (pivot - a)
hint = increasingHint
}

// The slice is likely already sorted.
if wasBalanced && wasPartitioned && hint == increasingHint {
if partialInsertionSortCmpFunc(data, a, b, cmp) {
return
}
}

// Probably the slice contains many duplicate elements, partition the slice into
// elements equal to and elements greater than the pivot.
if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
a = mid
continue
}

mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
wasPartitioned = alreadyPartitioned

leftLen, rightLen := mid-a, b-mid
balanceThreshold := length / 8
if leftLen < rightLen {
wasBalanced = leftLen >= balanceThreshold
pdqsortCmpFunc(data, a, mid, limit, cmp)
a = mid + 1
} else {
wasBalanced = rightLen >= balanceThreshold
pdqsortCmpFunc(data, mid+1, b, limit, cmp)
b = mid
}
}
}

// partitionCmpFunc does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

for i <= j && (cmp(data[i], data[a]) < 0) {
i++
}
for i <= j && !(cmp(data[j], data[a]) < 0) {
j--
}
if i > j {
data[j], data[a] = data[a], data[j]
return j, true
}
data[i], data[j] = data[j], data[i]
i++
j--

for {
for i <= j && (cmp(data[i], data[a]) < 0) {
i++
}
for i <= j && !(cmp(data[j], data[a]) < 0) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
data[j], data[a] = data[a], data[j]
return j, false
}

// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It assumes that data[a:b] does not contain elements smaller than data[pivot].
func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

for {
for i <= j && !(cmp(data[a], data[i]) < 0) {
i++
}
for i <= j && (cmp(data[a], data[j]) < 0) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
return i
}

// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
const (
maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
shortestShifting = 50 // don't shift any elements on short arrays
)
i := a + 1
for j := 0; j < maxSteps; j++ {
for i < b && !(cmp(data[i], data[i-1]) < 0) {
i++
}

if i == b {
return true
}

if b-a < shortestShifting {
return false
}

data[i], data[i-1] = data[i-1], data[i]

// Shift the smaller one to the left.
if i-a >= 2 {
for j := i - 1; j >= 1; j-- {
if !(cmp(data[j], data[j-1]) < 0) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
// Shift the greater one to the right.
if b-i >= 2 {
for j := i + 1; j < b; j++ {
if !(cmp(data[j], data[j-1]) < 0) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
}
return false
}

// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
length := b - a
if length >= 8 {
random := xorshift(length)
modulus := nextPowerOfTwo(length)

for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
other := int(uint(random.Next()) & (modulus - 1))
if other >= length {
other -= length
}
data[idx], data[a+other] = data[a+other], data[idx]
}
}
}

// choosePivotCmpFunc chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
const (
shortestNinther = 50
maxSwaps = 4 * 3
)

l := b - a

var (
swaps int
i = a + l/4*1
j = a + l/4*2
k = a + l/4*3
)

if l >= 8 {
if l >= shortestNinther {
// Tukey ninther method, the idea came from Rust's implementation.
i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
}
// Find the median among i, j, k and store it into j.
j = medianCmpFunc(data, i, j, k, &swaps, cmp)
}

switch swaps {
case 0:
return j, increasingHint
case maxSwaps:
return j, decreasingHint
default:
return j, unknownHint
}
}

// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
if cmp(data[b], data[a]) < 0 {
*swaps++
return b, a
}
return a, b
}

// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
a, b = order2CmpFunc(data, a, b, swaps, cmp)
b, c = order2CmpFunc(data, b, c, swaps, cmp)
a, b = order2CmpFunc(data, a, b, swaps, cmp)
return b
}

// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
}

func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
i := a
j := b - 1
for i < j {
data[i], data[j] = data[j], data[i]
i++
j--
}
}

func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
for i := 0; i < n; i++ {
data[a+i], data[b+i] = data[b+i], data[a+i]
}
}

func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
blockSize := 20 // must be > 0
a, b := 0, blockSize
for b <= n {
insertionSortCmpFunc(data, a, b, cmp)
a = b
b += blockSize
}
insertionSortCmpFunc(data, a, n, cmp)

for blockSize < n {
a, b = 0, 2*blockSize
for b <= n {
symMergeCmpFunc(data, a, a+blockSize, b, cmp)
a = b
b += 2 * blockSize
}
if m := a + blockSize; m < n {
symMergeCmpFunc(data, a, m, n, cmp)
}
blockSize *= 2
}
}

// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
// Computer Science, pages 714-723. Springer, 2004.
//
// Let M = m-a and N = b-m. Without loss of generality, M < N.
// The recursion depth is bound by ceil(log(N+M)).
// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
//
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
// in the paper carries through for Swap operations, especially as the block
// swapping rotate uses only O(M+N) Swaps.
//
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[a] into data[m:b]
// if data[a:m] only contains one element.
if m-a == 1 {
// Use binary search to find the lowest index i
// such that data[i] >= data[a] for m <= i < b.
// Exit the search loop with i == b in case no such index exists.
i := m
j := b
for i < j {
h := int(uint(i+j) >> 1)
if cmp(data[h], data[a]) < 0 {
i = h + 1
} else {
j = h
}
}
// Swap values until data[a] reaches the position before i.
for k := a; k < i-1; k++ {
data[k], data[k+1] = data[k+1], data[k]
}
return
}

// Avoid unnecessary recursions of symMerge
// by direct insertion of data[m] into data[a:m]
// if data[m:b] only contains one element.
if b-m == 1 {
// Use binary search to find the lowest index i
// such that data[i] > data[m] for a <= i < m.
// Exit the search loop with i == m in case no such index exists.
i := a
j := m
for i < j {
h := int(uint(i+j) >> 1)
if !(cmp(data[m], data[h]) < 0) {
i = h + 1
} else {
j = h
}
}
// Swap values until data[m] reaches the position i.
for k := m; k > i; k-- {
data[k], data[k-1] = data[k-1], data[k]
}
return
}

mid := int(uint(a+b) >> 1)
n := mid + m
var start, r int
if m > mid {
start = n - b
r = mid
} else {
start = a
r = m
}
p := n - 1

for start < r {
c := int(uint(start+r) >> 1)
if !(cmp(data[p-c], data[c]) < 0) {
start = c + 1
} else {
r = c
}
}

end := n - start
if start < m && m < end {
rotateCmpFunc(data, start, m, end, cmp)
}
if a < start && start < mid {
symMergeCmpFunc(data, a, start, mid, cmp)
}
if mid < end && end < b {
symMergeCmpFunc(data, mid, end, b, cmp)
}
}

// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotate performs at most b-a many calls to data.Swap,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
i := m - a
j := b - m

for i != j {
if i > j {
swapRangeCmpFunc(data, m-i, m, j, cmp)
i -= j
} else {
swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
j -= i
}
}
// i == j
swapRangeCmpFunc(data, m-i, m, i, cmp)
}
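The generated *CmpFunc variants in this deleted file are internal; in the x/exp package they are reached through the exported SortFunc and SortStableFunc. A minimal sketch of both entry points, using the equivalent standard library slices package; the stable call is the one that, in the code above, ends up in stableCmpFunc and symMergeCmpFunc:

// Sketch only: public entry points that drive the generated comparator-based sorts.
package main

import (
	"cmp"
	"fmt"
	"slices"
)

type job struct {
	name     string
	priority int
}

func main() {
	jobs := []job{{"deploy", 2}, {"build", 1}, {"test", 1}}

	// SortFunc uses the pdqsort path; it is not guaranteed to be stable.
	slices.SortFunc(jobs, func(a, b job) int { return cmp.Compare(a.priority, b.priority) })
	fmt.Println(jobs)

	// SortStableFunc keeps equal-priority jobs in their original order
	// (insertion sort on small blocks, then SymMerge, as in the code above).
	slices.SortStableFunc(jobs, func(a, b job) int { return cmp.Compare(a.priority, b.priority) })
	fmt.Println(jobs)
}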

+ 0
- 481
vendor/golang.org/x/exp/slices/zsortordered.go View File

@@ -1,481 +0,0 @@
// Code generated by gen_sort_variants.go; DO NOT EDIT.

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slices

import "golang.org/x/exp/constraints"

// insertionSortOrdered sorts data[a:b] using insertion sort.
func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}

// siftDownOrdered implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
child++
}
if !cmpLess(data[first+root], data[first+child]) {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
root = child
}
}

func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
first := a
lo := 0
hi := b - a

// Build heap with greatest element at top.
for i := (hi - 1) / 2; i >= 0; i-- {
siftDownOrdered(data, i, hi, first)
}

// Pop elements, largest first, into end of data.
for i := hi - 1; i >= 0; i-- {
data[first], data[first+i] = data[first+i], data[first]
siftDownOrdered(data, lo, i, first)
}
}

// pdqsortOrdered sorts data[a:b].
// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
const maxInsertion = 12

var (
wasBalanced = true // whether the last partitioning was reasonably balanced
wasPartitioned = true // whether the slice was already partitioned
)

for {
length := b - a

if length <= maxInsertion {
insertionSortOrdered(data, a, b)
return
}

// Fall back to heapsort if too many bad choices were made.
if limit == 0 {
heapSortOrdered(data, a, b)
return
}

// If the last partitioning was imbalanced, we need to break patterns.
if !wasBalanced {
breakPatternsOrdered(data, a, b)
limit--
}

pivot, hint := choosePivotOrdered(data, a, b)
if hint == decreasingHint {
reverseRangeOrdered(data, a, b)
// The chosen pivot was pivot-a elements after the start of the array.
// After reversing it is pivot-a elements before the end of the array.
// The idea came from Rust's implementation.
pivot = (b - 1) - (pivot - a)
hint = increasingHint
}

// The slice is likely already sorted.
if wasBalanced && wasPartitioned && hint == increasingHint {
if partialInsertionSortOrdered(data, a, b) {
return
}
}

// Probably the slice contains many duplicate elements, partition the slice into
// elements equal to and elements greater than the pivot.
if a > 0 && !cmpLess(data[a-1], data[pivot]) {
mid := partitionEqualOrdered(data, a, b, pivot)
a = mid
continue
}

mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
wasPartitioned = alreadyPartitioned

leftLen, rightLen := mid-a, b-mid
balanceThreshold := length / 8
if leftLen < rightLen {
wasBalanced = leftLen >= balanceThreshold
pdqsortOrdered(data, a, mid, limit)
a = mid + 1
} else {
wasBalanced = rightLen >= balanceThreshold
pdqsortOrdered(data, mid+1, b, limit)
b = mid
}
}
}

// partitionOrdered does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

for i <= j && cmpLess(data[i], data[a]) {
i++
}
for i <= j && !cmpLess(data[j], data[a]) {
j--
}
if i > j {
data[j], data[a] = data[a], data[j]
return j, true
}
data[i], data[j] = data[j], data[i]
i++
j--

for {
for i <= j && cmpLess(data[i], data[a]) {
i++
}
for i <= j && !cmpLess(data[j], data[a]) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
data[j], data[a] = data[a], data[j]
return j, false
}

// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It assumes that data[a:b] does not contain elements smaller than data[pivot].
func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

for {
for i <= j && !cmpLess(data[a], data[i]) {
i++
}
for i <= j && cmpLess(data[a], data[j]) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
return i
}

// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
const (
maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
shortestShifting = 50 // don't shift any elements on short arrays
)
i := a + 1
for j := 0; j < maxSteps; j++ {
for i < b && !cmpLess(data[i], data[i-1]) {
i++
}

if i == b {
return true
}

if b-a < shortestShifting {
return false
}

data[i], data[i-1] = data[i-1], data[i]

// Shift the smaller one to the left.
if i-a >= 2 {
for j := i - 1; j >= 1; j-- {
if !cmpLess(data[j], data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
// Shift the greater one to the right.
if b-i >= 2 {
for j := i + 1; j < b; j++ {
if !cmpLess(data[j], data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
}
return false
}

// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
length := b - a
if length >= 8 {
random := xorshift(length)
modulus := nextPowerOfTwo(length)

for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
other := int(uint(random.Next()) & (modulus - 1))
if other >= length {
other -= length
}
data[idx], data[a+other] = data[a+other], data[idx]
}
}
}

// choosePivotOrdered chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
const (
shortestNinther = 50
maxSwaps = 4 * 3
)

l := b - a

var (
swaps int
i = a + l/4*1
j = a + l/4*2
k = a + l/4*3
)

if l >= 8 {
if l >= shortestNinther {
// Tukey ninther method, the idea came from Rust's implementation.
i = medianAdjacentOrdered(data, i, &swaps)
j = medianAdjacentOrdered(data, j, &swaps)
k = medianAdjacentOrdered(data, k, &swaps)
}
// Find the median among i, j, k and store it into j.
j = medianOrdered(data, i, j, k, &swaps)
}

switch swaps {
case 0:
return j, increasingHint
case maxSwaps:
return j, decreasingHint
default:
return j, unknownHint
}
}

// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
if cmpLess(data[b], data[a]) {
*swaps++
return b, a
}
return a, b
}

// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
a, b = order2Ordered(data, a, b, swaps)
b, c = order2Ordered(data, b, c, swaps)
a, b = order2Ordered(data, a, b, swaps)
return b
}

// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
return medianOrdered(data, a-1, a, a+1, swaps)
}

func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
i := a
j := b - 1
for i < j {
data[i], data[j] = data[j], data[i]
i++
j--
}
}

func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
for i := 0; i < n; i++ {
data[a+i], data[b+i] = data[b+i], data[a+i]
}
}

func stableOrdered[E constraints.Ordered](data []E, n int) {
blockSize := 20 // must be > 0
a, b := 0, blockSize
for b <= n {
insertionSortOrdered(data, a, b)
a = b
b += blockSize
}
insertionSortOrdered(data, a, n)

for blockSize < n {
a, b = 0, 2*blockSize
for b <= n {
symMergeOrdered(data, a, a+blockSize, b)
a = b
b += 2 * blockSize
}
if m := a + blockSize; m < n {
symMergeOrdered(data, a, m, n)
}
blockSize *= 2
}
}

// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
// Computer Science, pages 714-723. Springer, 2004.
//
// Let M = m-a and N = b-m. Without loss of generality, M < N.
// The recursion depth is bound by ceil(log(N+M)).
// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
//
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
// in the paper carries through for Swap operations, especially as the block
// swapping rotate uses only O(M+N) Swaps.
//
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[a] into data[m:b]
// if data[a:m] only contains one element.
if m-a == 1 {
// Use binary search to find the lowest index i
// such that data[i] >= data[a] for m <= i < b.
// Exit the search loop with i == b in case no such index exists.
i := m
j := b
for i < j {
h := int(uint(i+j) >> 1)
if cmpLess(data[h], data[a]) {
i = h + 1
} else {
j = h
}
}
// Swap values until data[a] reaches the position before i.
for k := a; k < i-1; k++ {
data[k], data[k+1] = data[k+1], data[k]
}
return
}

// Avoid unnecessary recursions of symMerge
// by direct insertion of data[m] into data[a:m]
// if data[m:b] only contains one element.
if b-m == 1 {
// Use binary search to find the lowest index i
// such that data[i] > data[m] for a <= i < m.
// Exit the search loop with i == m in case no such index exists.
i := a
j := m
for i < j {
h := int(uint(i+j) >> 1)
if !cmpLess(data[m], data[h]) {
i = h + 1
} else {
j = h
}
}
// Swap values until data[m] reaches the position i.
for k := m; k > i; k-- {
data[k], data[k-1] = data[k-1], data[k]
}
return
}

mid := int(uint(a+b) >> 1)
n := mid + m
var start, r int
if m > mid {
start = n - b
r = mid
} else {
start = a
r = m
}
p := n - 1

for start < r {
c := int(uint(start+r) >> 1)
if !cmpLess(data[p-c], data[c]) {
start = c + 1
} else {
r = c
}
}

end := n - start
if start < m && m < end {
rotateOrdered(data, start, m, end)
}
if a < start && start < mid {
symMergeOrdered(data, a, start, mid)
}
if mid < end && end < b {
symMergeOrdered(data, mid, end, b)
}
}

// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotate performs at most b-a many calls to data.Swap,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
i := m - a
j := b - m

for i != j {
if i > j {
swapRangeOrdered(data, m-i, m, j)
i -= j
} else {
swapRangeOrdered(data, m-i, m+j-i, i)
j -= i
}
}
// i == j
swapRangeOrdered(data, m-i, m, i)
}
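breakPatternsOrdered above (like its CmpFunc twin earlier) guards pdqsort against adversarial inputs by swapping three slots around the midpoint of data[a:b] with pseudo-random positions taken from the xorshift generator and masked to the next power of two. A standalone sketch of just that index selection, printing the chosen pairs instead of swapping:

// Sketch only: reproduces the index arithmetic of breakPatterns* for a range of length 20.
package main

import (
	"fmt"
	"math/bits"
)

type xorshift uint64

func (r *xorshift) Next() uint64 {
	*r ^= *r << 13
	*r ^= *r >> 17
	*r ^= *r << 5
	return uint64(*r)
}

func main() {
	a, b := 0, 20
	length := b - a
	random := xorshift(length)
	modulus := uint(1) << bits.Len(uint(length)) // nextPowerOfTwo(length) == 32

	// Same loop bounds as breakPatterns*: the three indices around the midpoint.
	for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
		other := int(uint(random.Next()) & (modulus - 1))
		if other >= length {
			other -= length
		}
		fmt.Printf("swap data[%d] <-> data[%d]\n", idx, a+other)
	}
}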

+ 3
- 2
vendor/modules.txt View File

@@ -61,6 +61,9 @@ github.com/360EntSecGroup-Skylar/excelize/v2
# github.com/BurntSushi/toml v0.3.1
## explicit
github.com/BurntSushi/toml
# github.com/Jeffail/tunny v0.1.4
## explicit; go 1.13
github.com/Jeffail/tunny
# github.com/Microsoft/go-winio v0.5.2
## explicit; go 1.13
github.com/Microsoft/go-winio
@@ -1145,8 +1148,6 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
# golang.org/x/exp v0.0.0-20231127185646-65229373498e
## explicit; go 1.20
golang.org/x/exp/constraints
golang.org/x/exp/slices
# golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8
## explicit; go 1.12
golang.org/x/image/bmp
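The modules.txt hunk records one new vendored dependency, github.com/Jeffail/tunny (a fixed-size goroutine worker pool), alongside the removal of the vendored golang.org/x/exp/slices package whose files are deleted above. A minimal sketch of tunny's v0.1.x API (NewFunc, Process, Close); how this patch actually wires the pool in is not shown in these hunks:

// Sketch only: typical tunny usage, not taken from this patch.
package main

import (
	"fmt"
	"runtime"

	"github.com/Jeffail/tunny"
)

func main() {
	// One worker per CPU; the callback is the per-task work function.
	pool := tunny.NewFunc(runtime.NumCPU(), func(payload interface{}) interface{} {
		n := payload.(int)
		return n * n // stand-in for the real workload
	})
	defer pool.Close()

	for i := 0; i < 4; i++ {
		fmt.Println(pool.Process(i)) // Process blocks until a worker is free
	}
}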


+ 115
- 34
web_src/js/components/images/adminImages.vue View File

@@ -15,18 +15,7 @@
<div class="ui ten wide column" style="margin: 1rem 0;">
<el-checkbox v-model="checked" style="padding: 0.5rem 1rem;">{{
$i18n['cloudeBrainMirror']['platform_recommendations'] }}</el-checkbox>
<!-- <el-dropdown @command="handleCommand" trigger="click"
style="border: 1px solid rgba(34,36,38,.15);border-radius: 4px;padding: 0.5rem 1rem;">
<span class="el-dropdown-link">
{{dropdownPrivate}}<i class="el-icon-caret-bottom el-icon--right"></i>
</span>
<el-dropdown-menu slot="dropdown">
<el-dropdown-item :command="{label:$i18n['all'],private:''}">{{$i18n['all']}}</el-dropdown-item>
<el-dropdown-item :command="{label:$i18n['cloudeBrainMirror']['public'],private:false}">{{$i18n['cloudeBrainMirror']['public']}}</el-dropdown-item>
<el-dropdown-item :command="{label:$i18n['cloudeBrainMirror']['private'],private:true}">{{$i18n['cloudeBrainMirror']['private']}}</el-dropdown-item>
</el-dropdown-menu>
</el-dropdown> -->
<el-dropdown @command="handleCommandType" trigger="click"
<!-- <el-dropdown @command="handleCommandType" trigger="click"
style="border: 1px solid rgba(34,36,38,.15);border-radius: 4px;padding: 0.5rem 1rem;">
<span class="el-dropdown-link">
{{ dropdownType }}<i class="el-icon-caret-bottom el-icon--right"></i>
@@ -39,6 +28,28 @@
<el-dropdown-item :command="{ label: $i18n['cloudeBrainMirror']['c2net'], type: 2 }">{{
$i18n['cloudeBrainMirror']['c2net'] }}</el-dropdown-item>
</el-dropdown-menu>
</el-dropdown> -->
<el-dropdown @command="handleCommandComputeResource" trigger="click"
style="border: 1px solid rgba(34,36,38,.15);border-radius: 4px;padding: 0.5rem 1rem;">
<span class="el-dropdown-link">
{{ dropdownComputeResource }}<i class="el-icon-caret-bottom el-icon--right"></i>
</span>
<el-dropdown-menu slot="dropdown">
<el-dropdown-item :command="{ label: $i18n['cloudeBrainMirror']['all_compute_resource'], type: '' }">{{
$i18n['cloudeBrainMirror']['all_compute_resource'] }}</el-dropdown-item>
<el-dropdown-item :command="{ label: $i18n['computeResourceTitle']['GPU'], type: 'GPU' }">{{
$i18n['computeResourceTitle']['GPU'] }}</el-dropdown-item>
<el-dropdown-item :command="{ label: $i18n['computeResourceTitle']['GCU'], type: 'GCU' }">{{
$i18n['computeResourceTitle']['GCU'] }}</el-dropdown-item>
<el-dropdown-item :command="{ label: $i18n['computeResourceTitle']['MLU'], type: 'MLU' }">{{
$i18n['computeResourceTitle']['MLU'] }}</el-dropdown-item>
<el-dropdown-item
:command="{ label: $i18n['computeResourceTitle']['ILUVATAR-GPGPU'], type: 'ILUVATAR-GPGPU' }">{{
$i18n['computeResourceTitle']['ILUVATAR-GPGPU'] }}</el-dropdown-item>
<el-dropdown-item
:command="{ label: $i18n['computeResourceTitle']['METAX-GPGPU'], type: 'METAX-GPGPU' }">{{
$i18n['computeResourceTitle']['METAX-GPGPU'] }}</el-dropdown-item>
</el-dropdown-menu>
</el-dropdown>
<el-dropdown @command="handleApplyState" trigger="click"
style="border: 1px solid rgba(34,36,38,.15);border-radius: 4px;padding: 0.5rem 1rem;">
@@ -75,22 +86,24 @@
</el-dropdown-menu>
</el-dropdown>
</div>
<div class="ui six wide column right aligned" style="margin: 1rem 0;">
<a class="ui blue small button" href="/admin/images/commit_image">{{
<div class="ui six wide column right aligned" style="margin:1rem 0;display: flex;align-items:center;
justify-content:flex-end;">
<el-button size="medium" icon="el-icon-refresh" @click="syncComputerNetwork" v-loading="syncLoading">
{{ $i18n['cloudeBrainMirror']['syncAiNetwork'] }}</el-button>
<a class="ui blue small button" style="margin-left:10px" href="/admin/images/commit_image">{{
$i18n['cloudeBrainMirror']['create_cloud_brain_mirror'] }}</a>
</div>
<div class="ui sixteen wide column" style="padding: 0;overflow-x: auto;">
<el-table :data="tableDataCustom" style="width: 100%;min-width:1700px;" :header-cell-style="tableHeaderStyle">
<el-table-column :label="$i18n['cloudeBrainMirror']['mirror_tag']" min-width="19%" align="left" prop="tag">
<div class="ui sixteen wide column" style="padding: 0;overflow-x:auto;">
<el-table :data="tableDataCustom" style="min-width:100%;" :header-cell-style="tableHeaderStyle">
<el-table-column :label="$i18n['cloudeBrainMirror']['mirror_tag']" width="250px" align="left" prop="tag">
<template slot-scope="scope">
<div style="display: flex;align-items: center;">
<a class="text-over image_title" :title="scope.row.tag">{{ scope.row.tag }}</a>
<!-- <i class="ri-lock-2-line" style="color: #fa8c16;padding: 0 1rem;" v-if="scope.row.isPrivate"></i> -->
<img v-if="scope.row.type == 5" src="/img/jian.svg" style="margin-left: 0.5rem;">
</div>
</template>
</el-table-column>
<el-table-column :label="$i18n['cloudeBrainMirror']['mirror_description']" min-width="26%" align="left"
<el-table-column :label="$i18n['cloudeBrainMirror']['mirror_description']" width="300px" align="left"
prop="description">
<template slot-scope="scope">
<div class="image_desc" :title="scope.row.description">{{ scope.row.description }}
@@ -101,20 +114,48 @@
</div>
</template>
</el-table-column>
<el-table-column prop="cloudbrainType" :label="$i18n['cloudeBrainMirror']['available_clusters']"
min-width="10%" align="center">
<el-table-column :label="$i18n['model_compute_resource']" width="150px" align="left"
prop="compute_resource">
<template slot-scope="scope">
{{ scope.row.cloudbrainType | transformType(vm) }}
<span :title="scope.row.compute_resource">{{ $i18n['computeResourceTitle'][scope.row.compute_resource]
|| scope.row.compute_resource }}</span>
</template>
</el-table-column>
<el-table-column :label="$i18n['cloudeBrainMirror']['framework']" width="150px" align="left"
prop="framework">
<template slot-scope="scope">
{{ scope.row.framework }}<br />{{ scope.row.frameworkVersion }}
</template>
</el-table-column>
<!-- <el-table-column prop="isPrivate" :label="$i18n['cloudeBrainMirror']['state']" min-width="6%" align="center">
<template slot-scope="scope">
<span v-if="scope.row.isPrivate" style="color: rgb(250, 140, 22);">{{$i18n['cloudeBrainMirror']['private']}}</span>
<span v-else style="color: rgb(19, 194, 141);">{{$i18n['cloudeBrainMirror']['public']}}</span>
</template>
</el-table-column> -->
<el-table-column prop="creator" :label="$i18n['cloudeBrainMirror']['creator']" min-width="6%"
align="center">
<el-table-column :label="'Python'" width="100px" align="left" prop="python">
<template slot-scope="scope">
{{ scope.row.pythonVersion }}
</template>
</el-table-column>
<el-table-column :label="'Cuda'" width="100px" align="left" prop="cudaVersion">
<template slot-scope="scope">
{{ scope.row.cudaVersion || '--' }}
</template>
</el-table-column>
<el-table-column :label="$i18n['cloudeBrainMirror']['operationSystem']" width="180px" align="left"
prop="python">
<template slot-scope="scope">
{{ scope.row.operationSystem }}<br />{{ scope.row.operationSystemVersion }}
</template>
</el-table-column>
<el-table-column :label="$i18n['cloudeBrainMirror']['thirdPackages']" width="220px" align="left"
prop="thirdpackages">
<template slot-scope="scope">
<div class="image_desc" :title="scope.row.thirdPackages">{{ scope.row.thirdPackages }}</div>
</template>
</el-table-column>
<!-- <el-table-column prop="cloudbrainType" :label="$i18n['cloudeBrainMirror']['available_clusters']"
width="120px" align="center">
<template slot-scope="scope">
{{ scope.row.cloudbrainType | transformType(vm) }}
</template>
</el-table-column> -->
<el-table-column prop="creator" :label="$i18n['cloudeBrainMirror']['creator']" width="80px" align="center">
<template slot-scope="scope">
<a v-if="scope.row.userName || scope.row.relAvatarLink" :href="'/' + scope.row.userName"
:title="scope.row.userName">
@@ -126,13 +167,13 @@
</template>
</el-table-column>
<el-table-column prop="createdUnix" :label="$i18n['cloudeBrainMirror']['creation_time']" align="center"
min-width="11%">
width="160px">
<template slot-scope="scope">
{{ scope.row.createdUnix | transformTimestamp }}
</template>
</el-table-column>
<el-table-column prop="apply_status" :label="$i18n['cloudeBrainMirror']['approval_status']" min-width="10%"
align="center">
<el-table-column prop="apply_status" :label="$i18n['cloudeBrainMirror']['approval_status']" width="120px"
fixed="right" align="center">
<template slot-scope="scope">
<span v-if="scope.row.apply_status == 0" style="">{{ '--' }}</span>
<div v-if="scope.row.apply_status === 1"
@@ -168,7 +209,8 @@
</div>
</template>
</el-table-column>
<el-table-column align="center" min-width="33%" :label="$i18n['cloudeBrainMirror']['operation']">
<el-table-column align="center" width="400px" :label="$i18n['cloudeBrainMirror']['operation']"
fixed="right">
<template slot-scope="scope">
<div style="display: flex;justify-content: center;align-items: center;">
<div style="display: flex;align-items: center;padding: 0 1rem;" :title="$i18n['citations']">
@@ -253,6 +295,7 @@ export default {
search: '',
dropdownPrivate: '',
dropdownType: '',
dropdownComputeResource: '',
dropdownApplyState: '',
checked: false,
currentPageCustom: 1,
@@ -271,6 +314,8 @@ export default {
reasonDialogTitle: '',
reasonDialogContent: '',
reasonDialogData: null,

syncLoading: false,
};
},
methods: {
@@ -330,6 +375,32 @@ export default {
eidtImage(id) {
location.href = `/image/${id}/imageAdmin?${qs.stringify(this.paramsCustom)}`
},
syncComputerNetwork() {
this.syncLoading = true;
this.$axios.post(`/admin/resources/image/sync?_csrf=${csrf}`).then(res => {
this.syncLoading = false;
res = res.data;
if (res.Code === 0) {
this.$message({
type: 'success',
message: this.$i18n['submittedSuccessfully']
});
this.getImageListCustom()
} else {
this.$message({
type: 'error',
message: this.$i18n['submittedFailed']
});
}
}).catch(err => {
console.log(err);
this.syncLoading = false;
this.$message({
type: 'error',
message: this.$i18n['submittedFailed']
});
});
},
imageStar(index, id, isStar) {
if (isStar) {
this.$axios.put(`/image/${id}/action/unstar`).then((res) => {
@@ -389,6 +460,12 @@ export default {
this.paramsCustom.page = 1
this.getImageListCustom()
},
handleCommandComputeResource(command) {
this.dropdownComputeResource = command.label
this.paramsCustom.computeResource = command.type
this.paramsCustom.page = 1
this.getImageListCustom()
},
handleApplyState(command) {
this.dropdownApplyState = command.label
this.paramsCustom.apply = command.type
@@ -470,6 +547,7 @@ export default {
this.dropdownPrivate = this.$i18n['all'];
this.dropdownType = this.$i18n['cloudeBrainMirror']['all_cluster'];
this.dropdownApplyState = this.$i18n['cloudeBrainMirror']['all_approval_status'];
this.dropdownComputeResource = this.$i18n['cloudeBrainMirror']['all_compute_resource'];
let params = new URLSearchParams(location.search)
if (location.search) {
this.firstSearch = true
@@ -483,6 +561,9 @@ export default {
if (params.has('cloudbrainType')) {
this.dropdownType = params.get('cloudbrainType') === 0 ? this.$i18n['cloudeBrainMirror']['openi'] : this.$i18n['cloudeBrainMirror']['c2net']
}
if (params.has('computeResource')) {
this.dropdownComputeResource = this.$i18n['computeResourceTitle'][params.get('computeResource')];
}
if (params.has('apply')) {
const apply = params.get('apply');
switch (apply) {


+ 1
- 1
web_src/js/features/clipboard.js View File

@@ -3,7 +3,7 @@ export default async function initClipboard(elements) {
if (!els || !els.length) return;

const { default: ClipboardJS } = await import(
/* webpackChunkName: "clipboardjs" */ "clipboard"
/* webpackChunkName: "clipboard" */ "clipboard"
);

const clipboard = new ClipboardJS(els);


+ 1
- 1
web_src/js/features/highlight.js View File

@@ -3,7 +3,7 @@ export default async function highlight(elementOrNodeList) {
const nodes = 'length' in elementOrNodeList ? elementOrNodeList : [elementOrNodeList];
if (!nodes.length) return;

const {default: Worker} = await import(/* webpackChunkName: "highlight.worker" */'./highlight.worker.js');
const {default: Worker} = await import(/* webpackChunkName: "highlight" */'./highlight.worker.js');
const worker = new Worker();

worker.addEventListener('message', ({data}) => {


+ 39
- 5
web_src/js/features/i18nVue.js View File

@@ -147,13 +147,15 @@ export const i18nVue = {
notRemind: '不再提醒',
close: '关闭',
warmPrompt: '温馨提示',
submittedSuccessfully: '提交成功!',
submittedFailed: '提交失败!',

cloudeBrainMirror: {
cloud_brain_mirror: '云脑镜像',
public_mirror: '公开镜像',
recommendImages: '平台推荐镜像',
platform_recommendations:'仅显示平台推荐',
placeholder: '搜镜像Tag/描述/标签...',
placeholder: '搜镜像Tag/描述/操作系统/安装的软件包/标签...',
search:'搜索',
mirror_tag:'镜像Tag',
mirror_description:'镜像描述',
@@ -179,6 +181,7 @@ export const i18nVue = {
cancel_recommendation: '取消推荐',
set_as_recommended: '设为推荐',
create_cloud_brain_mirror: '创建云脑镜像',
syncAiNetwork: '同步智算网络',
openi: '启智',
c2net: '智算网络',
defaultsort: '默认排序',
@@ -201,6 +204,10 @@ export const i18nVue = {
not_recommend: '不同意推荐',
reason: '原因',
pleaseEnterReason: '请输入原因',
framework: '框架',
operationSystem: '操作系统',
thirdPackages: 'Python依赖库',
all_compute_resource: '全部计算资源',
},
modelObj: {
model_label: '选择模型',
@@ -223,7 +230,17 @@ export const i18nVue = {
export_success: '导出成功',
exporting: '正在导出',
please_select_file:'请先选择文件',
}
},
computeResourceTitle: {
'CPU/GPU': '英伟达GPU',
GPU: '英伟达GPU',
NPU: '昇腾NPU',
GCU: '燧原GCU',
MLU: '寒武纪MLU',
DCU: '海光DCU',
'ILUVATAR-GPGPU': '天数智芯GPGPU',
'METAX-GPGPU': '沐曦GPGPU',
},
},
US: {
computer_vision: "computer vision",
@@ -377,16 +394,18 @@ export const i18nVue = {
notRemind: 'Don\'t remind again',
close: 'Close',
warmPrompt: 'Tips',
submittedSuccessfully: 'Submitted Successfully!',
submittedFailed: 'Submission Failed!',

cloudeBrainMirror: {
cloud_brain_mirror: 'Cloud Brain Mirror',
public_mirror: 'Public Mirror',
recommendImages: 'Recommend Mirror',
platform_recommendations:'Show platform recommendations only',
placeholder: 'Search Mirror tag / description / tag ... ',
placeholder: 'Search Mirror tag/description/operating system/python library/label...',
search:'Search',
mirror_tag:'Mirror Tag',
mirror_description:'mirror_description ',
mirror_description:'Mirror Description ',
available_clusters: 'Cluster/Compute Resources',
creator: 'Creator',
creation_time: 'Creation time',
@@ -409,6 +428,7 @@ export const i18nVue = {
cancel_recommendation: 'Cancel recommendation',
set_as_recommended: 'Set as recommended',
create_cloud_brain_mirror: 'Create cloud brain mirror',
syncAiNetwork: 'Sync C2Net Network',
openi: 'OpenI',
c2net: 'C²NET',
defaultsort: 'Default Sort',
@@ -430,6 +450,10 @@ export const i18nVue = {
not_recommend: 'Not recommend',
reason: 'Reason',
pleaseEnterReason: 'Please enter the reason',
framework: 'Framework',
operationSystem: 'Operating system',
thirdPackages: 'Python libraries',
all_compute_resource: 'All Compute Resource',
},
modelObj: {
model_label: 'Select Model',
@@ -452,6 +476,16 @@ export const i18nVue = {
export_success: 'Export success',
exporting: ' Exporting',
please_select_file: 'Please select a file first',
}
},
computeResourceTitle: {
'CPU/GPU': 'NVIDIA GPU',
GPU: 'NVIDIA GPU',
NPU: 'Ascend NPU',
GCU: 'Enflame GCU',
MLU: 'Cambricon MLU',
DCU: 'HYGON DCU',
'ILUVATAR-GPGPU': 'Iluvatar CoreX GPGPU',
'METAX-GPGPU': 'MetaX GPGPU',
},
},
};

+ 1
- 1
web_src/less/_admin.less View File

@@ -93,7 +93,7 @@
margin-right: 10px !important;
border: 1px solid #d4d4d5;
border-radius: 4px;
box-shadow: 0 1px 2px 0 rgb(34 36 38 / 15%);
box-shadow: 0 1px 2px 0 rgba(34, 36, 38, 15%);
background-color: #fafafa !important;
.item {
align-self: flex-start !important;


+ 9
- 0
web_src/vuepages/apis/modules/common.js View File

@@ -1,5 +1,14 @@
import service from '../service';

// Fetch a static file by path
export const getStaticFile = (filePathName) => {
return service({
url: `${filePathName}`,
method: 'get',
params: {},
});
}

// Fetch promote configuration data
export const getPromoteData = (filePathName) => {
return service({


+ 61
- 0
web_src/vuepages/apis/modules/computingpower.js View File

@@ -1,6 +1,67 @@
import service from "../service";
import Qs from 'qs';

// Computing power demand - compute resources: query the list of available AI computing centers
export const getAvailableAiCenterList = () => {
return service({
url: `/api/v1/resources/ai_center/available`,
method: 'get',
params: {},
data: {},
});
}

// Query the AI computing center list
export const getAiCenterList = () => {
return service({
url: `/explore/card_request/resources/queue/centers`,
method: 'get',
params: {},
data: {},
});
}

// Query the names of all resource queues
export const getResQueueCode = (params) => { // cluster
return service({
url: `/explore/card_request/resources/queue/codes`,
method: 'get',
params,
});
}

// Get the resource specification list
// params: cluster, resource, available
export const getSpecificationList = (params) => {
return service({
url: `/explore/card_request/specification/list`,
method: 'get',
params,
});
};

/* Compute resources */
// Query accelerator card type data
export const getAccCardList = () => {
return service({
url: `/api/v1/resources/acc_card/list`,
method: 'get',
params: {}
});
};

// Query the compute resource list
// params: page, pageSize, resource-GPU|NPU..., accCardType-ASCEND910|..., accCardNum:-1|1|2|4|8..., excludeAccCardNums-"1|2|4|8", centerCode, minPrice, maxPrice (point values; pass -1 when not set)
export const getResourceList = (params) => {
return service({
url: `/explore/card_request/resource/list`,
method: 'get',
params: { ...params },
paramsSerializer: _params => Qs.stringify(_params, { arrayFormat: 'repeat' }),
});
};

/* Computing power demand */
// Get the compute resource and accelerator card type info required for creation
export const getDemandCreationRequired = (params) => {
return service({


+ 64
- 3
web_src/vuepages/apis/modules/images.js View File

@@ -1,7 +1,8 @@
import service from "../service";
import service from '../service';
import Qs from 'qs';

// Get images
// params: { type-0|1|2, q, page, pageSize, cloudbrainType-0,1 }
// params: { type-0|1|2, q, page, pageSize, cloudbrainType-0,1, sort, framework, frameworkVersion, cuda, python }
export const getImages = (params) => {
const typeMap = {
'0': 'recommend',
@@ -17,9 +18,69 @@ export const getImages = (params) => {
method: "get",
params: {
q: params.q || '',
cloudbrainType: params.cloudbrainType || '-1',
sort: params.sort,
computeResource: params.computeResource,
framework: params.framework,
frameworkVersion: params.frameworkVersion,
cuda: params.cuda,
python: params.python,
spec: params.spec,
trainType: params.trainType,
page: params.page || 1,
pageSize: params.pageSize || 5,
cloudbrainType: params.cloudbrainType,
}
});
};

export const putImageAction = (params) => {
return service({
url: `/image/${params.id}/action/${params.action}`,
method: 'put',
params: {}
});
};

export const deleteImage = (params) => {
return service({
url: `/image/${params.id}`,
method: 'delete',
params: {}
});
};

export const submitImage = (data) => {
return service({
url: data.link,
method: 'post',
params: {},
data: Qs.stringify(data),
});
};

export const getImageById = (params) => {
return service({
url: `/image/${params.id}`,
method: 'get',
params: {},
});
};

export const searchImageTopics = (params) => {
return service({
url: `/api/v1/image/topics/search`,
method: 'get',
params: { q: params.q }
});
};

// Query the filter options for image selection
// params: index - option type: 0 (available frameworks) | 1 (available framework versions) | 2 (available python versions) | 3 (available cuda versions)
// framework - framework, version - framework version, python - python version
export const getImageAvailabelFilter = (params) => {
return service({
url: `/api/v1/images/availableFilter`,
method: 'get',
params: { ...params }
});
};
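The comment above describes the new filter endpoint: index selects which column of options to fetch (0 frameworks, 1 framework versions, 2 python versions, 3 cuda versions), narrowed by the framework, version, and python values already chosen. A hedged Go sketch of the same call, assuming only the JSON envelope (code plus a data array of strings) that the Vue cascader further down consumes; the host, the framework value, and any field names beyond that are illustrative, not confirmed by this patch:

// Sketch only: client-side view of GET /api/v1/images/availableFilter.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// availableFilterResponse mirrors what the frontend reads (res.data.code / res.data.data);
// the server-side struct is an assumption.
type availableFilterResponse struct {
	Code int      `json:"code"`
	Data []string `json:"data"`
}

func main() {
	q := url.Values{}
	q.Set("index", "1")           // 1 = available framework versions
	q.Set("framework", "PyTorch") // hypothetical framework already selected

	// Host is a placeholder; the route comes from the JS module above.
	resp, err := http.Get("https://example.com/api/v1/images/availableFilter?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out availableFilterResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Code, out.Data) // e.g. 0 [1.8.1 2.0.0]
}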

+ 0
- 1
web_src/vuepages/components/cloudbrain/FormTop.vue View File

@@ -115,7 +115,6 @@ export default {
margin-left: -1px;
height: 38px;
padding: 0 12px;
border-left: none;
margin-bottom: 5px;

i {


+ 249
- 19
web_src/vuepages/components/cloudbrain/ImageSelectV1.vue View File

@@ -6,7 +6,8 @@
</div>
<div class="content" :class="errStatus ? 'error' : ''">
<el-input class="field-input" v-model="imageUrl" @input="imageChange"
:placeholder="$t('cloudbrainObj.selectImagePlaceholder')"></el-input>
:readonly="configs.computerResouce != 'GPU'" :placeholder="configs.computerResouce == 'GPU' ?
$t('cloudbrainObj.selectImagePlaceholder') : $t('cloudbrainObj.selectImage')"></el-input>
</div>
</div>
<div class="right-area">
@@ -16,8 +17,8 @@
</div>
</div>
<el-dialog class="model-dlg" :visible.sync="dlgShow" :title="$t('cloudbrainObj.selectImage')" width="1000px"
:modal="true" :close-on-click-modal="false" :show-close="true" :destroy-on-close="false" :before-close="beforeClose"
@open="open" @closed="closed">
:modal="true" :close-on-click-modal="false" :show-close="true" :destroy-on-close="false"
:before-close="beforeClose" @open="open" @closed="closed">
<div class="dlg-content">
<div class="main-area" v-loading="dlgLoading">
<div class="image-tabs-c">
@@ -26,9 +27,23 @@
<el-tab-pane :label="$t('cloudbrainObj.myImage')" name="second"></el-tab-pane>
<el-tab-pane :label="$t('cloudbrainObj.myFavImage')" name="third"></el-tab-pane>
</el-tabs>
</div>
<div class="filter-c">
<div class="cascader-c">
<span class="cascader-tit">{{ $t('imagesObj.filterImages') }}:</span>
<div class="cascader-content-c">
<div class="cascader-tips">{{ filterImagesPlaceholder }}</div>
<el-cascader class="image-filter" ref="cascaderFilterRef" v-model="dlgCascaderFilter.value"
:props="dlgCascaderProps" :options="dlgCascaderFilter.options" clearable
:placeholder="$t('imagesObj.filterImages')" :popper-class="dlgCascaderFilter.popperClass"
@expand-change="handleDlgCascaderFilterExpandChange"
@visible-change="handleDlgCascaderFilterVisibleChange"
@change="handleDlgCascaderFilterChange"></el-cascader>
</div>
</div>
<el-input size="small" class="search-inp" :placeholder="$t('cloudbrainObj.searchImagePlaceholder')"
v-model="dlgSearchValue" @keydown.enter.stop.native.prevent="inputSearch">
<div slot="suffix" class="search-inp-icon" @click="inputSearch">
v-model="dlgSearchValue" @keydown.enter.stop.native.prevent="search">
<div slot="suffix" class="search-inp-icon" @click="search">
<i class="el-icon-search"></i>
</div>
</el-input>
@@ -41,21 +56,35 @@
<span :title="item.tag">{{ item.tag }}</span>
<img v-if="item.type == 5" src="/img/jian.svg" />
</div>
<div></div>
</div>
<div class="item-l-m">
<div class="item-topics">
<span class="type-compute-resource"
:style="item.computeResourceColor ? `background-color: ${item.computeResourceColor};` : ''">{{
item.computeResourceShow }}</span>
<span class="type-sys" v-for="(topic, index) in item.topicsSys" :key="`sys-${index}`">{{ topic
}}</span>
<span class="type-pkg" v-for="(topic, index) in item.topicsPkg" :key="`pkg-${index}`">{{ topic
}}</span>
<span v-for="(topic, index) in item.topics" :key="index">{{ topic }}</span>
</div>
</div>
<div class="item-l-b">
<a class="item-creator" :href="`/${item.userName}`">
<a v-if="item.userName" class="item-creator" :href="`/${item.userName}`">
<img class="ui avatar mini image" style="width: 20px; height: 20px"
:src="item.relAvatarLink ? item.relAvatarLink : `/user/avatar/ghost/-1`" />
</a>
<a v-else class="item-creator" href="javascript:;">
<img class="ui avatar mini image" style="width: 20px; height: 20px"
:src="`/user/avatar/ghost/-1`" />
</a>
<span class="item-descr" :title="item.description">{{ item.description }}</span>
</div>
</div>
<div class="item-r">
<el-button v-if="item.status == 1" @click="chooseImage(item)">{{ $t('cloudbrainObj.useImage')
}}</el-button>
}}</el-button>
<span class="error-content" v-if="item.status == 0">
<i class="CREATING"></i>
<span style="color:#5a5a5a">{{ $t('cloudbrainObj.submitting') }}</span>
@@ -81,15 +110,19 @@
</template>

<script>
import { getImages } from '~/apis/modules/images';
import { getImages, getImageAvailabelFilter } from '~/apis/modules/images';
import { COMPUTER_RESOURCES_COLORS, JOB_TYPE } from '~/const';
import { getListValueWithKey } from '~/utils';

export default {
name: "ImageSelectV1",
props: {
value: { type: String, required: true },
type: { type: Number, default: 0 },
type: { type: Number, default: 0 }, // -1: all, 0: OpenI GPU, 2: C2Net GPU
showTitle: { type: Boolean, default: true },
required: { type: Boolean, default: true },
spec: { type: String, required: true, },
configs: { type: Object, default: () => ({}) },
},
data() {
return {
@@ -100,7 +133,62 @@ export default {
dlgLoading: false,
dlgActiveName: 'first',
dlgSearchValue: '',

dlgCascaderFilter: {
popperClass: `popper-filter-${Math.random().toString().replace('.', '')}`,
value: [],
options: []
},
dlgCascaderProps: {
lazy: true,
lazyLoad: (node, resolve) => {
const { level } = node;
const path = node.path || [];
const framework = path[0];
const version = path[1];
const python = path[2];
getImageAvailabelFilter({
index: level,
framework,
version,
python,
compute_resource: this.configs.computerResouce,
recommend: this.dlgActiveName == 'first' ? true : undefined,
mine: this.dlgActiveName == 'second' ? true : undefined,
star: this.dlgActiveName == 'third' ? true : undefined,
}).then(res => {
if (res.data.code == 0) {
if (level == 0) {
let data = res.data.data || [];
if (data.indexOf('Other') >= 0) {
data = data.filter(item => item !== 'Other');
data.push('Other');
}
const nodes = data.map(item => ({
value: item,
label: item,
leaf: item == 'Other' ? true : false,
}));
resolve(nodes);
} else {
const nodes = (res.data.data).map(item => ({
value: item,
label: item ? item : 'None',
leaf: level >= (this.configs.computerResouce == 'GPU' ? 3 : 2)
}));
if (!nodes.length) {
node.config.leaf = true;
}
resolve(nodes);
}
} else {
resolve([]);
}
}).catch(err => {
console.log(err);
resolve([]);
});
}
},
dlgPage: 1,
dlgPageSize: 5,
dlgTotal: 0,
@@ -116,6 +204,21 @@ export default {
this.imageUrl = newVal.toString();
this.$emit('input', newVal);
}
},
spec: {
immediate: true,
handler(newVal) {
this.$emit('changeImage');
}
},
},
computed: {
filterImagesPlaceholder() {
const list = this.$t('imagesObj.filterImagesPlaceholder').split('/');
if (this.configs.computerResouce != 'GPU') {
list.pop();
}
return list.join('/');
}
},
methods: {
@@ -133,13 +236,59 @@ export default {
dlgTabClick(tab, event) {
this.dlgTotal = 0;
this.dlgPage = 1;
this.resetDlgCascaderFilter();
this.searchImageData();
},
inputSearch() {
search() {
this.dlgTotal = 0;
this.dlgPage = 1;
this.searchImageData();
},
resetDlgCascaderFilter() {
this.dlgCascaderFilter.value = [];
this.dlgCascaderFilter.options = [];
this.handleDlgCascaderFilterExpandChange([]);
},
handleDlgCascaderFilterVisibleChange() {
const popper = document.querySelector(`.${this.dlgCascaderFilter.popperClass}`);
if (popper && popper.querySelector('.popper-filter-title')) return;
const title = document.createElement('div');
title.classList = ['popper-filter-title'];
title.style = 'display:flex;margin-left:-1px;';
let innerHtml = '';
const titles = [
this.$t('imagesObj.frameworkName'), this.$t('imagesObj.frameworkVersion'),
this.$t('imagesObj.pyVersion'), this.$t('imagesObj.cudaVersion')
];
if (this.configs.computerResouce != 'GPU') {
titles.pop();
}
titles.forEach((item, index) => {
innerHtml += `<div class="popper-filter-title-item"
style="display:flex;align-items:center;height:30px;width:180px;box-sizing:border-box;color:rgb(136, 136, 136);
${index != 0 ? 'border-left:1px solid #E4E7ED;display:none;' : ''}padding-top:10px;padding-left:30px;font-size:12px;">${item}</div>`;
})
title.innerHTML = innerHtml;
popper.prepend(title);
},
handleDlgCascaderFilterExpandChange(value) {
const popper = document.querySelector(`.${this.dlgCascaderFilter.popperClass}`);
if (!popper) return;
const title = popper.querySelector('.popper-filter-title');
if (!title) return;
const items = title.querySelectorAll('.popper-filter-title-item');
items.forEach((item, index) => {
const style = item.style;
if (index <= value.length) {
style.display = 'flex';
} else {
style.display = 'none';
}
})
},
handleDlgCascaderFilterChange() {
this.search();
},
searchImageData() {
const tabName = this.dlgActiveName;
const typeMap = {
@@ -153,11 +302,45 @@ export default {
page: this.dlgPage,
pageSize: this.dlgPageSize,
cloudbrainType: this.type,
computeResource: this.configs.computerResouce,
framework: this.dlgCascaderFilter.value[0],
frameworkVersion: this.dlgCascaderFilter.value[1],
python: this.dlgCascaderFilter.value[2],
cuda: this.dlgCascaderFilter.value[3],
spec: this.configs.computerResouce == 'GPU' ? -1 : this.spec,
trainType: this.configs.computerResouce == 'GPU' ? undefined : getListValueWithKey(JOB_TYPE, this.configs.taskType, 'k', 'train_type'),
}
this.dlgLoading = true;
getImages(params).then(res => {
this.dlgLoading = false;
const data = res.data?.images || [];
data.forEach(item => {
const topicsSys = [];
if (item.framework) {
topicsSys.push(`${item.framework} ${item.frameworkVersion}`.trim());
}
if (item.pythonVersion) {
topicsSys.push(`Python ${item.pythonVersion}`);
}
if (item.cudaVersion) {
topicsSys.push(`Cuda ${item.cudaVersion}`);
}
if (item.operationSystem) {
topicsSys.push(`${item.operationSystem} ${item.operationSystemVersion}`.trim());
}
const topicsPkg = [];
const thirdPackages = item.thirdPackages.split('\n');
thirdPackages.forEach(pkgLine => {
if (pkgLine) {
topicsPkg.push(pkgLine.trim().replace('==', ' '));
}
});
item.topicsSys = topicsSys;
item.topicsPkg = topicsPkg;
const compute_resource = item.compute_resource || 'GPU';
item.computeResourceColor = COMPUTER_RESOURCES_COLORS[compute_resource];
item.computeResourceShow = this.$t('computeResourceTitle.' + compute_resource);
});
this.imageList = data;
this.dlgTotal = parseInt(res.data?.count || 0);
}).catch(err => {
@@ -192,7 +375,9 @@ export default {
return !this.errStatus;
},
},
beforeMount() { },
beforeMount() {
this.resetDlgCascaderFilter();
},
mounted() { },
};
</script>
@@ -242,13 +427,40 @@ export default {
overflow: hidden;
margin-right: 5px;
}
}

.filter-c {
display: flex;
align-items: flex-end;
justify-content: space-between;

.cascader-c {
display: flex;
align-items: flex-end;

.cascader-tit {
margin-bottom: 6px;
}

.cascader-content-c {
.cascader-tips {
font-size: 12px;
padding-left: 16px;
}
}
}

.image-filter {
margin-top: -1px;
width: 340px;
}

.search-inp {
overflow: hidden;
width: 200px;
width: 330px;
z-index: 5;
position: relative;
margin-top: -10px;
margin-top: -1px;

.search-inp-icon {
height: 100%;
@@ -299,14 +511,19 @@ export default {
vertical-align: middle;
}
}
}

.item-l-m {
margin-top: 4px;

.item-topics {
display: inline-block;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
display: flex;
flex-wrap: wrap;

span {
display: flex;
align-items: center;
justify-content: center;
font-size: .85714286rem;
margin: 0 0.14285714em;
padding: 0.3em 0.5em;
@@ -314,8 +531,21 @@ export default {
color: rgba(0, 0, 0, .6);
font-weight: 700;
border-radius: 0.28571429rem;
line-height: 1;
cursor: pointer;
line-height: 1;
margin-bottom: 3px;

&.type-sys {
background: rgba(50, 145, 248, 0.2);
}

&.type-pkg {
background: rgba(91, 185, 115, 0.2);
}

&.type-compute-resource {
color: white;
}
}
}
}


+ 15
- 2
web_src/vuepages/const/index.js View File

@@ -6,12 +6,25 @@ export const POINT_ACTIONS = [
{ k: 'CreatePublicRepo', v: i18n.t('createPublicProject') }, { k: 'CreateIssue', v: i18n.t('dailyPutforwardTasks') }, { k: 'CreatePullRequest', v: i18n.t('dailyPR') }, { k: 'CommentIssue', v: i18n.t('comment') }, { k: 'UploadAttachment', v: i18n.t('uploadDatasetFile') }, { k: 'CreateNewModelTask', v: i18n.t('importNewModel') }, { k: 'BindWechat', v: i18n.t('completeWechatCodeScanningVerification') },
{ k: 'CreateCloudbrainTask', v: i18n.t('dailyRunCloudbrainTasks') }, { k: 'DatasetRecommended', v: i18n.t('datasetRecommendedByThePlatform') }, { k: 'CreateImage', v: i18n.t('submitNewPublicImage') }, { k: 'ImageRecommend', v: i18n.t('imageRecommendedByThePlatform') }, { k: 'ChangeUserAvatar', v: i18n.t('firstChangeofAvatar') }, { k: 'PushCommits', v: i18n.t('dailyCommit') }, { k: 'TaskInviteFriendRegister', v: i18n.t('user.inviteFriends') }
];
export const JOB_TYPE = [{ k: 'DEBUG', v: i18n.t('debugTask') }, { k: 'TRAIN', v: i18n.t('trainTask') }, { k: 'INFERENCE', v: i18n.t('inferenceTask') }, { k: 'BENCHMARK', v: i18n.t('benchmarkTask') }, { k: 'ONLINEINFERENCE', v: i18n.t('onlineinfer') }, { k: 'HPC', v: i18n.t('superComputeTask') }];

export const JOB_TYPE = [
{ k: 'DEBUG', v: i18n.t('debugTask'), train_type: 'Notebook' },
{ k: 'TRAIN', v: i18n.t('trainTask'), train_type: 'TrainJob' },
{ k: 'INFERENCE', v: i18n.t('inferenceTask'), train_type: 'TrainJob' },
{ k: 'BENCHMARK', v: i18n.t('benchmarkTask') },
{ k: 'ONLINEINFERENCE', v: i18n.t('onlineinfer') },
{ k: 'HPC', v: i18n.t('superComputeTask') }
];
// Resource management
export const CLUSTERS = [{ k: 'OpenI', v: i18n.t('resourcesManagement.OpenI') }, { k: 'C2Net', v: i18n.t('resourcesManagement.C2Net') }];
export const AI_CENTER = [{ k: 'OpenIOne', v: i18n.t('resourcesManagement.OpenIOne') }, { k: 'OpenITwo', v: i18n.t('resourcesManagement.OpenITwo') }, { k: 'OpenIChengdu', v: i18n.t('resourcesManagement.OpenIChengdu') }, { k: 'pclcci', v: i18n.t('resourcesManagement.pclcci') }, { k: 'hefei', v: i18n.t('resourcesManagement.hefeiCenter') }, { k: 'xuchang', v: i18n.t('resourcesManagement.xuchangCenter') }];
export const COMPUTER_RESOURCES = [{ k: 'CPU', v: 'CPU' }, { k: 'GPU', v: 'GPU' }, { k: 'NPU', v: 'NPU' }, { k: 'GCU', v: 'GCU' }, { k: 'MLU', v: 'MLU' }, { k: 'DCU', v: 'DCU' }, { k: 'ILUVATAR-GPGPU', v: 'ILUVATAR-GPGPU' }, { k: 'METAX-GPGPU', v: 'METAX-GPGPU' }];
export const COMPUTER_RESOURCES_COLORS = {
'GPU': '#4fb62f',
'GCU': '#e73828',
'MLU': '#0077ed',
'ILUVATAR-GPGPU': '#0038bd',
'METAX-GPGPU': '#5c246a',
};
export const ACC_CARD_TYPE = [{ k: 'T4', v: 'T4' }, { k: 'A100', v: 'A100' }, { k: 'V100', v: 'V100' }, { k: 'ASCEND910', v: 'Ascend 910' }, { k: 'ASCEND-D910B', v: 'Ascend-D910B' }, { k: 'MLU270', v: 'MLU270' }, { k: 'MLU290', v: 'MLU290' }, { k: 'RTX3080', v: 'RTX3080' }, { k: 'ENFLAME-T20', v: 'ENFLAME-T20' }, { k: 'DCU', v: 'DCU' }, { k: 'BI-V100', v: 'BI-V100' }, { k: 'MR-V100', v: 'MR-V100' }, { k: 'N100', v: 'N100' }];
export const SPECIFICATION_STATUS = [{ k: '1', v: i18n.t('resourcesManagement.willOnShelf') }, { k: '2', v: i18n.t('resourcesManagement.onShelf') }, { k: '3', v: i18n.t('resourcesManagement.offShelf') }];
export const NETWORK_TYPE = [{ k: 1, v: `${i18n.t('cloudbrainObj.networkType')}(${i18n.t('cloudbrainObj.noInternet')})` }, { k: 2, v: `${i18n.t('cloudbrainObj.networkType')}(${i18n.t('cloudbrainObj.hasInternet')})` }];


Some files were not shown because too many files changed in this diff
