#5000 V20231211

Merged
ychao_1983 merged 124 commits from V20231211 into develop 4 months ago
  1. +17
    -15
      entity/cluster.go
  2. +9
    -8
      go.mod
  3. +17
    -15
      go.sum
  4. +442
    -0
      models/card_request.go
  5. +33
    -4
      models/cloudbrain.go
  6. +4
    -0
      models/cloudbrain_spec.go
  7. +3
    -0
      models/models.go
  8. +37
    -38
      models/resource_queue.go
  9. +208
    -69
      models/resource_scene.go
  10. +78
    -37
      models/resource_specification.go
  11. +66
    -278
      models/user_business_analysis.go
  12. +0
    -21
      models/user_business_struct.go
  13. +640
    -0
      models/user_year_summary.go
  14. +49
    -2
      modules/grampus/resty.go
  15. +1
    -1
      modules/minio_ext/constants.go
  16. +1
    -1
      modules/storage/minio.go
  17. +3
    -3
      modules/storage/obs.go
  18. +42
    -0
      modules/structs/card_requests.go
  19. +7
    -0
      options/locale/locale_en-US.ini
  20. +7
    -0
      options/locale/locale_zh-CN.ini
  21. BIN
      public/img/ros-hmci/homepage1.png
  22. BIN
      public/img/ros-hmci/mbz605.png
  23. +30
    -5
      routers/admin/resources.go
  24. +268
    -0
      routers/card_request/card_request.go
  25. +9
    -4
      routers/home.go
  26. +1
    -1
      routers/repo/cloudbrain.go
  27. +8
    -4
      routers/repo/user_data_analysis.go
  28. +24
    -3
      routers/routes/routes.go
  29. +148
    -19
      services/ai_task_service/cluster/c2net.go
  30. +1
    -10
      services/ai_task_service/cluster/cloudbrain_two.go
  31. +1
    -0
      services/ai_task_service/context/context.go
  32. +3
    -11
      services/ai_task_service/task/cloudbrain_one_notebook_task.go
  33. +2
    -58
      services/ai_task_service/task/cloudbrain_one_train_task.go
  34. +3
    -28
      services/ai_task_service/task/cloudbrain_two_train_task.go
  35. +2
    -10
      services/ai_task_service/task/grampus_notebook_task.go
  36. +2
    -10
      services/ai_task_service/task/grampus_online_infer_task.go
  37. +2
    -58
      services/ai_task_service/task/grampus_train_task.go
  38. +10
    -0
      services/ai_task_service/task/opt_handler.go
  39. +9
    -2
      services/ai_task_service/task/task_extend.go
  40. +11
    -44
      services/ai_task_service/task/task_service.go
  41. +146
    -0
      services/card_request/card_request.go
  42. +43
    -47
      services/cloudbrain/resource/resource_queue.go
  43. +1
    -0
      services/cloudbrain/resource/resource_scene.go
  44. +8
    -0
      services/cloudbrain/resource/resource_specification.go
  45. +1
    -1
      templates/admin/navbar.tmpl
  46. +19
    -6
      templates/base/head_navbar.tmpl
  47. +18
    -4
      templates/base/head_navbar_fluid.tmpl
  48. +18
    -4
      templates/base/head_navbar_home.tmpl
  49. +18
    -4
      templates/base/head_navbar_pro.tmpl
  50. +8
    -0
      templates/computingpower/demand.tmpl
  51. +10
    -0
      templates/computingpower/domestic.tmpl
  52. +0
    -7
      templates/explore/domestic.tmpl
  53. +0
    -1
      vendor/golang.org/x/crypto/acme/version_go112.go
  54. +0
    -1
      vendor/golang.org/x/crypto/argon2/blamka_amd64.go
  55. +0
    -1
      vendor/golang.org/x/crypto/argon2/blamka_amd64.s
  56. +0
    -1
      vendor/golang.org/x/crypto/argon2/blamka_ref.go
  57. +0
    -1
      vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
  58. +0
    -1
      vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
  59. +0
    -1
      vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go
  60. +0
    -1
      vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
  61. +0
    -1
      vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
  62. +0
    -1
      vendor/golang.org/x/crypto/blake2b/register.go
  63. +1
    -2
      vendor/golang.org/x/crypto/chacha20/chacha_arm64.go
  64. +1
    -2
      vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
  65. +1
    -2
      vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
  66. +0
    -1
      vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go
  67. +0
    -1
      vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s
  68. +0
    -1
      vendor/golang.org/x/crypto/chacha20/chacha_s390x.go
  69. +0
    -1
      vendor/golang.org/x/crypto/chacha20/chacha_s390x.s
  70. +0
    -1
      vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
  71. +0
    -1
      vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s
  72. +0
    -1
      vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go
  73. +0
    -1
      vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go
  74. +0
    -1
      vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s
  75. +0
    -1
      vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go
  76. +0
    -71
      vendor/golang.org/x/crypto/ed25519/ed25519.go
  77. +3
    -1
      vendor/golang.org/x/crypto/hkdf/hkdf.go
  78. +0
    -1
      vendor/golang.org/x/crypto/internal/alias/alias.go
  79. +0
    -1
      vendor/golang.org/x/crypto/internal/alias/alias_purego.go
  80. +0
    -1
      vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
  81. +0
    -1
      vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
  82. +0
    -1
      vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
  83. +0
    -1
      vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
  84. +0
    -1
      vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
  85. +0
    -1
      vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
  86. +0
    -1
      vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
  87. +0
    -1
      vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
  88. +0
    -1
      vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
  89. +0
    -1
      vendor/golang.org/x/crypto/sha3/hashes_generic.go
  90. +0
    -1
      vendor/golang.org/x/crypto/sha3/keccakf.go
  91. +0
    -1
      vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
  92. +0
    -1
      vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
  93. +0
    -1
      vendor/golang.org/x/crypto/sha3/register.go
  94. +9
    -5
      vendor/golang.org/x/crypto/sha3/sha3.go
  95. +6
    -5
      vendor/golang.org/x/crypto/sha3/sha3_s390x.go
  96. +0
    -1
      vendor/golang.org/x/crypto/sha3/sha3_s390x.s
  97. +14
    -15
      vendor/golang.org/x/crypto/sha3/shake.go
  98. +0
    -1
      vendor/golang.org/x/crypto/sha3/shake_generic.go
  99. +0
    -1
      vendor/golang.org/x/crypto/sha3/xor.go
  100. +0
    -2
      vendor/golang.org/x/crypto/sha3/xor_unaligned.go

+ 17
- 15
entity/cluster.go View File

@@ -25,7 +25,7 @@ type NoteBookTask struct {
AutoStopDuration int64
Name string
Capacity int
CenterID []string
Queues []models.ResourceQueue
Code []ContainerData
Datasets []ContainerData
PreTrainModel []ContainerData
@@ -73,6 +73,7 @@ type QueryTaskResponse struct {
Token string `json:"token"`
CenterId string `json:"center_id"`
CenterName string `json:"center_name"`
QueueCode string `json:"queue_code"`
CodeUrl string `json:"code_url"`
DataUrl string `json:"data_url"`
ContainerIP string `json:"container_ip"`
@@ -108,6 +109,7 @@ func ConvertGrampusNotebookResponse(job models.GrampusNotebookInfo) *QueryTaskRe
DetailedStatus: job.DetailedStatus,
CenterId: centerId,
CenterName: centerName,
QueueCode: task.PoolId,
Url: url,
Token: token,
JobId: job.JobID,
@@ -212,20 +214,20 @@ type ClusterLog struct {
}

type TrainTask struct {
Command string `json:"command"`
Name string `json:"name"`
ImageId string `json:"imageId"`
ImageUrl string `json:"imageUrl"`
ResourceSpecId string `json:"resourceSpecId"`
CenterID []string `json:"centerID"`
ReplicaNum int `json:"replicaNum"`
Datasets []ContainerData `json:"datasets"`
PreTrainModel []ContainerData `json:"models"`
Code []ContainerData `json:"code"`
BootFile string `json:"bootFile"`
OutPut []ContainerData `json:"output"`
LogPath []ContainerData `json:"logPath"`
PoolId string `json:"poolId"`
Command string `json:"command"`
Name string `json:"name"`
ImageId string `json:"imageId"`
ImageUrl string `json:"imageUrl"`
ResourceSpecId string `json:"resourceSpecId"`
Queues []models.ResourceQueue `json:"centerID"`
ReplicaNum int `json:"replicaNum"`
Datasets []ContainerData `json:"datasets"`
PreTrainModel []ContainerData `json:"models"`
Code []ContainerData `json:"code"`
BootFile string `json:"bootFile"`
OutPut []ContainerData `json:"output"`
LogPath []ContainerData `json:"logPath"`
PoolId string `json:"poolId"`
Params models.Parameters
Spec *models.Specification
RepoName string


+ 9
- 8
go.mod View File

@@ -96,12 +96,14 @@ require (
github.com/yuin/goldmark v1.4.13
github.com/yuin/goldmark-highlighting v0.0.0-20220208100518-594be1970594
github.com/yuin/goldmark-meta v1.1.0
golang.org/x/crypto v0.13.0
golang.org/x/net v0.15.0
golang.org/x/crypto v0.16.0
golang.org/x/exp v0.0.0-20231127185646-65229373498e
golang.org/x/net v0.19.0
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/sys v0.12.0
golang.org/x/text v0.13.0
golang.org/x/tools v0.9.3
golang.org/x/sys v0.15.0
golang.org/x/text v0.14.0
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1
golang.org/x/tools v0.16.0
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
gopkg.in/ini.v1 v1.56.0
gopkg.in/ldap.v3 v3.0.2
@@ -255,9 +257,8 @@ require (
go.mongodb.org/mongo-driver v1.1.1 // indirect
go.opencensus.io v0.22.1 // indirect
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/sync v0.2.0 // indirect
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/sync v0.5.0 // indirect
google.golang.org/api v0.9.0 // indirect
google.golang.org/appengine v1.6.5 // indirect
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 // indirect


+ 17
- 15
go.sum View File

@@ -854,10 +854,12 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No=
golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 h1:hVwzHzIUGRjiF7EcUjqNxk3NCfkPxbDKRdnNE1Rpg0U=
@@ -874,8 +876,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -911,8 +913,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.0.0-20180620175406-ef147856a6dd/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -933,8 +935,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180824143301-4910a1d54f87/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -980,14 +982,14 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -996,8 +998,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1028,8 +1030,8 @@ golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224/go.mod h1:Sl4aGygMT6LrqrWc
golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM=
golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=


+ 442
- 0
models/card_request.go View File

@@ -0,0 +1,442 @@
package models

import (
"errors"
"strings"

"code.gitea.io/gitea/modules/timeutil"
"xorm.io/builder"
)

// Review workflow states for a CardRequest.Status.
const CARD_REQUEST_COMMIT = 1 // submitted, awaiting review
const CARD_REQUEST_AGREE = 2  // approved by a reviewer
// NOTE(review): name is misspelled ("REQEST"); kept as-is because it is an
// exported constant referenced by callers outside this file.
const CARD_REQEST_DISAGREE = 3 // rejected by a reviewer

const RESOURCE_TYPE_SHARE = 1     // shared resource
const RESOURCE_TYPE_EXCLUSIVE = 2 // exclusive resource

// Default ORDER BY clauses accepted by SearchCardRequest via CardRequestOptions.OrderBy.
const OrderByIDDesc = "card_request.id desc"
const OrderByStatus = "card_request.status asc,card_request.id desc"

// CardRequest is a user's application for computing-card resources. It stores
// what was requested (compute type, card type/count, disk capacity, usage
// period), how to reach the requester, and the review outcome.
type CardRequest struct {
	ID              int64 `xorm:"pk autoincr"`
	ComputeResource string
	UID             int64
	UserName        string `xorm:"-"` // display-only; filled from the user table, never persisted
	CardType        string
	AccCardsNum     string
	DiskCapacity    int64
	ResourceType    int // RESOURCE_TYPE_SHARE or RESOURCE_TYPE_EXCLUSIVE
	BeginDate       string
	BeginUnix       int64 `xorm:"INDEX"`
	EndDate         string
	EndUnix         int64 `xorm:"INDEX"`
	Contact         string
	PhoneNumber     string
	EmailAddress    string
	Org             string `xorm:"varchar(500)"`
	Description     string `xorm:"varchar(3000)"`
	Status          int                             // CARD_REQUEST_COMMIT / CARD_REQUEST_AGREE / CARD_REQEST_DISAGREE
	Review          string `xorm:"varchar(3000)"`   // reviewer's message; set on rejection, cleared on approval
	CreatedUnix     int64  `xorm:"INDEX created"`
	UpdatedUnix     int64  `xorm:"INDEX updated"`
	DeleteUnix      int64  `xorm:"deleted"` // soft-delete marker
}

// CardRequestSpecRes is a read-only projection of a card request used as the
// result row of SearchCardRequest; xorm maps it onto the card_request table
// (see TableName). Specs is not a column: it is populated separately by
// SearchCardRequest when CardRequestOptions.NeedSpec is set.
type CardRequestSpecRes struct {
	ID              int64
	ComputeResource string
	UID             int64
	UserName        string
	CardType        string
	AccCardsNum     string
	DiskCapacity    int64
	ResourceType    int
	BeginDate       string
	BeginUnix       int64
	EndDate         string
	EndUnix         int64
	Contact         string
	PhoneNumber     string
	EmailAddress    string
	Org             string
	Description     string
	Status          int
	Review          string
	CreatedUnix     int64
	UpdatedUnix     int64
	DeleteUnix      int64
	Specs           []RequestSpecInfo
}

// TableName maps CardRequestSpecRes onto the card_request table.
func (CardRequestSpecRes) TableName() string {
	return "card_request"
}

// CardRequestSpec is the many-to-many relation between an approved card
// request and the resource specifications granted to it; the
// (RequestId, SpecId) pair is unique.
type CardRequestSpec struct {
	ID          int64              `xorm:"pk autoincr"`
	RequestId   int64              `xorm:"unique(idx_request_spec)"`
	SpecId      int64              `xorm:"unique(idx_request_spec)"`
	CreatedTime timeutil.TimeStamp `xorm:"created"`
}

// CardRequestOptions carries the filters and paging parameters accepted by
// SearchCardRequest. Zero values mean "no filter".
type CardRequestOptions struct {
	ListOptions
	UserID  int64
	OrderBy string // one of the OrderBy* constants; defaults to OrderByIDDesc
	Keyword string // case-insensitive match on contact/cards/description/phone/org/user name

	AiCenterCode    string // queue filter; forces the spec/queue joins
	QueueId         int64  // queue filter; forces the spec/queue joins
	ComputeResource string
	AccCardType     string
	Cluster         string // queue filter; forces the spec/queue joins
	ResourceType    int    // RESOURCE_TYPE_SHARE or RESOURCE_TYPE_EXCLUSIVE
	UseBeginTime    int64  // lower bound on the requested usage period (begin_unix)
	UseEndTime      int64  // upper bound on the requested usage period (end_unix)
	BeginTimeUnix   int64  // lower bound on creation time (created_unix)
	EndTimeUnix     int64  // upper bound on creation time (created_unix)
	NeedSpec        bool   // when true, each result row also gets its related specs
}
// CardRequestShowList is the paged API/UI response wrapper for card requests.
type CardRequestShowList struct {
	Total           int64                  `json:"total"`
	CardRequestList []*CardRequestSpecShow `json:"cardRequestList"`
}

// CardRequestSpecShow is the JSON-facing view of a single card request,
// including its granted specifications.
type CardRequestSpecShow struct {
	ID              int64             `json:"id"`
	ComputeResource string            `json:"compute_resource"`
	CardType        string            `json:"card_type"`
	AccCardsNum     string            `json:"acc_cards_num"`
	BeginDate       string            `json:"begin_date"`
	EndDate         string            `json:"end_date"`
	ResourceType    int               `json:"resource_type"`
	DiskCapacity    int64             `json:"disk_capacity"`
	UID             int64             `json:"uid"`
	UserName        string            `json:"user_name"`
	TargetCenter    []string          `json:"target_center"`
	Contact         string            `json:"contact"`
	PhoneNumber     string            `json:"phone_number"`
	EmailAddress    string            `json:"email_address"`
	Org             string            `json:"org"`
	Description     string            `json:"description"`
	Status          int               `json:"status"` // CARD_REQUEST_* constant
	Review          string            `json:"review"`
	CreatedUnix     int64             `json:"created_unix"`
	Specs           []RequestSpecInfo `json:"specs"`
}

// RequestSpecInfo is a joined view of one resource specification together with
// its owning queue, keyed back to a card request via RequestId. xorm maps it
// onto the resource_specification table (see TableName); the queue fields are
// selected from the joined resource_queue table.
type RequestSpecInfo struct {
	ID           int64
	SourceSpecId string
	AccCardsNum  int
	CpuCores     int
	MemGiB       float32
	GPUMemGiB    float32
	ShareMemGiB  float32
	UnitPrice    int
	Status       int
	UpdatedTime  timeutil.TimeStamp
	RequestId    int64
	// fields below come from the joined resource_queue row
	Cluster         string
	AiCenterCode    string
	AiCenterName    string
	QueueCode       string
	QueueName       string
	QueueType       string
	QueueId         int64
	ComputeResource string
	AccCardType     string
	HasInternet     int
}

// TableName maps RequestSpecInfo onto the resource_specification table.
func (RequestSpecInfo) TableName() string {
	return "resource_specification"
}

// CardRequestReview is the reviewer's decision payload for a card request:
// the request id, an optional review message, and (on approval) the ids of
// the specifications being granted.
type CardRequestReview struct {
	ID      int64
	Review  string
	SpecIds []int64
}

// AgreeCardRequest approves the card request identified by r.ID: it validates
// that every id in r.SpecIds refers to an existing resource specification,
// marks the request CARD_REQUEST_AGREE with an empty review message, and
// replaces the request's spec relations with r.SpecIds. All writes happen in
// a single transaction; any error rolls everything back.
//
// Fixes over the original: the session never called Begin(), so the
// Rollback()/Commit() calls were no-ops and a failure mid-way could leave a
// partial update behind; an inner `err :=` also shadowed the outer err that
// the deferred rollback inspected.
func AgreeCardRequest(r CardRequestReview) error {
	sess := x.NewSession()
	defer sess.Close()
	if err := sess.Begin(); err != nil {
		return err
	}

	// Ensure the request exists before touching anything.
	old := CardRequest{}
	if has, _ := sess.ID(r.ID).Get(&old); !has {
		sess.Rollback()
		return errors.New("CardRequest not exist")
	}

	// Validate that every requested spec id exists.
	specs := make([]ResourceSpecification, 0)
	cond := builder.In("id", r.SpecIds)
	if err := sess.Where(cond).Find(&specs); err != nil {
		sess.Rollback()
		return err
	}
	if len(specs) < len(r.SpecIds) {
		sess.Rollback()
		return errors.New("specIds not correct")
	}

	// Approve the request and clear any previous review message.
	rs := CardRequest{
		Status: CARD_REQUEST_AGREE,
		Review: "",
	}
	if _, err := sess.ID(r.ID).Cols("status", "review").Update(&rs); err != nil {
		sess.Rollback()
		return err
	}

	// Replace the request/spec relations: drop the old rows...
	if _, err := sess.Where("request_id = ? ", r.ID).Delete(&CardRequestSpec{}); err != nil {
		sess.Rollback()
		return err
	}

	// ...and insert the new ones, if any were granted.
	if len(r.SpecIds) > 0 {
		rss := make([]CardRequestSpec, len(r.SpecIds))
		for i, v := range r.SpecIds {
			rss[i] = CardRequestSpec{
				RequestId: r.ID,
				SpecId:    v,
			}
		}
		if _, err := sess.Insert(&rss); err != nil {
			sess.Rollback()
			return err
		}
	}

	return sess.Commit()
}

// DisagreeCardRequest rejects the card request identified by r.ID: it marks
// the request CARD_REQEST_DISAGREE, stores the reviewer's message, and removes
// any existing spec relations. Both writes happen in a single transaction.
//
// Fixes over the original: the session never called Begin(), so the
// Rollback()/Commit() calls were no-ops and the status update could persist
// even when the relation delete failed.
func DisagreeCardRequest(r CardRequestReview) error {
	sess := x.NewSession()
	defer sess.Close()
	if err := sess.Begin(); err != nil {
		return err
	}

	// Ensure the request exists before touching anything.
	old := CardRequest{}
	if has, _ := sess.ID(r.ID).Get(&old); !has {
		sess.Rollback()
		return errors.New("CardRequest not exist")
	}

	// Record the rejection and the reviewer's message.
	rs := CardRequest{
		Status: CARD_REQEST_DISAGREE,
		Review: r.Review,
	}
	if _, err := sess.ID(r.ID).Update(&rs); err != nil {
		sess.Rollback()
		return err
	}

	// A rejected request keeps no granted specifications.
	if _, err := sess.Where("request_id = ? ", r.ID).Delete(&CardRequestSpec{}); err != nil {
		sess.Rollback()
		return err
	}

	return sess.Commit()
}

// CreateCardRequest persists a newly submitted card request.
func CreateCardRequest(cardRequest *CardRequest) error {
	if _, err := x.Insert(cardRequest); err != nil {
		return err
	}
	return nil
}
// GetCardRequestById loads a single card request by primary key. It returns
// ErrNotExist when no row with the given id is found.
func GetCardRequestById(id int64) (*CardRequest, error) {
	rel := &CardRequest{}
	has, err := x.ID(id).Get(rel)
	switch {
	case err != nil:
		return nil, err
	case !has:
		return nil, ErrNotExist{id}
	default:
		return rel, nil
	}
}

// SearchCardRequest returns the total number of card requests matching opts
// and one page of result rows. The "user" table is always joined (the keyword
// filter matches on user name); the spec/queue tables are joined only when a
// queue-related filter is present. When opts.NeedSpec is true each result row
// is additionally populated with its related specifications.
//
// Fixes over the original: the error from the non-join Count branch was never
// checked; the keyword condition matched card_request.description twice; and
// the empty-page early return reported a total of 0 even when the count query
// had found matches.
func SearchCardRequest(opts *CardRequestOptions) (int64, []*CardRequestSpecRes, error) {
	var cond = builder.NewCond()
	if opts.Page <= 0 {
		opts.Page = 1
	}

	// needJoinSpec is set when a filter references resource_queue columns,
	// which are only reachable through the card_request_spec / spec joins.
	needJoinSpec := false
	if opts.Keyword != "" {
		lowerKeyWord := strings.ToLower(opts.Keyword)
		cond = cond.And(builder.Or(
			builder.Like{"LOWER(card_request.contact)", lowerKeyWord},
			builder.Like{"LOWER(card_request.acc_cards_num)", lowerKeyWord},
			builder.Like{"LOWER(card_request.description)", lowerKeyWord},
			builder.Like{"LOWER(card_request.phone_number)", lowerKeyWord},
			builder.Like{"LOWER(card_request.org)", lowerKeyWord},
			builder.Like{"LOWER(\"user\".name)", lowerKeyWord}))
	}
	if opts.UserID != 0 {
		cond = cond.And(builder.Eq{"\"user\".id": opts.UserID})
	}

	if opts.ResourceType != 0 {
		cond = cond.And(builder.Eq{"card_request.resource_type": opts.ResourceType})
	}
	if opts.AiCenterCode != "" {
		needJoinSpec = true
		cond = cond.And(builder.Eq{"resource_queue.ai_center_code": opts.AiCenterCode})
	}
	if opts.QueueId > 0 {
		cond = cond.And(builder.Eq{"resource_queue.id": opts.QueueId})
		needJoinSpec = true
	}
	if opts.ComputeResource != "" {
		cond = cond.And(builder.Eq{"card_request.compute_resource": opts.ComputeResource})
	}
	if opts.AccCardType != "" {
		cond = cond.And(builder.Eq{"card_request.card_type": opts.AccCardType})
	}
	if opts.Cluster != "" {
		cond = cond.And(builder.Eq{"resource_queue.cluster": opts.Cluster})
		needJoinSpec = true
	}

	// Filters on the requested usage period.
	if opts.UseBeginTime != 0 {
		cond = cond.And(builder.Gte{"card_request.begin_unix": opts.UseBeginTime})
	}
	if opts.UseEndTime != 0 {
		cond = cond.And(builder.Lte{"card_request.end_unix": opts.UseEndTime})
	}

	// Filters on the creation time.
	if opts.BeginTimeUnix != 0 {
		cond = cond.And(builder.Gte{"card_request.created_unix": opts.BeginTimeUnix})
	}
	if opts.EndTimeUnix != 0 {
		cond = cond.And(builder.Lte{"card_request.created_unix": opts.EndTimeUnix})
	}
	if opts.OrderBy == "" {
		opts.OrderBy = OrderByIDDesc
	}

	// Exclude soft-deleted rows (delete_unix may be 0 or NULL for live rows).
	cond = cond.And(builder.NewCond().Or(builder.Eq{"card_request.delete_unix": 0}).Or(builder.IsNull{"card_request.delete_unix"}))
	cols := []string{"card_request.id", "card_request.compute_resource", "card_request.contact", "card_request.card_type", "card_request.acc_cards_num",
		"card_request.disk_capacity", "card_request.resource_type", "card_request.begin_date", "card_request.end_date", "card_request.uid",
		"card_request.phone_number", "card_request.email_address", "card_request.org", "card_request.description", "card_request.status", "card_request.review",
		"card_request.created_unix"}

	// Count matching requests (distinct, since the spec joins can multiply rows).
	var count int64
	var err error
	if needJoinSpec {
		count, err = x.Where(cond).
			Distinct("card_request.id").
			Join("INNER", "card_request_spec", "card_request_spec.request_id = card_request.id").
			Join("INNER", "user", "\"user\".id = card_request.uid").
			Join("INNER", "resource_specification", "resource_specification.id = card_request_spec.spec_id").
			Join("INNER", "resource_queue", "resource_queue.id = resource_specification.queue_id").
			Count(&CardRequestSpecRes{})
	} else {
		count, err = x.Where(cond).
			Distinct("card_request.id").
			Join("INNER", "user", "\"user\".id = card_request.uid").
			Count(&CardRequestSpecRes{})
	}
	if err != nil {
		return 0, nil, err
	}

	// Fetch the requested page.
	r := make([]*CardRequestSpecRes, 0)
	if needJoinSpec {
		if err = x.Where(cond).Distinct(cols...).
			Join("INNER", "card_request_spec", "card_request_spec.request_id = card_request.id").
			Join("INNER", "user", "\"user\".id = card_request.uid").
			Join("INNER", "resource_specification", "resource_specification.id = card_request_spec.spec_id").
			Join("INNER", "resource_queue", "resource_queue.id = resource_specification.queue_id").
			OrderBy(opts.OrderBy).
			Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).
			Find(&r); err != nil {
			return 0, nil, err
		}
	} else {
		if err = x.Where(cond).Distinct(cols...).
			Join("INNER", "user", "\"user\".id = card_request.uid").
			OrderBy(opts.OrderBy).
			Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).
			Find(&r); err != nil {
			return 0, nil, err
		}
	}

	// Empty page (e.g. page index beyond the last page): still report the total.
	if len(r) == 0 {
		return count, r, nil
	}

	// Fill in display names; lookup failures leave UserName empty (best-effort).
	for _, v := range r {
		user, _ := GetUserByID(v.UID)
		if user != nil {
			v.UserName = user.Name
		}
	}

	// Optionally attach the related specifications, grouped by request id.
	if opts.NeedSpec {
		requestIds := make([]int64, 0, len(r))
		for _, v := range r {
			requestIds = append(requestIds, v.ID)
		}

		specs := make([]RequestSpecInfo, 0)
		if err := x.Cols("resource_specification.id", "resource_specification.source_spec_id",
			"resource_specification.acc_cards_num", "resource_specification.cpu_cores",
			"resource_specification.mem_gi_b", "resource_specification.gpu_mem_gi_b",
			"resource_specification.share_mem_gi_b", "resource_specification.unit_price",
			"resource_specification.status", "resource_specification.updated_time",
			"card_request_spec.request_id", "resource_queue.cluster",
			"resource_queue.ai_center_code", "resource_queue.acc_card_type",
			"resource_queue.id as queue_id", "resource_queue.compute_resource",
			"resource_queue.queue_code", "resource_queue.queue_name",
			"resource_queue.queue_type", "resource_queue.ai_center_name",
			"resource_queue.has_internet",
		).In("card_request_spec.request_id", requestIds).
			Join("INNER", "card_request_spec", "card_request_spec.spec_id = resource_specification.id").
			Join("INNER", "resource_queue", "resource_queue.id = resource_specification.queue_id").
			OrderBy("resource_specification.acc_cards_num").
			Find(&specs); err != nil {
			return 0, nil, err
		}

		specsMap := make(map[int64][]RequestSpecInfo, len(r))
		for _, v := range specs {
			specsMap[v.RequestId] = append(specsMap[v.RequestId], v)
		}

		for i, v := range r {
			s := specsMap[v.ID]
			if s == nil {
				s = make([]RequestSpecInfo, 0) // keep Specs non-nil so JSON encodes []
			}
			r[i].Specs = s
		}
	}

	return count, r, nil
}

// UpdateCardRequest rewrites the user-editable columns of an existing card
// request; the review-related columns (status, review) are deliberately not
// part of the column list and stay untouched.
func UpdateCardRequest(cardRequest *CardRequest) error {
	editableCols := []string{
		"compute_resource", "contact", "card_type", "acc_cards_num",
		"disk_capacity", "resource_type", "begin_date", "end_date",
		"phone_number", "email_address", "org", "description",
		"begin_unix", "end_unix",
	}
	_, err := x.ID(cardRequest.ID).Cols(editableCols...).Update(cardRequest)
	return err
}

+ 33
- 4
models/cloudbrain.go View File

@@ -271,6 +271,7 @@ type Cloudbrain struct {
EngineID int64 //引擎id
ImageID string //grampus image_id
AiCenter string //grampus ai center: center_id+center_name
QueueCode string
FailedReason string `xorm:"text"`

TrainUrl string //输出模型的obs路径
@@ -1869,9 +1870,24 @@ type GrampusNotebookInfo struct {
UserID string `json:"userId"`
Tasks []GrampusNotebookTask `json:"tasks"`
}

const (
GrampusNetAccess = "1"
GrampusNotNetAccess = "2"

GrampusPoolTypePublic = "1"
GrampusPoolTypeExclusive = "2"
)

type Center struct {
ID string `json:"id"`
Name string `json:"name"`
ID string `json:"id"`
Name string `json:"name"`
ResourceSpec []struct {
ID string `json:"id"`
PoolType string `json:"poolType"`
Name string `json:"name"`
IsNetAccess string `json:"isNetAccess"`
} `json:"resourceSpec"`
}
type GrampusSpec struct {
CreatedAt int64 `json:"createdAt"`
@@ -1925,9 +1941,21 @@ type GetGrampusResourceSpecsResult struct {

type GetGrampusAiCentersResult struct {
GrampusResult
Infos []GrampusAiCenter `json:"aiCenterInfos"`
TotalSize int `json:"totalSize"`
Infos []GrampusAiCenter `json:"aiCenterInfos"`
}

type GrampusResourceQueue struct {
QueueCode string
QueueName string
QueueType string
AiCenterCode string
AiCenterName string
ComputeResource string
AccCardType string
HasInternet int //0 unknown;1 no internet;2 has internet

}

type AICenterImage struct {
AICenterID string `json:"aiCenterId"`
ImageUrl string `json:"imageUrl"`
@@ -2028,6 +2056,7 @@ type GrampusNotebookTask struct {
Capacity int `json:"capacity"`
CenterID []string `json:"centerID"`
CenterName []string `json:"centerName"`
PoolId string `json:"poolId"`
Code GrampusDataset `json:"code"`
Datasets []GrampusDataset `json:"datasets"`
OutPut GrampusDataset `json:"output"`


+ 4
- 0
models/cloudbrain_spec.go View File

@@ -18,6 +18,8 @@ type CloudbrainSpec struct {
UnitPrice int
QueueId int64
QueueCode string
QueueName string
QueueType string
Cluster string
HasInternet int
AiCenterCode string `xorm:"index"`
@@ -163,6 +165,8 @@ func UpdateCloudbrainSpec(cloudbrainId int64, s *Specification) (int64, error) {
UnitPrice: s.UnitPrice,
QueueId: s.QueueId,
QueueCode: s.QueueCode,
QueueName: s.QueueName,
QueueType: s.QueueType,
Cluster: s.Cluster,
AiCenterCode: s.AiCenterCode,
AiCenterName: s.AiCenterName,


+ 3
- 0
models/models.go View File

@@ -177,6 +177,9 @@ func init() {
new(ModelartsDeploy),
new(ModelartsDeployQueue),
new(CloudbrainConfig),
new(ResourceExclusivePool),
new(CardRequest),
new(CardRequestSpec),
)

tablesStatistic = append(tablesStatistic,


+ 37
- 38
models/resource_queue.go View File

@@ -2,18 +2,23 @@ package models

import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
"encoding/json"
"errors"
"strconv"
"strings"
"xorm.io/builder"
)

const (
QueueTypePublic = "public"
QueueTypeExclusive = "exclusive"
)

type ResourceQueue struct {
ID int64 `xorm:"pk autoincr"`
QueueCode string
QueueName string
QueueType string
Cluster string `xorm:"notnull"`
AiCenterCode string
AiCenterName string
@@ -34,6 +39,8 @@ func (r ResourceQueue) ConvertToRes() *ResourceQueueRes {
return &ResourceQueueRes{
ID: r.ID,
QueueCode: r.QueueCode,
QueueName: r.QueueName,
QueueType: r.QueueType,
Cluster: r.Cluster,
AiCenterCode: r.AiCenterCode,
AiCenterName: r.AiCenterName,
@@ -57,6 +64,8 @@ type ResourceQueueReq struct {
CreatorId int64
IsAutomaticSync bool
Remark string
QueueName string
QueueType string
}

func (r ResourceQueueReq) ToDTO() ResourceQueue {
@@ -72,6 +81,8 @@ func (r ResourceQueueReq) ToDTO() ResourceQueue {
Remark: r.Remark,
CreatedBy: r.CreatorId,
UpdatedBy: r.CreatorId,
QueueName: r.QueueName,
QueueType: r.QueueType,
}
if r.Cluster == OpenICluster {
if r.AiCenterCode == AICenterOfCloudBrainOne {
@@ -92,6 +103,7 @@ type SearchResourceQueueOptions struct {
ComputeResource string
AccCardType string
HasInternet SpecInternetQuery
QueueType string
}

type ResourceQueueListRes struct {
@@ -102,6 +114,8 @@ type ResourceQueueListRes struct {
type ResourceQueueCodesRes struct {
ID int64
QueueCode string
QueueName string
QueueType string
Cluster string
AiCenterCode string
AiCenterName string
@@ -136,6 +150,8 @@ func NewResourceQueueListRes(totalSize int64, list []ResourceQueue) *ResourceQue
type ResourceQueueRes struct {
ID int64
QueueCode string
QueueType string
QueueName string
Cluster string
AiCenterCode string
AiCenterName string
@@ -155,7 +171,7 @@ func UpdateResourceQueueById(queueId int64, queue ResourceQueue) (int64, error)
return x.ID(queueId).Update(&queue)
}
func UpdateResourceCardsTotalNumAndInternetStatus(queueId int64, queue ResourceQueue) (int64, error) {
return x.ID(queueId).Cols("cards_total_num", "remark", "has_internet").Update(&queue)
return x.ID(queueId).Cols("cards_total_num", "remark", "has_internet", "queue_type", "queue_name").Update(&queue)
}

func SearchResourceQueue(opts SearchResourceQueueOptions) (int64, []ResourceQueue, error) {
@@ -180,6 +196,9 @@ func SearchResourceQueue(opts SearchResourceQueueOptions) (int64, []ResourceQueu
} else if opts.HasInternet == QueryHasInternetSpecs {
cond = cond.And(builder.Eq{"has_internet": HasInternet})
}
if opts.QueueType != "" {
cond = cond.And(builder.Eq{"queue_type": opts.QueueType})
}
n, err := x.Where(cond).Unscoped().Count(&ResourceQueue{})
if err != nil {
return 0, nil, err
@@ -366,39 +385,15 @@ func GetResourceAiCenters() ([]ResourceAiCenterRes, error) {
return r, nil
}

// SpecificationSpecialQueueConfig is the JSON shape of the special-queue
// configuration parsed from setting.SPECIFICATION_SPECIAL_QUEUE.
type SpecificationSpecialQueueConfig struct {
	SpecialQueues []SpecialQueue `json:"special_queues"`
}

// SpecialQueue is one entry of the special-queue configuration: it binds an
// organization name to a cluster queue for a given job type and compute
// resource.
type SpecialQueue struct {
	OrgName         string `json:"org_name"`
	JobType         string `json:"job_type"`
	Cluster         string `json:"cluster"`
	QueueId         int64  `json:"queue_id"`
	ComputeResource string `json:"compute_resource"`
}

// Cached special-queue configuration; populated lazily on first access.
var specialQueueConfig SpecificationSpecialQueueConfig
var specialQueueConfigFlag = false

// GetSpecialQueueConfig lazily parses setting.SPECIFICATION_SPECIAL_QUEUE
// into specialQueueConfig and returns the cached value on later calls.
// NOTE(review): this lazy init is not goroutine-safe (no sync.Once/mutex),
// and the flag is set to true even when json.Unmarshal fails, so a bad
// config is never re-parsed — callers then get the zero-value config.
func GetSpecialQueueConfig() SpecificationSpecialQueueConfig {
	if !specialQueueConfigFlag {
		if err := json.Unmarshal([]byte(setting.SPECIFICATION_SPECIAL_QUEUE), &specialQueueConfig); err != nil {
			log.Error("json.Unmarshal specialQueueConfig error.%v", err)
		}
		specialQueueConfigFlag = true
	}
	return specialQueueConfig
}

func GetSpecialQueueIds(opts FindSpecsOptions) []SpecialQueue {
config := GetSpecialQueueConfig()
if len(config.SpecialQueues) == 0 {
return []SpecialQueue{}
func GetExclusiveQueueIds(opts FindSpecsOptions) []*ResourceExclusivePool {
pools, err := FindExclusivePools()
if err != nil {
log.Error("GetSpecialQueueIds FindSpecialQueueConfig err.%v", err)
return nil
}

queues := make([]SpecialQueue, 0)
for _, queue := range config.SpecialQueues {
queues := make([]*ResourceExclusivePool, 0)
for _, queue := range pools {
if queue.JobType != string(opts.JobType) {
continue
}
@@ -414,18 +409,22 @@ func GetSpecialQueueIds(opts FindSpecsOptions) []SpecialQueue {
return queues
}

func IsUserInSpecialPool(userId int64) bool {
func IsUserInExclusivePool(userId int64) bool {
userOrgs, err := GetOrgsByUserID(userId, true)
if err != nil {
log.Error("GetSpecialQueueIds GetOrgsByUserID error.%v", err)
return false
}
config := GetSpecialQueueConfig()
if len(config.SpecialQueues) == 0 {
pools, err := FindExclusivePools()
if err != nil {
log.Error("IsUserInSpecialPool FindExclusivePools err.%v", err)
return false
}
if len(pools) == 0 {
return false
}
for _, org := range userOrgs {
for _, queue := range config.SpecialQueues {
for _, queue := range pools {
if strings.ToLower(org.Name) == strings.ToLower(queue.OrgName) {
return true
}


+ 208
- 69
models/resource_scene.go View File

@@ -1,8 +1,10 @@
package models

import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
"errors"
"strings"
"xorm.io/builder"
)

@@ -11,18 +13,29 @@ const (
NotExclusive
)

const (
SceneTypePublic = "public"
SceneTypeExclusive = "exclusive"

SpecPublic = "public"
SpecExclusive = "exclusive"
)

type ResourceScene struct {
ID int64 `xorm:"pk autoincr"`
SceneName string
JobType string
IsExclusive bool
ExclusiveOrg string
CreatedTime timeutil.TimeStamp `xorm:"created"`
CreatedBy int64
UpdatedTime timeutil.TimeStamp `xorm:"updated"`
UpdatedBy int64
DeleteTime timeutil.TimeStamp `xorm:"deleted"`
DeletedBy int64
ID int64 `xorm:"pk autoincr"`
SceneName string
JobType string
Cluster string
ComputeResource string
IsSpecExclusive string
SceneType string //共享或者独占场景
ExclusiveOrg string
CreatedTime timeutil.TimeStamp `xorm:"created"`
CreatedBy int64
UpdatedTime timeutil.TimeStamp `xorm:"updated"`
UpdatedBy int64
DeleteTime timeutil.TimeStamp `xorm:"deleted"`
DeletedBy int64
}

type ResourceSceneSpec struct {
@@ -32,26 +45,47 @@ type ResourceSceneSpec struct {
CreatedTime timeutil.TimeStamp `xorm:"created"`
}

type ResourceExclusivePool struct {
ID int64 `xorm:"pk autoincr"`
SceneId int64
OrgName string
JobType string
Cluster string
QueueId int64
ComputeResource string
CreatedTime timeutil.TimeStamp `xorm:"created"`
CreatedBy int64
UpdatedTime timeutil.TimeStamp `xorm:"updated"`
UpdatedBy int64
DeleteTime timeutil.TimeStamp `xorm:"deleted"`
DeletedBy int64
}

type ResourceSceneReq struct {
ID int64
SceneName string
JobType string
IsExclusive bool
ExclusiveOrg string
CreatorId int64
SpecIds []int64
ID int64
SceneName string
JobType string
SceneType string
Cluster string
Resource string
ExclusiveQueueIds []int64
IsSpecExclusive string
ExclusiveOrg string
CreatorId int64
SpecIds []int64
}

type SearchResourceSceneOptions struct {
ListOptions
JobType string
IsExclusive int
IsSpecExclusive string
AiCenterCode string
QueueId int64
ComputeResource string
AccCardType string
Cluster string
HasInternet SpecInternetQuery
SceneType string
}

type ResourceSceneListRes struct {
@@ -67,12 +101,15 @@ func NewResourceSceneListRes(totalSize int64, list []ResourceSceneRes) *Resource
}

type ResourceSceneRes struct {
ID int64
SceneName string
JobType JobType
IsExclusive bool
ExclusiveOrg string
Specs []ResourceSpecInfo
ID int64
SceneName string
JobType JobType
IsSpecExclusive string
Cluster string
ComputeResource string
SceneType string //共享或者独占场景
ExclusiveOrg string
Specs []ResourceSpecInfo
}

func (ResourceSceneRes) TableName() string {
@@ -105,6 +142,8 @@ type ResourceSpecInfo struct {
AiCenterCode string
AiCenterName string
QueueCode string
QueueType string
QueueName string
QueueId int64
ComputeResource string
AccCardType string
@@ -119,6 +158,11 @@ func InsertResourceScene(r ResourceSceneReq) error {
sess := x.NewSession()
defer sess.Close()

err := sess.Begin()
if err != nil {
log.Error("InsertResourceScene start transaction err. %v", err)
return err
}
//check
specs := make([]ResourceSpecification, 0)
cond := builder.In("id", r.SpecIds)
@@ -130,34 +174,68 @@ func InsertResourceScene(r ResourceSceneReq) error {
}

rs := ResourceScene{
SceneName: r.SceneName,
JobType: r.JobType,
IsExclusive: r.IsExclusive,
ExclusiveOrg: r.ExclusiveOrg,
CreatedBy: r.CreatorId,
UpdatedBy: r.CreatorId,
}
_, err := sess.InsertOne(&rs)
SceneName: r.SceneName,
JobType: r.JobType,
IsSpecExclusive: r.IsSpecExclusive,
SceneType: r.SceneType,
Cluster: r.Cluster,
ComputeResource: r.Resource,
ExclusiveOrg: r.ExclusiveOrg,
CreatedBy: r.CreatorId,
UpdatedBy: r.CreatorId,
}
_, err = sess.InsertOne(&rs)
if err != nil {
sess.Rollback()
return err
}

if len(r.SpecIds) == 0 {
return sess.Commit()
}
rss := make([]ResourceSceneSpec, len(r.SpecIds))
for i, v := range r.SpecIds {
rss[i] = ResourceSceneSpec{
SceneId: rs.ID,
SpecId: v,
if len(r.SpecIds) > 0 {
rss := make([]ResourceSceneSpec, len(r.SpecIds))
for i, v := range r.SpecIds {
rss[i] = ResourceSceneSpec{
SceneId: rs.ID,
SpecId: v,
}
}

_, err = sess.Insert(&rss)
if err != nil {
sess.Rollback()
return err
}
}

_, err = sess.Insert(&rss)
if err != nil {
sess.Rollback()
return err
if r.SceneType == SceneTypeExclusive && r.ExclusiveOrg != "" && len(r.SpecIds) > 0 {
pools := make([]ResourceExclusivePool, 0)
queueIds := make([]int64, 0)
err = sess.Table("resource_specification").Distinct("queue_id").In("id", r.SpecIds).Find(&queueIds)
if err != nil {
sess.Rollback()
return err
}
for _, org := range strings.Split(r.ExclusiveOrg, ";") {
if org == "" {
continue
}
for _, id := range queueIds {
pools = append(pools, ResourceExclusivePool{
SceneId: rs.ID,
OrgName: org,
JobType: r.JobType,
Cluster: r.Cluster,
QueueId: id,
ComputeResource: r.Resource,
CreatedBy: r.CreatorId,
UpdatedBy: r.CreatorId,
})
}
}
_, err = sess.Insert(pools)
if err != nil {
sess.Rollback()
return err
}
}

return sess.Commit()
@@ -172,7 +250,11 @@ func UpdateResourceScene(r ResourceSceneReq) error {
}
sess.Close()
}()

err = sess.Begin()
if err != nil {
log.Error("UpdateResourceScene start transaction err. %v", err)
return err
}
// find old scene
old := ResourceScene{}
if has, _ := sess.ID(r.ID).Get(&old); !has {
@@ -190,36 +272,69 @@ func UpdateResourceScene(r ResourceSceneReq) error {

//update scene
rs := ResourceScene{
SceneName: r.SceneName,
IsExclusive: r.IsExclusive,
ExclusiveOrg: r.ExclusiveOrg,
SceneName: r.SceneName,
IsSpecExclusive: r.IsSpecExclusive,
ExclusiveOrg: r.ExclusiveOrg,
SceneType: r.SceneType,
}
if _, err = sess.ID(r.ID).UseBool("is_exclusive").Update(&rs); err != nil {
if _, err = sess.ID(r.ID).UseBool("is_spec_exclusive").Update(&rs); err != nil {
return err
}

//delete scene spec relation
if _, err = sess.Where("scene_id = ? ", r.ID).Delete(&ResourceSceneSpec{}); err != nil {
sess.Rollback()
return err
}

if len(r.SpecIds) == 0 {
return sess.Commit()
}
//build new scene spec relation
rss := make([]ResourceSceneSpec, len(r.SpecIds))
for i, v := range r.SpecIds {
rss[i] = ResourceSceneSpec{
SceneId: r.ID,
SpecId: v,
if len(r.SpecIds) > 0 {
//build new scene spec relation
rss := make([]ResourceSceneSpec, len(r.SpecIds))
for i, v := range r.SpecIds {
rss[i] = ResourceSceneSpec{
SceneId: r.ID,
SpecId: v,
}
}
if _, err = sess.Insert(&rss); err != nil {
return err
}

}
if _, err = sess.Insert(&rss); err != nil {
sess.Rollback()
if _, err = sess.Where("scene_id = ? ", r.ID).Delete(&ResourceExclusivePool{}); err != nil {
return err
}

if r.SceneType == SceneTypeExclusive && r.ExclusiveOrg != "" && len(r.SpecIds) > 0 {
pools := make([]ResourceExclusivePool, 0)
queueIds := make([]int64, 0)
err = sess.Table("resource_specification").Distinct("queue_id").In("id", r.SpecIds).Find(&queueIds)
if err != nil {
return err
}
for _, org := range strings.Split(r.ExclusiveOrg, ";") {
if org == "" {
continue
}
for _, id := range queueIds {
pools = append(pools, ResourceExclusivePool{
SceneId: r.ID,
OrgName: org,
JobType: r.JobType,
Cluster: r.Cluster,
QueueId: id,
ComputeResource: r.Resource,
CreatedBy: r.CreatorId,
UpdatedBy: r.CreatorId,
})
}
}
_, err = sess.Insert(pools)
if err != nil {
return err
}
}

return sess.Commit()
}

@@ -232,6 +347,11 @@ func DeleteResourceScene(sceneId int64) error {
}
sess.Close()
}()
err = sess.Begin()
if err != nil {
log.Error("DeleteResourceScene start transaction err. %v", err)
return err
}

if _, err = sess.ID(sceneId).Delete(&ResourceScene{}); err != nil {
return err
@@ -239,6 +359,9 @@ func DeleteResourceScene(sceneId int64) error {
if _, err = sess.Where("scene_id = ? ", sceneId).Delete(&ResourceSceneSpec{}); err != nil {
return err
}
if _, err = sess.Where("scene_id = ? ", sceneId).Delete(&ResourceExclusivePool{}); err != nil {
return err
}
return sess.Commit()
}

@@ -250,10 +373,8 @@ func SearchResourceScene(opts SearchResourceSceneOptions) (int64, []ResourceScen
if opts.JobType != "" {
cond = cond.And(builder.Eq{"resource_scene.job_type": opts.JobType})
}
if opts.IsExclusive == Exclusive {
cond = cond.And(builder.Eq{"resource_scene.is_exclusive": 1})
} else if opts.IsExclusive == NotExclusive {
cond = cond.And(builder.Eq{"resource_scene.is_exclusive": 0})
if opts.IsSpecExclusive != "" {
cond = cond.And(builder.Eq{"resource_scene.is_spec_exclusive": opts.IsSpecExclusive})
}
if opts.AiCenterCode != "" {
cond = cond.And(builder.Eq{"resource_queue.ai_center_code": opts.AiCenterCode})
@@ -275,9 +396,12 @@ func SearchResourceScene(opts SearchResourceSceneOptions) (int64, []ResourceScen
} else if opts.HasInternet == QueryNoInternetSpecs {
cond = cond.And(builder.Eq{"resource_queue.has_internet": NoInternet})
}
if opts.SceneType != "" {
cond = cond.And(builder.Eq{"resource_scene.scene_type": opts.SceneType})
}
cond = cond.And(builder.NewCond().Or(builder.Eq{"resource_scene.delete_time": 0}).Or(builder.IsNull{"resource_scene.delete_time"}))
cols := []string{"resource_scene.id", "resource_scene.scene_name", "resource_scene.job_type", "resource_scene.is_exclusive",
"resource_scene.exclusive_org"}
cols := []string{"resource_scene.id", "resource_scene.scene_name", "resource_scene.job_type", "resource_scene.is_spec_exclusive",
"resource_scene.exclusive_org", "resource_scene.scene_type", "resource_scene.cluster", "resource_scene.compute_resource"}
count, err := x.Where(cond).
Distinct("resource_scene.id").
Join("INNER", "resource_scene_spec", "resource_scene_spec.scene_id = resource_scene.id").
@@ -302,6 +426,7 @@ func SearchResourceScene(opts SearchResourceSceneOptions) (int64, []ResourceScen
if len(r) == 0 {
return 0, r, err
}

//find related specs
sceneIds := make([]int64, 0, len(r))
for _, v := range r {
@@ -319,7 +444,7 @@ func SearchResourceScene(opts SearchResourceSceneOptions) (int64, []ResourceScen
"resource_queue.ai_center_code", "resource_queue.acc_card_type",
"resource_queue.id as queue_id", "resource_queue.compute_resource",
"resource_queue.queue_code", "resource_queue.ai_center_name",
"resource_queue.has_internet",
"resource_queue.has_internet", "resource_queue.queue_name", "resource_queue.queue_type",
).In("resource_scene_spec.scene_id", sceneIds).
Join("INNER", "resource_scene_spec", "resource_scene_spec.spec_id = resource_specification.id").
Join("INNER", "resource_queue", "resource_queue.ID = resource_specification.queue_id").
@@ -347,3 +472,17 @@ func SearchResourceScene(opts SearchResourceSceneOptions) (int64, []ResourceScen

return count, r, nil
}

// FindExclusivePools loads every ResourceExclusivePool record from the
// database. On success the returned slice is non-nil (possibly empty).
func FindExclusivePools() ([]*ResourceExclusivePool, error) {
	pools := make([]*ResourceExclusivePool, 0)
	if err := x.Find(&pools); err != nil {
		return nil, err
	}
	return pools, nil
}

// InsertExclusivePools bulk-inserts the given exclusive-pool records and
// returns the number of affected rows.
func InsertExclusivePools(queue []ResourceExclusivePool) (int64, error) {
	return x.Insert(&queue)
}

+ 78
- 37
models/resource_specification.go View File

@@ -193,6 +193,8 @@ func (r ResourceSpecAndQueue) ConvertToResourceSpecInfo() *ResourceSpecInfo {
AiCenterCode: r.AiCenterCode,
AiCenterName: r.AiCenterName,
QueueCode: r.QueueCode,
QueueType: r.QueueType,
QueueName: r.QueueName,
QueueId: r.QueueId,
ComputeResource: r.ComputeResource,
AccCardType: r.AccCardType,
@@ -223,6 +225,7 @@ type FindSpecsOptions struct {
RequestAll bool
SpecStatus int
HasInternet SpecInternetQuery //0 all,1 no internet,2 has internet
SceneType string
}

type Specification struct {
@@ -238,48 +241,37 @@ type Specification struct {
UnitPrice int
QueueId int64
QueueCode string
QueueName string
QueueType string
HasInternet int
Cluster string
AiCenterCode string
AiCenterName string
IsExclusive bool
ExclusiveOrg string
//specs that have the same sourceSpecId, computeResource and cluster as current spec
RelatedSpecs []*Specification
}

func (Specification) TableName() string {
return "resource_specification"
}

func (s *Specification) loadRelatedSpecs(jobType JobType, hasInternet SpecInternetQuery) {
if s.RelatedSpecs != nil {
return
}
func (s *Specification) findRelatedSpecs(opts FindSpecsOptions, userId int64) []*Specification {
defaultSpecs := make([]*Specification, 0)
if s.SourceSpecId == "" {
s.RelatedSpecs = defaultSpecs
return
return defaultSpecs
}
//是否需要网络的调度策略如下:
//需要联网时只能调度到有网的分中心;不需要联网时可以调度到所有的分中心
if hasInternet == QueryNoInternetSpecs {
hasInternet = QueryAllSpecs
isUserSpecial := IsUserInExclusivePool(userId)
if isUserSpecial {
opts.SceneType = SceneTypeExclusive
} else {
opts.SceneType = SceneTypePublic
}
r, err := FindSpecs(FindSpecsOptions{
ComputeResource: s.ComputeResource,
Cluster: s.Cluster,
SourceSpecId: s.SourceSpecId,
RequestAll: false,
SpecStatus: SpecOnShelf,
JobType: jobType,
HasInternet: hasInternet,
})

r, err := FindSpecs(opts)
if err != nil {
s.RelatedSpecs = defaultSpecs
return
return defaultSpecs
}
s.RelatedSpecs = r
return r
}

func (s *Specification) ToShowString() string {
@@ -297,6 +289,22 @@ func (s *Specification) ToShowString() string {
return specName
}

// ParseResourceQueue assembles a ResourceQueue value from the queue-related
// fields carried on this specification; CardsTotalNum is taken from the
// spec's AccCardsNum.
func (s *Specification) ParseResourceQueue() ResourceQueue {
	var q ResourceQueue
	q.ID = s.QueueId
	q.QueueCode = s.QueueCode
	q.QueueName = s.QueueName
	q.QueueType = s.QueueType
	q.Cluster = s.Cluster
	q.AiCenterCode = s.AiCenterCode
	q.AiCenterName = s.AiCenterName
	q.ComputeResource = s.ComputeResource
	q.AccCardType = s.AccCardType
	q.CardsTotalNum = s.AccCardsNum
	q.HasInternet = s.HasInternet
	return q
}

func GetAvailableCenterIdsByASpec(ID int64) ([]string, error) {
spec, err := GetResourceSpecification(&ResourceSpecification{
ID: ID})
@@ -321,14 +329,39 @@ type GetAvailableCenterIdOpts struct {
}

func (s *Specification) GetAvailableCenterIds(opts GetAvailableCenterIdOpts) []string {
s.loadRelatedSpecs(opts.JobType, opts.HasInternet)
queues := s.GetAvailableQueues(opts)
centerIds := make([]string, 0)
for _, v := range queues {
centerIds = append(centerIds, v.AiCenterCode)
}
return centerIds
}

func (s *Specification) GetAvailableQueues(opts GetAvailableCenterIdOpts) []ResourceQueue {
//是否需要网络的调度策略如下:
//需要联网时只能调度到有网的分中心;不需要联网时可以调度到所有的分中心
hasInternet := opts.HasInternet
if hasInternet == QueryNoInternetSpecs {
hasInternet = QueryAllSpecs
}

specOpts := FindSpecsOptions{
ComputeResource: s.ComputeResource,
Cluster: s.Cluster,
SourceSpecId: s.SourceSpecId,
RequestAll: false,
SpecStatus: SpecOnShelf,
JobType: opts.JobType,
HasInternet: hasInternet,
}
relatedSpecs := s.findRelatedSpecs(specOpts, opts.UserId)

if len(s.RelatedSpecs) == 0 {
return make([]string, 0)
if len(relatedSpecs) == 0 {
return make([]ResourceQueue, 0)
}

//filter exclusive specs
specs := FilterExclusiveSpecs(s.RelatedSpecs, opts.UserId)
specs := FilterExclusiveSpecs(relatedSpecs, opts.UserId)

specs = HandleSpecialQueues(specs, opts.UserId, FindSpecsOptions{
JobType: opts.JobType,
@@ -336,11 +369,16 @@ func (s *Specification) GetAvailableCenterIds(opts GetAvailableCenterIdOpts) []s
ComputeResource: s.ComputeResource,
})

centerIds := make([]string, len(specs))
for i, v := range specs {
centerIds[i] = v.AiCenterCode
queueMap := make(map[int64]string, len(specs))
queues := make([]ResourceQueue, 0)
for _, v := range specs {
if _, ok := queueMap[v.QueueId]; ok {
continue
}
queues = append(queues, v.ParseResourceQueue())
queueMap[v.QueueId] = ""
}
return centerIds
return queues
}

func FilterExclusiveSpecs(r []*Specification, userId int64) []*Specification {
@@ -408,17 +446,17 @@ func HandleSpecialQueues(specs []*Specification, userId int64, opts FindSpecsOpt
if len(specs) == 0 {
return specs
}
isUserInSpecialPool := IsUserInSpecialPool(userId)
isUserInSpecialPool := IsUserInExclusivePool(userId)
if isUserInSpecialPool {
specs = handleSpecialUserSpecs(specs, userId, opts)
specs = handleExclusiveUserSpecs(specs, userId, opts)
} else {
specs = handleNormalUserSpecs(specs, opts)
}
return specs
}

func handleSpecialUserSpecs(specs []*Specification, userId int64, opts FindSpecsOptions) []*Specification {
specialQueues := GetSpecialQueueIds(opts)
func handleExclusiveUserSpecs(specs []*Specification, userId int64, opts FindSpecsOptions) []*Specification {
specialQueues := GetExclusiveQueueIds(opts)
userOrgs, err := GetOrgsByUserID(userId, true)
if err != nil {
log.Error("handleSpecialUserSpecs GetOrgsByUserID error.%v", err)
@@ -437,7 +475,7 @@ func handleSpecialUserSpecs(specs []*Specification, userId int64, opts FindSpecs
}

func handleNormalUserSpecs(specs []*Specification, opts FindSpecsOptions) []*Specification {
queues := GetSpecialQueueIds(opts)
queues := GetExclusiveQueueIds(opts)
queueIds := make([]int64, 0)
for _, queue := range queues {
queueIds = append(queueIds, queue.QueueId)
@@ -687,6 +725,9 @@ func FindSpecs(opts FindSpecsOptions) ([]*Specification, error) {
} else if opts.HasInternet == QueryHasInternetSpecs {
cond = cond.And(builder.Eq{"resource_queue.has_internet": HasInternet})
}
if opts.SceneType != "" {
cond = cond.And(builder.Eq{"resource_scene.scene_type": opts.SceneType})
}

r := make([]*Specification, 0)
s := x.Where(cond).


+ 66
- 278
models/user_business_analysis.go View File

@@ -2,6 +2,7 @@ package models

import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
@@ -351,10 +352,10 @@ func QueryUserStaticDataForUserDefine(opts *UserBusinessAnalysisQueryOptions, wi
}
CommitDatasetSizeMap, CommitDatasetNumMap, _ := queryDatasetSize(start_unix, end_unix)
SolveIssueCountMap := querySolveIssue(start_unix, end_unix)
CreateRepoCountMap, _, _ := queryUserCreateRepo(start_unix, end_unix)
CreateRepoCountMap, _, _, _ := queryUserCreateRepo(start_unix, end_unix)
LoginCountMap := queryLoginCount(start_unix, end_unix)
OpenIIndexMap := queryUserRepoOpenIIndex(start_unix, end_unix)
CloudBrainTaskMap, CloudBrainTaskItemMap := queryCloudBrainTask(start_unix, end_unix)
CloudBrainTaskMap, CloudBrainTaskItemMap, _ := queryCloudBrainTask(start_unix, end_unix)
AiModelManageMap := queryUserModel(start_unix, end_unix)
AiModelConvertMap := queryUserModelConvert(start_unix, end_unix)

@@ -628,11 +629,11 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS

CommitDatasetSizeMap, CommitDatasetNumMap, _ := queryDatasetSize(start_unix, end_unix)
SolveIssueCountMap := querySolveIssue(start_unix, end_unix)
CreateRepoCountMap, _, _ := queryUserCreateRepo(start_unix, end_unix)
CreateRepoCountMap, _, _, _ := queryUserCreateRepo(start_unix, end_unix)
LoginCountMap := queryLoginCount(start_unix, end_unix)

OpenIIndexMap := queryUserRepoOpenIIndex(startTime.Unix(), end_unix)
CloudBrainTaskMap, CloudBrainTaskItemMap := queryCloudBrainTask(start_unix, end_unix)
CloudBrainTaskMap, CloudBrainTaskItemMap, _ := queryCloudBrainTask(start_unix, end_unix)
AiModelManageMap := queryUserModel(start_unix, end_unix)
AiModelConvertMap := queryUserModelConvert(start_unix, end_unix)

@@ -771,132 +772,6 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
log.Info("refresh data finished.tableName=" + tableName + " total record:" + fmt.Sprint(insertCount))
}

// RefreshUserYearTable rebuilds the user_summary_current_year table in the
// statistics database for the window [pageStartTime, pageEndTime]: it
// truncates the table, then pages through all active non-organization users
// and inserts one year-summary row per user, aggregating commit, issue,
// dataset, repo and cloud-brain activity collected by the query* helpers.
func RefreshUserYearTable(pageStartTime time.Time, pageEndTime time.Time) {
	sess := x.NewSession()
	defer sess.Close()
	log.Info("RefreshUserYearTable start....")
	statictisSess := xStatistic.NewSession()
	defer statictisSess.Close()

	log.Info("UserYear StartTime:" + pageStartTime.Format("2006-01-02 15:04:05"))
	log.Info("UserYear EndTime time:" + pageEndTime.Format("2006-01-02 15:04:05"))

	start_unix := pageStartTime.Unix()
	end_unix := pageEndTime.Unix()

	// Per-user aggregates over the window, keyed by user id (or id-derived
	// string keys for CloudBrainTaskItemMap).
	CodeMergeCountMap := queryPullRequest(start_unix, end_unix)
	CommitCountMap := queryCommitAction(start_unix, end_unix, 5)
	mostActiveMap := queryMostActiveCommitAction(start_unix, end_unix)
	IssueCountMap := queryCreateIssue(start_unix, end_unix)

	CommentCountMap := queryComment(start_unix, end_unix)

	CommitCodeSizeMap, err := GetAllUserKPIStats(pageStartTime, pageEndTime)
	if err != nil {
		log.Info("query commit code errr.")
	} else {
		log.Info("query commit code size, len=" + fmt.Sprint(len(CommitCodeSizeMap)))
	}
	CommitDatasetSizeMap, CommitDatasetNumMap, dataSetDownloadMap := queryDatasetSize(start_unix, end_unix)
	SolveIssueCountMap := querySolveIssue(start_unix, end_unix)
	CreateRepoCountMap, DetailInfoMap, MostDownloadMap := queryUserCreateRepo(start_unix, end_unix)

	CloudBrainTaskMap, CloudBrainTaskItemMap := queryCloudBrainTask(start_unix, end_unix)

	_, CollectedDataset := queryDatasetStars(start_unix, end_unix)
	_, CreatedDataset := queryRecommedDataSet(start_unix, end_unix)

	bonusMap := getBonusMap()
	// Full rebuild: drop all previous rows before re-inserting.
	// NOTE(review): the Exec/Insert errors below are ignored.
	log.Info("truncate all data from table:user_summary_current_year ")
	statictisSess.Exec("TRUNCATE TABLE user_summary_current_year")

	// Active, non-organization users only (type != 1).
	cond := "type != 1 and is_active=true"
	count, err := sess.Where(cond).Count(new(User))
	if err != nil {
		log.Info("query user error. return.")
		return
	}
	var indexTotal int64
	indexTotal = 0
	for {
		// Page through users PAGE_SIZE at a time, ordered by id.
		sess.Select("`user`.*").Table("user").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal))
		userList := make([]*User, 0)
		sess.Find(&userList)
		for _, userRecord := range userList {
			var dateRecordAll UserBusinessAnalysisAll
			dateRecordAll.ID = userRecord.ID
			dateRecordAll.Email = userRecord.Email
			dateRecordAll.Phone = userRecord.PhoneNumber
			dateRecordAll.RegistDate = userRecord.CreatedUnix
			dateRecordAll.Name = userRecord.Name

			dateRecordAll.CodeMergeCount = getMapValue(dateRecordAll.ID, CodeMergeCountMap)
			dateRecordAll.CommitCount = getMapValue(dateRecordAll.ID, CommitCountMap)
			dateRecordAll.IssueCount = getMapValue(dateRecordAll.ID, IssueCountMap)
			dateRecordAll.CommentCount = getMapValue(dateRecordAll.ID, CommentCountMap)

			// Commit-line stats are keyed by email, not user id.
			if _, ok := CommitCodeSizeMap[dateRecordAll.Email]; !ok {
				dateRecordAll.CommitCodeSize = 0
			} else {
				dateRecordAll.CommitCodeSize = int(CommitCodeSizeMap[dateRecordAll.Email].CommitLines)
			}
			//dateRecordAll.CommitCodeSize = getMapValue(dateRecordAll.ID, CommitCodeSizeMap)
			dateRecordAll.CommitDatasetSize = getMapValue(dateRecordAll.ID, CommitDatasetSizeMap)
			dateRecordAll.CommitDatasetNum = getMapValue(dateRecordAll.ID, CommitDatasetNumMap)
			dateRecordAll.SolveIssueCount = getMapValue(dateRecordAll.ID, SolveIssueCountMap)
			dateRecordAll.CreateRepoCount = getMapValue(dateRecordAll.ID, CreateRepoCountMap)

			dateRecordAll.CloudBrainTaskNum = getMapValue(dateRecordAll.ID, CloudBrainTaskMap)
			dateRecordAll.GpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuDebugJob", CloudBrainTaskItemMap)
			dateRecordAll.NpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuDebugJob", CloudBrainTaskItemMap)
			dateRecordAll.GpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuTrainJob", CloudBrainTaskItemMap)
			dateRecordAll.NpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuTrainJob", CloudBrainTaskItemMap)
			dateRecordAll.NpuInferenceJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuInferenceJob", CloudBrainTaskItemMap)
			dateRecordAll.GpuBenchMarkJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuBenchMarkJob", CloudBrainTaskItemMap)
			dateRecordAll.CloudBrainRunTime = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_CloudBrainRunTime", CloudBrainTaskItemMap)

			// Year-in-review (annual) aggregates.
			subTime := time.Now().UTC().Sub(dateRecordAll.RegistDate.AsTime().UTC())
			mostActiveDay := ""
			if userInfo, ok := mostActiveMap[dateRecordAll.ID]; ok {
				mostActiveDay = getMostActiveJson(userInfo)
			}
			scoreMap := make(map[string]float64)
			repoInfo := getRepoDetailInfo(DetailInfoMap, dateRecordAll.ID, MostDownloadMap)
			dataSetInfo, datasetscore := getDataSetInfo(dateRecordAll.ID, CreatedDataset, dataSetDownloadMap, CommitDatasetNumMap, CollectedDataset)
			scoreMap["datasetscore"] = datasetscore
			codeInfo, codescore := getCodeInfo(&dateRecordAll)
			scoreMap["codescore"] = codescore
			cloudBrainInfo := getCloudBrainInfo(&dateRecordAll, CloudBrainTaskItemMap, scoreMap)
			playARoll := getPlayARoll(bonusMap, dateRecordAll.Name, scoreMap)
			// Days since registration, rounding any partial day up.
			exteral := 0
			if int(subTime.Hours())%24 > 0 {
				exteral = 1
			}
			re := &UserSummaryCurrentYear{
				ID:             dateRecordAll.ID,
				Name:           dateRecordAll.Name,
				Email:          dateRecordAll.Email,
				Phone:          dateRecordAll.Phone,
				RegistDate:     dateRecordAll.RegistDate,
				DateCount:      int(subTime.Hours())/24 + exteral,
				MostActiveDay:  mostActiveDay,
				RepoInfo:       repoInfo,
				DataSetInfo:    dataSetInfo,
				CodeInfo:       codeInfo,
				CloudBrainInfo: cloudBrainInfo,
				PlayARoll:      playARoll,
			}
			statictisSess.Insert(re)
		}
		indexTotal += PAGE_SIZE
		if indexTotal >= count {
			break
		}
	}
	log.Info("update user year data finished. ")
}

func getBonusWeekDataMap() map[int64][]int {
bonusMap := make(map[int64][]int)
url := setting.RecommentRepoAddr + "bonus/weekdata/record.txt"
@@ -967,47 +842,6 @@ func getWeekAndNum(name string) (int, int) {
return 0, 0
}

// getBonusMap fetches bonus records published under the recommendation repo
// and aggregates them per user name. It first downloads bonus/record.txt (a
// newline-separated list of CSV file names), then each CSV; for every data
// row it accumulates the "times", "total_bonus", "total_cardtime" and
// "total_giveup" columns into the returned map[userName]map[metric]count.
// Download or parse failures are silently skipped (best-effort).
func getBonusMap() map[string]map[string]int {
	bonusMap := make(map[string]map[string]int)
	url := setting.RecommentRepoAddr + "bonus/record.txt"
	content, err := GetContentFromPromote(url)
	if err == nil {
		filenames := strings.Split(content, "\n")
		for i := 0; i < len(filenames); i++ {
			// Strip a trailing CR left over from CRLF line endings.
			if strings.HasSuffix(filenames[i], "\r") {
				filenames[i] = filenames[i][0 : len(filenames[i])-len("\r")]
			}
			url = setting.RecommentRepoAddr + "bonus/" + filenames[i]
			csvContent, err1 := GetContentFromPromote(url)
			if err1 == nil {
				//read csv; row 0 is the header and is skipped
				lines := strings.Split(csvContent, "\n")
				for j := 1; j < len(lines); j++ {
					if strings.HasSuffix(lines[j], "\r") {
						lines[j] = lines[j][0 : len(lines[j])-len("\r")]
					}
					aLine := strings.Split(lines[j], ",")
					// Expect at least 7 columns; skip malformed rows.
					if len(aLine) < 7 {
						continue
					}
					userName := aLine[1]
					//email := lines[2]
					record, ok := bonusMap[userName]
					if !ok {
						record = make(map[string]int)
						bonusMap[userName] = record
					}
					// Accumulate across all CSV files for the same user.
					record["times"] = getMapKeyStringValue("times", record) + getIntValue(aLine[3])
					record["total_bonus"] = getMapKeyStringValue("total_bonus", record) + getIntValue(aLine[4])
					record["total_cardtime"] = getMapKeyStringValue("total_cardtime", record) + getIntValue(aLine[5])
					record["total_giveup"] = getMapKeyStringValue("total_giveup", record) + getIntValue(aLine[6])
				}
			}
		}
	}
	return bonusMap
}

func getIntValue(val string) int {
i, err := strconv.Atoi(val)
if err == nil {
@@ -1024,32 +858,14 @@ func getInt64Value(val string) int64 {
return 0
}

// getPlayARoll renders the bonus record of userName as a JSON object with
// string values ("times", "total_bonus", "total_cardtime", "total_giveup").
// As a side effect it stores a "rollscore" in scoreMap: times/4 when the
// user participated at least 4 times, otherwise 0. Returns "" when the user
// has no bonus record (scoreMap is left untouched in that case).
func getPlayARoll(bonusMap map[string]map[string]int, userName string, scoreMap map[string]float64) string {
	record, found := bonusMap[userName]
	if !found {
		return ""
	}
	roll := 0.0
	if record["times"] >= 4 {
		roll = float64(record["times"]) / float64(4)
	}
	scoreMap["rollscore"] = roll
	info := map[string]string{
		"times":          fmt.Sprint(record["times"]),
		"total_bonus":    fmt.Sprint(record["total_bonus"]),
		"total_cardtime": fmt.Sprint(record["total_cardtime"]),
		"total_giveup":   fmt.Sprint(record["total_giveup"]),
	}
	encoded, _ := json.Marshal(info)
	return string(encoded)
}

func getCloudBrainInfo(dateRecordAll *UserBusinessAnalysisAll, CloudBrainTaskItemMap map[string]int, scoreMap map[string]float64) string {
//2023:有XX 个使用启智集群资源,有XX 个使用智算网络集群,使用过的计算资源有GPU NPU GCU
//2023:你的所有任务累计运行了XXX卡时,其中 GPU资源运行XX卡时 NPU资源运行XX卡时 GCU资源运行XX卡时
func getCloudBrainInfo(dateRecordAll *UserBusinessAnalysisAll, CloudBrainTaskItemMap map[string]int, scoreMap map[string]float64, resourceItemMap map[string]int) string {
trainscore := 0.0
debugscore := 0.0
runtime := 0.0
if dateRecordAll.CloudBrainTaskNum > 0 {
cloudBrainInfo := make(map[string]string)
cloudBrainInfo := make(map[string]interface{})
cloudBrainInfo["create_task_num"] = fmt.Sprint(dateRecordAll.CloudBrainTaskNum)
cloudBrainInfo["debug_task_num"] = fmt.Sprint(dateRecordAll.GpuDebugJob + dateRecordAll.NpuDebugJob)
if dateRecordAll.GpuDebugJob+dateRecordAll.NpuDebugJob >= 50 {
@@ -1066,6 +882,9 @@ func getCloudBrainInfo(dateRecordAll *UserBusinessAnalysisAll, CloudBrainTaskIte
cloudBrainInfo["CloudBrainOne"] = fmt.Sprint(CloudBrainTaskItemMap[fmt.Sprint(dateRecordAll.ID)+"_CloudBrainOne"])
cloudBrainInfo["CloudBrainTwo"] = fmt.Sprint(CloudBrainTaskItemMap[fmt.Sprint(dateRecordAll.ID)+"_CloudBrainTwo"])
cloudBrainInfo["C2Net"] = fmt.Sprint(CloudBrainTaskItemMap[fmt.Sprint(dateRecordAll.ID)+"_C2Net"])
if resourceItemMap != nil {
cloudBrainInfo["computer_resource"] = resourceItemMap
}
cloudBrainInfoJson, _ := json.Marshal(cloudBrainInfo)
scoreMap["trainscore"] = trainscore
scoreMap["debugscore"] = debugscore
@@ -1079,25 +898,6 @@ func getCloudBrainInfo(dateRecordAll *UserBusinessAnalysisAll, CloudBrainTaskIte
}
}

// getCodeInfo renders the user's commit statistics (commit count, total
// committed lines) as JSON and returns it together with a contribution score.
// Returns ("", 0) when the user made no commits in the period.
func getCodeInfo(dateRecordAll *UserBusinessAnalysisAll) (string, float64) {
	if dateRecordAll.CommitCount > 0 {
		codeInfo := make(map[string]string)
		codeInfo["commit_count"] = fmt.Sprint(dateRecordAll.CommitCount)
		codeInfo["commit_line"] = fmt.Sprint(dateRecordAll.CommitCodeSize)
		score := 0.0
		// Score is the average lines-per-commit scaled down by 20000, with a
		// floor of commit_count/100 so frequent committers are not penalized.
		score = float64(dateRecordAll.CommitCodeSize) / float64(dateRecordAll.CommitCount) / float64(20000)
		if score < (float64(dateRecordAll.CommitCount) / float64(100)) {
			score = float64(dateRecordAll.CommitCount) / float64(100)
		}
		codeInfo["score"] = fmt.Sprintf("%.2f", score)

		codeInfoJson, _ := json.Marshal(codeInfo)
		return string(codeInfoJson), score
	} else {
		return "", 0
	}
}

func getDataSetInfo(userId int64, CreatedDataset map[int64]int, dataSetDownloadMap map[int64]int, CommitDatasetNumMap map[int64]int, CollectedDataset map[int64]int) (string, float64) {
datasetInfo := make(map[string]string)
score := 0.0
@@ -1123,60 +923,6 @@ func getDataSetInfo(userId int64, CreatedDataset map[int64]int, dataSetDownloadM
}
}

// getRepoDetailInfo builds the per-user repository summary JSON: totals,
// public/private split, download counts, and the most-downloaded repo. The
// lookup keys in repoDetailInfoMap are "<userId>_<suffix>" strings produced
// by queryUserCreateRepo. Returns "" when the user has no repository data.
func getRepoDetailInfo(repoDetailInfoMap map[string]int, userId int64, mostDownload map[int64]string) string {
	repoDetailInfo := make(map[string]string)
	if total, ok := repoDetailInfoMap[fmt.Sprint(userId)+"_total"]; ok {
		repoDetailInfo["repo_total"] = fmt.Sprint(total)
	}
	if private, ok := repoDetailInfoMap[fmt.Sprint(userId)+"_is_private"]; ok {
		repoDetailInfo["repo_is_private"] = fmt.Sprint(private)
	}
	if public, ok := repoDetailInfoMap[fmt.Sprint(userId)+"_is_public"]; ok {
		repoDetailInfo["repo_is_public"] = fmt.Sprint(public)
	}
	if download, ok := repoDetailInfoMap[fmt.Sprint(userId)+"_total_download"]; ok {
		repoDetailInfo["repo_total_download"] = fmt.Sprint(download)
	}
	if mostdownload, ok := repoDetailInfoMap[fmt.Sprint(userId)+"_most_download"]; ok {
		repoDetailInfo["repo_most_download_count"] = fmt.Sprint(mostdownload)
	}
	if mostdownloadName, ok := mostDownload[userId]; ok {
		repoDetailInfo["repo_most_download_name"] = mostdownloadName
	}
	if len(repoDetailInfo) > 0 {
		repoDetailInfoJson, _ := json.Marshal(repoDetailInfo)
		return string(repoDetailInfoJson)
	} else {
		return ""
	}
}

// getMostActiveJson renders a user's activity map as JSON: the day with the
// most activity plus, when present, the special "hour_*" entries describing a
// before-dawn commit (year/month/day hour).
//
// NOTE: this mutates userInfo — the four "hour_*" keys are deleted before the
// maximum is computed, so they never compete with real day entries.
func getMostActiveJson(userInfo map[string]int) string {
	mostActiveMap := make(map[string]string)
	if day, ok := userInfo["hour_day"]; ok {
		hour := userInfo["hour_hour"]
		month := userInfo["hour_month"]
		year := userInfo["hour_year"]
		delete(userInfo, "hour_day")
		delete(userInfo, "hour_hour")
		delete(userInfo, "hour_month")
		delete(userInfo, "hour_year")
		mostActiveMap["before_dawn"] = fmt.Sprint(year) + "/" + fmt.Sprint(month) + "/" + fmt.Sprint(day) + " " + fmt.Sprint(hour)
	}
	// Find the remaining day key with the highest activity count.
	max := 0
	max_day := ""
	for key, value := range userInfo {
		if value > max {
			max = value
			max_day = key
		}
	}
	mostActiveMap["most_active_day"] = max_day
	mostActiveMap["most_active_num"] = fmt.Sprint(max)
	mostActiveMapJson, _ := json.Marshal(mostActiveMap)
	return string(mostActiveMapJson)
}

func updateUserIndex(tableName string, statictisSess *xorm.Session, userId int64, userIndex float64) {
updateSql := "UPDATE public." + tableName + " set user_index=" + fmt.Sprint(userIndex*100) + " where id=" + fmt.Sprint(userId)
statictisSess.Exec(updateSql)
@@ -1281,10 +1027,10 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time,
}
CommitDatasetSizeMap, CommitDatasetNumMap, _ := queryDatasetSize(start_unix, end_unix)
SolveIssueCountMap := querySolveIssue(start_unix, end_unix)
CreateRepoCountMap, _, _ := queryUserCreateRepo(start_unix, end_unix)
CreateRepoCountMap, _, _, _ := queryUserCreateRepo(start_unix, end_unix)
LoginCountMap := queryLoginCount(start_unix, end_unix)
OpenIIndexMap := queryUserRepoOpenIIndex(start_unix, end_unix)
CloudBrainTaskMap, CloudBrainTaskItemMap := queryCloudBrainTask(start_unix, end_unix)
CloudBrainTaskMap, CloudBrainTaskItemMap, _ := queryCloudBrainTask(start_unix, end_unix)
AiModelManageMap := queryUserModel(start_unix, end_unix)
AiModelConvertMap := queryUserModelConvert(start_unix, end_unix)

@@ -2308,7 +2054,7 @@ func queryDatasetSize(start_unix int64, end_unix int64) (map[int64]int, map[int6
return resultSizeMap, resultNumMap, resultDownloadMap
}

func queryUserCreateRepo(start_unix int64, end_unix int64) (map[int64]int, map[string]int, map[int64]string) {
func queryUserCreateRepo(start_unix int64, end_unix int64) (map[int64]int, map[string]int, map[int64]string, map[string]map[string]interface{}) {
sess := x.NewSession()
defer sess.Close()
resultMap := make(map[int64]int)
@@ -2316,16 +2062,18 @@ func queryUserCreateRepo(start_unix int64, end_unix int64) (map[int64]int, map[s
detailInfoMap := make(map[string]int)
mostDownloadMap := make(map[int64]string)

fourSeasonMap := make(map[string]map[string]interface{})

cond := "is_fork=false and created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix)
count, err := sess.Where(cond).Count(new(Repository))
if err != nil {
log.Info("query Repository error. return.")
return resultMap, detailInfoMap, mostDownloadMap
return resultMap, detailInfoMap, mostDownloadMap, fourSeasonMap
}
var indexTotal int64
indexTotal = 0
for {
sess.Select("id,owner_id,name,is_private,clone_cnt,alias").Table("repository").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal))
sess.Select("id,owner_id,name,is_private,clone_cnt,alias,created_unix").Table("repository").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal))
repoList := make([]*Repository, 0)
sess.Find(&repoList)
log.Info("query Repository size=" + fmt.Sprint(len(repoList)))
@@ -2350,6 +2098,7 @@ func queryUserCreateRepo(start_unix int64, end_unix int64) (map[int64]int, map[s
detailInfoMap[key] = int(repoRecord.CloneCnt)
mostDownloadMap[repoRecord.OwnerID] = repoRecord.DisplayName()
}
setFourSeasonData(repoRecord, fourSeasonMap)
}
indexTotal += PAGE_SIZE
if indexTotal >= count {
@@ -2357,7 +2106,29 @@ func queryUserCreateRepo(start_unix int64, end_unix int64) (map[int64]int, map[s
}
}

return resultMap, detailInfoMap, mostDownloadMap
return resultMap, detailInfoMap, mostDownloadMap, fourSeasonMap
}

// setFourSeasonData records, per owner and per season, the first repository
// created in that season (keyed "<ownerId>_<season>"). A later repository in
// an already-populated season is ignored.
func setFourSeasonData(repoRecord *Repository, fourSeason map[string]map[string]interface{}) {
	season := ""
	switch m := repoRecord.CreatedUnix.AsTime().Month(); {
	case m >= time.January && m <= time.March:
		season = "spring"
	case m >= time.April && m <= time.June:
		season = "summer"
	case m >= time.July && m <= time.September:
		season = "autumn"
	case m >= time.October && m <= time.December:
		season = "winter"
	default:
		log.Info("no found")
	}
	key := fmt.Sprint(repoRecord.OwnerID) + "_" + season
	if _, exists := fourSeason[key]; exists {
		// Keep the earliest repository of the season.
		return
	}
	fourSeason[key] = map[string]interface{}{
		"time":        repoRecord.CreatedUnix,
		"displayName": repoRecord.DisplayName(),
	}
}

func queryUserRepoOpenIIndex(start_unix int64, end_unix int64) map[int64]float64 {
@@ -2521,17 +2292,18 @@ func isBenchMark(JobType string) bool {
return false
}

func queryCloudBrainTask(start_unix int64, end_unix int64) (map[int64]int, map[string]int) {
func queryCloudBrainTask(start_unix int64, end_unix int64) (map[int64]int, map[string]int, map[int64]map[string]int) {
sess := x.NewSession()
defer sess.Close()
resultMap := make(map[int64]int)
resultItemMap := make(map[string]int)
resourceItemMap := make(map[int64]map[string]int)

cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix)
count, err := sess.Where(cond).Unscoped().Count(new(Cloudbrain))
if err != nil {
log.Info("query cloudbrain error. return.")
return resultMap, resultItemMap
return resultMap, resultItemMap, resourceItemMap
}
log.Info("cloudbrain count=" + fmt.Sprint(count))
var indexTotal int64
@@ -2547,8 +2319,15 @@ func queryCloudBrainTask(start_unix int64, end_unix int64) (map[int64]int, map[s
} else {
resultMap[cloudTaskRecord.UserID] += 1
}
if _, ok := resourceItemMap[cloudTaskRecord.UserID]; !ok {
resourceItemMap[cloudTaskRecord.UserID] = make(map[string]int)
}

if cloudTaskRecord.Duration < 100000000 && cloudTaskRecord.Duration > 0 {
setMapKey("CloudBrainRunTime", cloudTaskRecord.UserID, int(cloudTaskRecord.Duration), resultItemMap)
resourceItemMap[cloudTaskRecord.UserID][cloudTaskRecord.ComputeResource] = resourceItemMap[cloudTaskRecord.UserID][cloudTaskRecord.ComputeResource] + int(cloudTaskRecord.Duration)
} else {
resourceItemMap[cloudTaskRecord.UserID][cloudTaskRecord.ComputeResource] = 0
}
if cloudTaskRecord.Type == 1 { //npu
setMapKey("CloudBrainTwo", cloudTaskRecord.UserID, 1, resultItemMap)
@@ -2583,6 +2362,8 @@ func queryCloudBrainTask(start_unix int64, end_unix int64) (map[int64]int, map[s
} else if cloudTaskRecord.ComputeResource == GPUResource {
if cloudTaskRecord.JobType == "TRAIN" {
setMapKey("GpuTrainJob", cloudTaskRecord.UserID, 1, resultItemMap)
} else if cloudTaskRecord.JobType == "ONLINEINFERENCE" {
setMapKey("GpuInferenceJob", cloudTaskRecord.UserID, 1, resultItemMap)
} else {
setMapKey("GpuDebugJob", cloudTaskRecord.UserID, 1, resultItemMap)
}
@@ -2594,7 +2375,7 @@ func queryCloudBrainTask(start_unix int64, end_unix int64) (map[int64]int, map[s
break
}
}
return resultMap, resultItemMap
return resultMap, resultItemMap, resourceItemMap
}

func queryUserInvitationCount(start_unix int64, end_unix int64) map[int64]int {
@@ -2674,13 +2455,20 @@ func GetContentFromPromote(url string) (string, error) {
}
}()
resp, err := http.Get(url)
if err != nil || resp.StatusCode != 200 {
if err != nil {
log.Info("Get organizations url error=" + err.Error())
return "", err
}

if resp == nil {
log.Info("respone is null")
return "", errors.New("resp is null")
}
if resp.StatusCode != 200 {
log.Info("respone code=" + fmt.Sprint(resp.StatusCode))
return "", errors.New("resp is null")
}
defer resp.Body.Close()
bytes, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Info("Get organizations url error=" + err.Error())
return "", err


+ 0
- 21
models/user_business_struct.go View File

@@ -2,27 +2,6 @@ package models

import "code.gitea.io/gitea/modules/timeutil"

// UserSummaryCurrentYear is the persisted per-user annual summary shown in
// the year-in-review report. Every *Info field holds a pre-rendered JSON
// fragment.
type UserSummaryCurrentYear struct {
	ID    int64  `xorm:"pk"`
	Email string `xorm:"NOT NULL"`
	//user
	Name  string `xorm:"NOT NULL"`
	Phone string `xorm:"NULL"`
	//user
	RegistDate timeutil.TimeStamp `xorm:"NOT NULL"`

	DateCount     int                `xorm:"NOT NULL DEFAULT 0"`
	MostActiveDay string             `xorm:" NULL "`        //08.05
	RepoInfo      string             `xorm:"varchar(1000)"` //created N projects (public/private split), total downloads, and the most-downloaded project with its count
	DataSetInfo   string             `xorm:"varchar(500)"`  //created N datasets, uploaded N dataset files, total downloads and collections
	CodeInfo      string             `xorm:"varchar(500)"`  //commit count, total committed lines, latest commit time
	CloudBrainInfo string            `xorm:"varchar(1000)"` //created N cloudbrain tasks: debug/train/inference counts, total card-hours, money saved
	//share of free compute from CloudBrain-1, CloudBrain-2, and C2Net
	PlayARoll     string `xorm:"varchar(500)"` //participation in the "open source ranking" campaign: entries, times listed, total incentive earned
	WeekBonusData string `xorm:"-"`
	Label         string `xorm:"varchar(500)"`
}

type UserBusinessAnalysisCurrentYear struct {
ID int64 `xorm:"pk"`
CountDate int64 `xorm:"pk"`


+ 640
- 0
models/user_year_summary.go View File

@@ -0,0 +1,640 @@
package models

import (
"encoding/json"
"fmt"
"sort"
"strings"
"time"

"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
)

// UserSummaryCurrentYear is the persisted per-user year-in-review summary.
// Every *Info column stores a pre-rendered JSON fragment produced by
// RefreshUserYearTable.
type UserSummaryCurrentYear struct {
	ID    int64  `xorm:"pk"`
	Email string `xorm:"NOT NULL"`
	//user
	Name  string `xorm:"NOT NULL"`
	Phone string `xorm:"NULL"`
	//user
	RegistDate timeutil.TimeStamp `xorm:"NOT NULL"`

	DateCount     int    `xorm:"NOT NULL DEFAULT 0"`
	MostActiveDay string `xorm:" NULL "`        //08.05
	RepoInfo      string `xorm:"varchar(1000)"` //created N projects (public/private split), total downloads, and the most-downloaded project with its count
	//2023: adds the first repository created in each of spring/summer/autumn/winter.

	DataSetInfo string `xorm:"varchar(500)"` //created N datasets, uploaded N dataset files, total downloads and collections
	CodeInfo    string `xorm:"varchar(500)"` //commit count, total committed lines, latest commit time
	//2023: all users' committed line counts are ranked and each user's position recorded.

	CloudBrainInfo string `xorm:"varchar(1000)"` //created N cloudbrain tasks: debug/train/inference counts, total card-hours, money saved
	//share of free compute from CloudBrain-1, CloudBrain-2, and C2Net
	//2023: task counts on the OpenI cluster vs. the C2Net cluster, and compute types used (GPU NPU GCU)
	//2023: total card-hours overall, split per compute type (GPU / NPU / GCU)

	PlayARoll           string `xorm:"varchar(500)"` //participation in the "open source ranking" campaign: entries, times listed, total incentive earned
	WeekBonusData       string `xorm:"-"`
	Label               string `xorm:"varchar(500)"`
	IssueInfo           string `xorm:"varchar(500)"`       //2023: issue statistics, including issues created
	ModelInfo           string `xorm:"varchar(500)"`       //2023: model statistics: models created, plus the most-downloaded / most-referenced model
	LoginCount          int    `xorm:"NOT NULL DEFAULT 0"` //2023: login count for the year
	ActionInfo          string `xorm:"varchar(500)"`       //2023: the busiest month on the personal activity page, and the busiest day within it
	AccumulatePointInfo string `xorm:"varchar(500)"`       //2023: total points earned and spent, and the remaining balance
	ForumInfo           string `xorm:"varchar(500)"`       //2023: forum posts published, total views and likes, and the most-viewed post
	CourseInfo          string `xorm:"varchar(500)"`       //2023: courses studied and chapters completed, with the top 4 categories
	ActionDays          string `xorm:"varchar(500)"`       //2023: number of days with activity on OpenI
}

// RefreshUserYearTable rebuilds the user_summary_current_year table for the
// period [pageStartTime, pageEndTime]: it gathers every per-user statistic
// (commits, repos, datasets, cloudbrain tasks, points, forum/bonus data, …),
// truncates the summary table, then inserts one UserSummaryCurrentYear row
// per active user. It is a no-op when pageEndTime is already in the past.
func RefreshUserYearTable(pageStartTime time.Time, pageEndTime time.Time) {
	sess := x.NewSession()
	defer sess.Close()
	log.Info("RefreshUserYearTable start....")
	statictisSess := xStatistic.NewSession()
	defer statictisSess.Close()
	if time.Now().After(pageEndTime) {
		log.Info("The time is exceed. not need to truncate.")
		return
	}
	log.Info("UserYear StartTime:" + pageStartTime.Format("2006-01-02 15:04:05"))
	log.Info("UserYear EndTime time:" + pageEndTime.Format("2006-01-02 15:04:05"))

	start_unix := pageStartTime.Unix()
	end_unix := pageEndTime.Unix()

	// Collect all per-user statistics for the period up front, keyed by user id.
	CodeMergeCountMap := queryPullRequest(start_unix, end_unix)
	CommitCountMap := queryCommitAction(start_unix, end_unix, 5)
	mostActiveMap := queryMostActiveCommitAction(start_unix, end_unix)
	IssueCountMap := queryCreateIssue(start_unix, end_unix)
	UserYearModel := queryUserYearModel(start_unix, end_unix)
	CommentCountMap := queryComment(start_unix, end_unix)
	LoginMap := queryLoginCount(start_unix, end_unix)

	existCommitCodeSize := queryCommitCodeSizeFromDb("public.user_business_analysis_current_year")

	// Sorted committed-line counts are used to rank each user (see getCodeInfo).
	commitCodeArrays := make([]int, 0)
	for _, v := range existCommitCodeSize {
		commitCodeArrays = append(commitCodeArrays, v)
	}
	sort.Ints(commitCodeArrays)
	//log.Info("query commit code size, len=" + fmt.Sprint(len(existCommitCodeSize)))

	CommitDatasetSizeMap, CommitDatasetNumMap, dataSetDownloadMap := queryDatasetSize(start_unix, end_unix)
	SolveIssueCountMap := querySolveIssue(start_unix, end_unix)
	CreateRepoCountMap, DetailInfoMap, MostDownloadMap, fourSeasonMap := queryUserCreateRepo(start_unix, end_unix)

	CloudBrainTaskMap, CloudBrainTaskItemMap, resourceItemMap := queryCloudBrainTask(start_unix, end_unix)

	_, CollectedDataset := queryDatasetStars(start_unix, end_unix)
	_, CreatedDataset := queryRecommedDataSet(start_unix, end_unix)

	PointMap := queryPointInfo(start_unix, end_unix)

	MostActiveDayMap := queryMostActiveMonth(start_unix, end_unix)

	bonusMap := getBonusMap()
	forumMap := getForumMap()
	actionDays := queryActionDays(start_unix, end_unix)
	log.Info("truncate all data from table:user_summary_current_year ")
	statictisSess.Exec("TRUNCATE TABLE user_summary_current_year")

	// Page through all active, non-organization users.
	cond := "type != 1 and is_active=true"
	count, err := sess.Where(cond).Count(new(User))
	if err != nil {
		log.Info("query user error. return.")
		return
	}
	var indexTotal int64
	indexTotal = 0
	for {
		sess.Select("`user`.*").Table("user").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal))
		userList := make([]*User, 0)
		sess.Find(&userList)
		for _, userRecord := range userList {
			// Assemble an in-memory analysis record for this user from the
			// pre-queried maps.
			var dateRecordAll UserBusinessAnalysisAll
			dateRecordAll.ID = userRecord.ID
			dateRecordAll.Email = userRecord.Email
			dateRecordAll.Phone = userRecord.PhoneNumber
			dateRecordAll.RegistDate = userRecord.CreatedUnix
			dateRecordAll.Name = userRecord.Name

			dateRecordAll.CodeMergeCount = getMapValue(dateRecordAll.ID, CodeMergeCountMap)
			dateRecordAll.CommitCount = getMapValue(dateRecordAll.ID, CommitCountMap)
			dateRecordAll.IssueCount = getMapValue(dateRecordAll.ID, IssueCountMap)
			dateRecordAll.CommentCount = getMapValue(dateRecordAll.ID, CommentCountMap)

			dateRecordAll.CommitCodeSize = getMapValue(dateRecordAll.ID, existCommitCodeSize)

			dateRecordAll.CommitDatasetSize = getMapValue(dateRecordAll.ID, CommitDatasetSizeMap)
			dateRecordAll.CommitDatasetNum = getMapValue(dateRecordAll.ID, CommitDatasetNumMap)
			dateRecordAll.SolveIssueCount = getMapValue(dateRecordAll.ID, SolveIssueCountMap)
			dateRecordAll.CreateRepoCount = getMapValue(dateRecordAll.ID, CreateRepoCountMap)
			dateRecordAll.LoginCount = getMapValue(dateRecordAll.ID, LoginMap)
			dateRecordAll.CloudBrainTaskNum = getMapValue(dateRecordAll.ID, CloudBrainTaskMap)
			dateRecordAll.GpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuDebugJob", CloudBrainTaskItemMap)
			dateRecordAll.NpuDebugJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuDebugJob", CloudBrainTaskItemMap)
			dateRecordAll.GpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuTrainJob", CloudBrainTaskItemMap)
			dateRecordAll.NpuTrainJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuTrainJob", CloudBrainTaskItemMap)
			dateRecordAll.NpuInferenceJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_NpuInferenceJob", CloudBrainTaskItemMap)
			dateRecordAll.GpuBenchMarkJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuBenchMarkJob", CloudBrainTaskItemMap)
			dateRecordAll.CloudBrainRunTime = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_CloudBrainRunTime", CloudBrainTaskItemMap)

			// Year-in-review derived fields.
			subTime := time.Now().UTC().Sub(dateRecordAll.RegistDate.AsTime().UTC())
			mostActiveDay := ""
			if userInfo, ok := mostActiveMap[dateRecordAll.ID]; ok {
				mostActiveDay = getMostActiveJson(userInfo)
			}
			scoreMap := make(map[string]float64)
			repoInfo := getRepoDetailInfo(DetailInfoMap, dateRecordAll.ID, MostDownloadMap, fourSeasonMap)
			dataSetInfo, datasetscore := getDataSetInfo(dateRecordAll.ID, CreatedDataset, dataSetDownloadMap, CommitDatasetNumMap, CollectedDataset)
			scoreMap["datasetscore"] = datasetscore
			codeInfo, codescore := getCodeInfo(&dateRecordAll, commitCodeArrays)
			scoreMap["codescore"] = codescore
			cloudBrainInfo := getCloudBrainInfo(&dateRecordAll, CloudBrainTaskItemMap, scoreMap, resourceItemMap[dateRecordAll.ID])
			playARoll := getPlayARoll(bonusMap, dateRecordAll.Name, scoreMap)
			forumInfo := getForumInfo(forumMap, dateRecordAll.Name)
			// Days since registration, rounding any partial day up.
			exteral := 0
			if int(subTime.Hours())%24 > 0 {
				exteral = 1
			}
			re := &UserSummaryCurrentYear{
				ID:                  dateRecordAll.ID,
				Name:                dateRecordAll.Name,
				Email:               dateRecordAll.Email,
				Phone:               dateRecordAll.Phone,
				RegistDate:          dateRecordAll.RegistDate,
				DateCount:           int(subTime.Hours())/24 + exteral,
				MostActiveDay:       mostActiveDay,
				RepoInfo:            repoInfo,
				DataSetInfo:         dataSetInfo,
				CodeInfo:            codeInfo,
				CloudBrainInfo:      cloudBrainInfo,
				PlayARoll:           playARoll,
				IssueInfo:           getIssueInfo(&dateRecordAll),
				ModelInfo:           getModelInfo(&dateRecordAll, UserYearModel),
				LoginCount:          dateRecordAll.LoginCount,
				AccumulatePointInfo: getPointInfo(&dateRecordAll, PointMap),
				ActionInfo:          getActionInfo(&dateRecordAll, MostActiveDayMap),
				ForumInfo:           forumInfo,
				ActionDays:          getActionDaysInfo(actionDays[dateRecordAll.ID]),
			}
			statictisSess.Insert(re)
		}
		indexTotal += PAGE_SIZE
		if indexTotal >= count {
			break
		}
	}
	log.Info("update user year data finished. ")
}

// getActionDaysInfo renders the number of active days as a decimal string.
func getActionDaysInfo(days int) string {
	return fmt.Sprintf("%d", days)
}

// getForumInfo serializes the forum statistics for the given user name to
// JSON; returns "" when the user has no forum record.
func getForumInfo(forumMap map[string]map[string]interface{}, name string) string {
	if record, found := forumMap[name]; found {
		encoded, _ := json.Marshal(record)
		return string(encoded)
	}
	return ""
}

// getActionInfo returns the JSON-encoded busiest-month/busiest-day record for
// the user, or "" when none exists.
func getActionInfo(dateRecordAll *UserBusinessAnalysisAll, MostActiveDayMap map[int64]map[string]int) string {
	info, found := MostActiveDayMap[dateRecordAll.ID]
	if !found {
		return ""
	}
	encoded, _ := json.Marshal(info)
	return string(encoded)
}

// getPointInfo returns the JSON-encoded point statistics (earned/spent/total)
// for the user, or "" when the user has no point activity.
func getPointInfo(dateRecordAll *UserBusinessAnalysisAll, PointMap map[int64]map[string]int) string {
	info, found := PointMap[dateRecordAll.ID]
	if !found {
		return ""
	}
	encoded, _ := json.Marshal(info)
	return string(encoded)
}

// getModelInfo returns the JSON-encoded model statistics for the user, or ""
// when the user created no models in the period.
func getModelInfo(dateRecordAll *UserBusinessAnalysisAll, userModelMap map[int64]map[string]interface{}) string {
	if record, found := userModelMap[dateRecordAll.ID]; found {
		encoded, _ := json.Marshal(record)
		return string(encoded)
	}
	return ""
}

// getIssueInfo renders the user's issue statistics (currently just the number
// of issues created) as a JSON object.
func getIssueInfo(dateRecordAll *UserBusinessAnalysisAll) string {
	issueInfo := map[string]string{
		"create_count": fmt.Sprint(dateRecordAll.IssueCount),
	}
	encoded, _ := json.Marshal(issueInfo)
	return string(encoded)
}

// getBonusMap downloads the 2023 bonus index file and every CSV it lists,
// then aggregates per-user bonus statistics (times, total bonus, card time,
// give-ups, award titles) keyed by user name. Files that fail to download and
// rows with fewer than 9 fields are skipped; each CSV's header row is ignored.
func getBonusMap() map[string]map[string]int {
	bonusMap := make(map[string]map[string]int)
	indexURL := setting.RecommentRepoAddr + "bonus/2023.txt"
	content, err := GetContentFromPromote(indexURL)
	if err != nil {
		return bonusMap
	}
	for _, fileName := range strings.Split(content, "\n") {
		fileName = strings.TrimSuffix(fileName, "\r")
		csvURL := setting.RecommentRepoAddr + "bonus/" + fileName
		log.Info("bonus url=" + csvURL)
		csvContent, csvErr := GetContentFromPromote(csvURL)
		if csvErr != nil {
			continue
		}
		lines := strings.Split(csvContent, "\n")
		// lines[0] is the CSV header; start from the first data row.
		for _, line := range lines[1:] {
			fields := strings.Split(strings.TrimSuffix(line, "\r"), ",")
			if len(fields) < 9 {
				continue
			}
			userName := fields[1]
			record, found := bonusMap[userName]
			if !found {
				record = make(map[string]int)
				bonusMap[userName] = record
			}
			record["times"] = getMapKeyStringValue("times", record) + getIntValue(fields[3])
			record["total_bonus"] = getMapKeyStringValue("total_bonus", record) + getIntValue(fields[4])
			record["total_cardtime"] = getMapKeyStringValue("total_cardtime", record) + getIntValue(fields[5])
			record["total_giveup"] = getMapKeyStringValue("total_giveup", record) + getIntValue(fields[6])
			record["total_award_title"] = getMapKeyStringValue("total_award_title", record) + getIntValue(fields[8])
		}
	}
	return bonusMap
}

func getForumMap() map[string]map[string]interface{} {
sep := "(-2023-)"
forumMap := make(map[string]map[string]interface{})
url := setting.RecommentRepoAddr + "forum/data.txt"
content, err := GetContentFromPromote(url)
if err == nil {
lines := strings.Split(content, "\n")
for i := 0; i < len(lines); i++ {
newLine := strings.TrimSuffix(lines[i], "\r")
//read a line
aLine := strings.Split(newLine, sep)
if len(aLine) < 6 {
continue
}
userName := aLine[0]
record, ok := forumMap[userName]
if !ok {
record = make(map[string]interface{})
forumMap[userName] = record
}
record["max_subject"] = aLine[1]
record["total_view_num"] = getIntValue(aLine[2])
record["total_num"] = getIntValue(aLine[3])
record["total_star_num"] = getIntValue(aLine[4])
record["max_view_count"] = getIntValue(aLine[5])
}
}
return forumMap
}

// getPlayARoll serializes the user's bonus-activity record to JSON and stores
// the derived "rollscore" in scoreMap. Returns "" (and leaves scoreMap
// untouched) when the user has no record.
func getPlayARoll(bonusMap map[string]map[string]int, userName string, scoreMap map[string]float64) string {
	record, found := bonusMap[userName]
	if !found {
		return ""
	}
	// A roll score is only earned after participating at least 4 times.
	rollscore := 0.0
	if record["times"] >= 4 {
		rollscore = float64(record["times"]) / 4.0
	}
	scoreMap["rollscore"] = rollscore
	bonusInfo := map[string]string{
		"times":             fmt.Sprint(record["times"]),
		"total_bonus":       fmt.Sprint(record["total_bonus"]),
		"total_cardtime":    fmt.Sprint(record["total_cardtime"]),
		"total_giveup":      fmt.Sprint(record["total_giveup"]),
		"total_award_title": fmt.Sprint(record["total_award_title"]),
	}
	bonusInfoJson, _ := json.Marshal(bonusInfo)
	return string(bonusInfoJson)
}

// queryUserYearModel aggregates, per user, the number of models created in
// [start_unix, end_unix] together with the name, download count and reference
// count of the user's most-downloaded model.
//
// Fix: "name" was missing from the Select column list even though
// aiModelRecord.Name is read below, so the reported model name was always
// empty.
func queryUserYearModel(start_unix int64, end_unix int64) map[int64]map[string]interface{} {
	sess := x.NewSession()
	defer sess.Close()
	resultMap := make(map[int64]map[string]interface{})
	cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix)
	count, err := sess.Where(cond).Count(new(AiModelManage))
	if err != nil {
		log.Info("query AiModelManage error. return.")
		return resultMap
	}
	var indexTotal int64
	indexTotal = 0
	for {
		// "name" must be selected: it is read when recording the
		// most-downloaded model below.
		sess.Select("id,user_id,name,download_count,reference_count").Table("ai_model_manage").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal))
		aiModelList := make([]*AiModelManage, 0)
		sess.Find(&aiModelList)
		log.Info("query user year AiModelManage size=" + fmt.Sprint(len(aiModelList)))
		for _, aiModelRecord := range aiModelList {
			if _, ok := resultMap[aiModelRecord.UserId]; !ok {
				// First model seen for this user.
				modelmap := make(map[string]interface{})
				modelmap["count"] = 1
				modelmap["max_download_count"] = aiModelRecord.DownloadCount
				modelmap["max_reference_count"] = aiModelRecord.ReferenceCount
				modelmap["name"] = aiModelRecord.Name
				resultMap[aiModelRecord.UserId] = modelmap
			} else {
				resultMap[aiModelRecord.UserId]["count"] = resultMap[aiModelRecord.UserId]["count"].(int) + 1
				// Track the most-downloaded model; its name and reference
				// count are recorded alongside the new maximum.
				if resultMap[aiModelRecord.UserId]["max_download_count"].(int) < aiModelRecord.DownloadCount {
					resultMap[aiModelRecord.UserId]["max_download_count"] = aiModelRecord.DownloadCount
					resultMap[aiModelRecord.UserId]["max_reference_count"] = aiModelRecord.ReferenceCount
					resultMap[aiModelRecord.UserId]["name"] = aiModelRecord.Name
				}
			}
		}
		indexTotal += PAGE_SIZE
		if indexTotal >= count {
			break
		}
	}
	return resultMap
}

// queryPointInfo aggregates point-account activity per user for the period:
// the summed "increase" and "decrease" amounts, plus "total" — the balance
// after the newest log entry (rows are paged in descending id order, so the
// first row seen for a user is its most recent one).
//
// Fix: the page-size log message was copy-pasted from queryUserYearModel and
// wrongly said "AiModelManage"; it now names PointAccountLog.
func queryPointInfo(start_unix int64, end_unix int64) map[int64]map[string]int {

	sess := x.NewSession()
	defer sess.Close()
	scoreInfoMap := make(map[int64]map[string]int)

	cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix)
	count, err := sess.Where(cond).Count(new(PointAccountLog))
	if err != nil {
		log.Info("query PointAccountLog error. return.")
		return scoreInfoMap
	}
	var indexTotal int64
	indexTotal = 0
	for {
		sess.Select("id,user_id,type,points_amount,balance_after").Table("point_account_log").Where(cond).OrderBy("id desc").Limit(PAGE_SIZE, int(indexTotal))
		pointAccountLogList := make([]*PointAccountLog, 0)
		sess.Find(&pointAccountLogList)
		log.Info("query user year PointAccountLog size=" + fmt.Sprint(len(pointAccountLogList)))
		for _, pointAccountLogRecord := range pointAccountLogList {
			if _, ok := scoreInfoMap[pointAccountLogRecord.UserId]; !ok {
				pointMap := make(map[string]int)
				scoreInfoMap[pointAccountLogRecord.UserId] = pointMap
				// First (newest) row per user carries the current balance.
				scoreInfoMap[pointAccountLogRecord.UserId]["total"] = int(pointAccountLogRecord.BalanceAfter)
			}
			if pointAccountLogRecord.Type == "increase" {
				scoreInfoMap[pointAccountLogRecord.UserId]["increase"] += int(pointAccountLogRecord.PointsAmount)
			}
			if pointAccountLogRecord.Type == "decrease" {
				scoreInfoMap[pointAccountLogRecord.UserId]["decrease"] += int(pointAccountLogRecord.PointsAmount)
			}
		}
		indexTotal += PAGE_SIZE
		if indexTotal >= count {
			break
		}
	}
	return scoreInfoMap
}

// queryMostActiveMonth scans the action table for the period and returns, per
// user, the busiest month ("MM" -> total actions) and the busiest day in that
// month ("MMDD" -> actions). Only self-initiated actions (user_id ==
// act_user_id) with op_type <= 17 or >= 21 are counted.
//
// Fix: on a page-query error the loop previously did `continue`, retrying the
// identical query without advancing indexTotal — a potential infinite loop on
// a persistent DB error. It now stops paging and returns what was gathered.
func queryMostActiveMonth(start_unix int64, end_unix int64) map[int64]map[string]int {

	sess := x.NewSession()
	defer sess.Close()

	actionInfoMap := make(map[int64]map[string]int)

	cond := "(op_type<=" + fmt.Sprint(17) + " or op_type>=21) and user_id=act_user_id and created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix)
	count, err := sess.Where(cond).Count(new(Action))
	if err != nil {
		log.Info("query Action error. return.")
		return actionInfoMap
	}
	var indexTotal int64
	indexTotal = 0
	for {
		actionList, err := sess.QueryInterface("select id,user_id,op_type,act_user_id,created_unix from public.action where " + cond + " order by id asc limit " + fmt.Sprint(PAGE_SIZE) + " offset " + fmt.Sprint(indexTotal))
		if err != nil {
			log.Info("error:" + err.Error())
			// Stop paging on error; retrying the same offset forever could
			// never terminate.
			break
		}
		log.Info("query action size=" + fmt.Sprint(len(actionList)))
		for _, actionRecord := range actionList {
			userId := convertInterfaceToInt64(actionRecord["user_id"])
			created_unix := convertInterfaceToInt64(actionRecord["created_unix"])
			created_time := time.Unix(created_unix, 0)
			// Bucket by "MMDD" (month+day) of the action.
			str_time := created_time.Format("20060102")
			str_time = str_time[4:]
			if _, ok := actionInfoMap[userId]; !ok {
				actionMap := make(map[string]int)
				actionInfoMap[userId] = actionMap
			}
			actionInfoMap[userId][str_time] += 1
		}
		indexTotal += PAGE_SIZE
		if indexTotal >= count {
			break
		}
	}
	// Reduce each user's per-day counts to the busiest month and its busiest day.
	returnMap := make(map[int64]map[string]int)
	for k, v := range actionInfoMap {
		returnMap[k] = getActionMostActiveDayMap(v)
	}
	return returnMap
}

// getActionMostActiveDayMap reduces a per-day ("MMDD" -> count) activity map
// to two entries: the busiest month ("MM" -> total actions that month) and
// the busiest day inside that month ("MMDD" -> actions that day).
func getActionMostActiveDayMap(record map[string]int) map[string]int {
	// Sum activity per month ("MM" prefix of each day key).
	monthTotals := make(map[string]int)
	for day, n := range record {
		monthTotals[day[0:2]] += n
	}
	bestMonth, bestMonthTotal := "", 0
	for month, total := range monthTotals {
		if total > bestMonthTotal {
			bestMonth, bestMonthTotal = month, total
		}
	}
	// Busiest single day restricted to the busiest month.
	bestDay, bestDayCount := "", 0
	for day, n := range record {
		if day[0:2] == bestMonth && n > bestDayCount {
			bestDay, bestDayCount = day, n
		}
	}
	result := make(map[string]int)
	result[bestMonth] = bestMonthTotal
	result[bestDay] = bestDayCount
	return result
}

// getRepoDetailInfo builds the per-user repository summary JSON: totals,
// public/private split, download counts, the most-downloaded repo, and the
// first repository created in each season. Lookup keys in repoDetailInfoMap
// and fourSeason are "<userId>_<suffix>" strings. Returns "" when the user
// has no repository data at all.
func getRepoDetailInfo(repoDetailInfoMap map[string]int, userId int64, mostDownload map[int64]string, fourSeason map[string]map[string]interface{}) string {
	prefix := fmt.Sprint(userId)
	info := make(map[string]interface{})
	// Plain counters: rename "<id>_<suffix>" keys to their "repo_*" output keys.
	counterKeys := map[string]string{
		"_total":          "repo_total",
		"_is_private":     "repo_is_private",
		"_is_public":      "repo_is_public",
		"_total_download": "repo_total_download",
		"_most_download":  "repo_most_download_count",
	}
	for suffix, outKey := range counterKeys {
		if v, ok := repoDetailInfoMap[prefix+suffix]; ok {
			info[outKey] = fmt.Sprint(v)
		}
	}
	if name, ok := mostDownload[userId]; ok {
		info["repo_most_download_name"] = name
	}
	// First repository created in each season, when present.
	for _, season := range []string{"spring", "summer", "autumn", "winter"} {
		if repo, ok := fourSeason[prefix+"_"+season]; ok {
			info["repo_"+season] = repo
		}
	}
	if len(info) == 0 {
		return ""
	}
	encoded, _ := json.Marshal(info)
	return string(encoded)
}

// getCodeInfo builds a JSON summary of a user's commit activity and returns it
// together with a numeric score. Returns ("", 0) when the user has no commits.
//
// The score is average commit lines per commit scaled by 1/20000, floored at
// CommitCount/100. "location" is the user's approximate percentile of
// CommitCodeSize within the sorted distribution commitCodeArrays.
func getCodeInfo(dateRecordAll *UserBusinessAnalysisAll, commitCodeArrays []int) (string, float64) {
	if dateRecordAll.CommitCount <= 0 {
		return "", 0
	}
	codeInfo := make(map[string]string)
	codeInfo["commit_count"] = fmt.Sprint(dateRecordAll.CommitCount)
	codeInfo["commit_line"] = fmt.Sprint(dateRecordAll.CommitCodeSize)
	score := float64(dateRecordAll.CommitCodeSize) / float64(dateRecordAll.CommitCount) / float64(20000)
	if floor := float64(dateRecordAll.CommitCount) / float64(100); score < floor {
		score = floor
	}
	log.Info("len(commitCodeArrays)=" + fmt.Sprint(len(commitCodeArrays)))
	// Bug fix: the original divided by len(commitCodeArrays) unconditionally,
	// producing "+Inf" in the JSON when the distribution slice is empty.
	if len(commitCodeArrays) > 0 {
		location := binarySearch(commitCodeArrays, dateRecordAll.CommitCodeSize)
		codeInfo["location"] = fmt.Sprintf("%.2f", float64(location+1)/float64(len(commitCodeArrays)))
	} else {
		codeInfo["location"] = "1.00"
	}
	codeInfo["score"] = fmt.Sprintf("%.2f", score)

	codeInfoJson, _ := json.Marshal(codeInfo)
	return string(codeInfoJson), score
}

// binarySearch returns the index of target in the sorted slice arr.
// When target is absent it returns the index probed last — an approximate
// rank/insertion position — NOT -1; callers use the result as a position
// within the distribution. An empty slice yields 0.
func binarySearch(arr []int, target int) int {
	lo, hi := 0, len(arr)-1
	probe := 0
	for lo <= hi {
		probe = lo + (hi-lo)/2
		switch {
		case arr[probe] == target:
			return probe
		case arr[probe] < target:
			lo = probe + 1
		default:
			hi = probe - 1
		}
	}
	return probe
}

// getMostActiveJson serializes a user's per-day activity counters to JSON.
// If the special "hour_*" keys are present they are folded into a single
// "before_dawn" timestamp string and removed from userInfo (the map is
// mutated). The remaining entries are scanned for the day with the highest
// count, emitted as "most_active_day"/"most_active_num".
func getMostActiveJson(userInfo map[string]int) string {
	result := make(map[string]string)
	if day, ok := userInfo["hour_day"]; ok {
		hour := userInfo["hour_hour"]
		month := userInfo["hour_month"]
		year := userInfo["hour_year"]
		for _, k := range []string{"hour_day", "hour_hour", "hour_month", "hour_year"} {
			delete(userInfo, k)
		}
		result["before_dawn"] = fmt.Sprintf("%d/%d/%d %d", year, month, day, hour)
	}
	bestCount := 0
	bestDay := ""
	for day, count := range userInfo {
		if count > bestCount {
			bestCount = count
			bestDay = day
		}
	}
	result["most_active_day"] = bestDay
	result["most_active_num"] = fmt.Sprint(bestCount)
	encoded, _ := json.Marshal(result)
	return string(encoded)
}

// queryActionDays counts, per user, the number of distinct days on which the
// user generated an action on themself (rows of public.action where
// user_id = act_user_id) within [start_unix, end_unix]. Rows are fetched in
// pages of PAGE_SIZE ordered by id.
//
// NOTE(review): distinct-day detection keeps only the last day string seen per
// user in timeMap, so it relies on a user's same-day rows arriving together in
// id order — TODO confirm created_unix is monotone with id.
func queryActionDays(start_unix int64, end_unix int64) map[int64]int {
	sess := x.NewSession()
	defer sess.Close()
	resultMap := make(map[int64]int)
	// timeMap holds the last "2006-01-02" day string seen per user; a change
	// of day increments that user's count.
	timeMap := make(map[int64]string)
	cond := "user_id=act_user_id and created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix)
	count, err := sess.Where(cond).Count(new(Action))
	if err != nil {
		log.Info("query action error. return.")
		return resultMap
	}
	var indexTotal int64
	for {
		actionList, err := sess.QueryInterface("select id,user_id,op_type,act_user_id,created_unix from public.action where " + cond + " order by id asc limit " + fmt.Sprint(PAGE_SIZE) + " offset " + fmt.Sprint(indexTotal))
		if err != nil {
			// Bug fix: the original did `continue` here without advancing
			// indexTotal, retrying the same failing query in an infinite
			// loop. Stop paging and return the partial result instead.
			log.Info("error:" + err.Error())
			break
		}
		log.Info("query action size=" + fmt.Sprint(len(actionList)))
		for _, actionRecord := range actionList {
			userId := convertInterfaceToInt64(actionRecord["user_id"])
			createdUnix := convertInterfaceToInt64(actionRecord["created_unix"])
			dayStr := time.Unix(createdUnix, 0).Format("2006-01-02")
			if timeMap[userId] != dayStr {
				// Zero value of a missing map entry is 0, so += covers both
				// the first-seen and subsequent cases of the original if/else.
				resultMap[userId] += 1
				timeMap[userId] = dayStr
			}
		}
		indexTotal += PAGE_SIZE
		if indexTotal >= count {
			break
		}
	}
	return resultMap
}

+ 49
- 2
modules/grampus/resty.go View File

@@ -238,11 +238,11 @@ func GetResourceSpecs(processorType string) (*models.GetGrampusResourceSpecsResu
retry := 0

sendjob:
_, err := client.R().
res, err := client.R().
SetAuthToken(TOKEN).
SetResult(&result).
Get(HOST + urlGetResourceSpecs + "?processorType=" + processorType)
log.Info("%+v", res)
if err != nil {
return nil, fmt.Errorf("resty GetResourceSpecs: %v", err)
}
@@ -503,6 +503,53 @@ sendjob:
return &result, nil
}

func GetResourceQueue() ([]models.GrampusResourceQueue, error) {
res, err := GetResourceSpecs("")
if err != nil {
return nil, err
}
queueList := make([]models.GrampusResourceQueue, 0)
queueMap := make(map[string]string, 0)
for _, spec := range res.Infos {
for _, c := range spec.Centers {
centerId := c.ID
computeResource := models.ParseComputeResourceFormGrampus(spec.SpecInfo.AccDeviceKind)
if centerId == "" || computeResource == "" {
continue
}
for _, queue := range c.ResourceSpec {
queueCode := queue.ID
accCardType := strings.ToUpper(spec.SpecInfo.AccDeviceModel)
key := centerId + "_" + computeResource + "_" + accCardType + "_" + queueCode
if _, has := queueMap[key]; has {
continue
}
var hasInternet = int(models.NoInternet)
if queue.IsNetAccess == models.GrampusNetAccess {
hasInternet = int(models.HasInternet)
}
var queueType = models.QueueTypePublic
if queue.PoolType == models.GrampusPoolTypeExclusive {
queueType = models.QueueTypeExclusive
}
queueMap[key] = ""
queueList = append(queueList, models.GrampusResourceQueue{
QueueCode: queueCode,
QueueName: queue.Name,
QueueType: queueType,
AiCenterCode: centerId,
AiCenterName: c.Name,
ComputeResource: computeResource,
AccCardType: accCardType,
HasInternet: hasInternet,
})
}

}
}
return queueList, nil
}

func GetDebugJobEvents(jobID string) (*models.GetGrampusDebugJobEventsResponse, error) {
checkSetting()
client := getRestyClient()


+ 1
- 1
modules/minio_ext/constants.go View File

@@ -40,7 +40,7 @@ const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5

// maxMultipartPutObjectSize - maximum size 5TiB of object for
// Multipart operation.
const MaxMultipartPutObjectSize = 1024 * 1024 * 1024 * 200
const MaxMultipartPutObjectSize = 1024 * 1024 * 1024 * 250

// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// we don't want to sign the request payload


+ 1
- 1
modules/storage/minio.go View File

@@ -22,7 +22,7 @@ var (
)

const (
PresignedGetUrlExpireTime = time.Hour * 24 * 1
PresignedGetUrlExpireTime = time.Hour * 24 * 7
PresignedPutUrlExpireTime = time.Hour * 24 * 7
)



+ 3
- 3
modules/storage/obs.go View File

@@ -599,7 +599,7 @@ func ObsGenMultiPartSignedUrl(objectName string, uploadId string, partNumber int
input.Bucket = setting.Bucket
input.Key = objectName
//strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
input.Expires = 24 * 60 * 60
input.Expires = 7 * 24 * 60 * 60
input.Method = obs.HttpMethodPut

input.QueryParams = map[string]string{
@@ -622,7 +622,7 @@ func GetObsCreateSignedUrlByBucketAndKey(bucket, key string) (string, error) {
input.Bucket = bucket
input.Key = key

input.Expires = 24 * 60 * 60
input.Expires = 7 * 24 * 60 * 60
input.Method = obs.HttpMethodGet
comma := strings.LastIndex(key, "/")
filename := key
@@ -652,7 +652,7 @@ func ObsGetPreSignedUrl(objectName, fileName string) (string, error) {
input.Key = objectName
//strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
input.Bucket = setting.Bucket
input.Expires = 24 * 60 * 60
input.Expires = 7 * 24 * 60 * 60

fileName = url.PathEscape(fileName)
reqParams := make(map[string]string)


+ 42
- 0
modules/structs/card_requests.go View File

@@ -0,0 +1,42 @@
package structs

type CardReq struct {
ID int64 `json:"id"`
ComputeResource string `json:"compute_resource" binding:"Required"`
CardType string `json:"card_type" binding:"Required"`
AccCardsNum string `json:"acc_cards_num" binding:"Required"`
DiskCapacity int64 `json:"disk_capacity"`
ResourceType int `json:"resource_type" binding:"Required"`
BeginDate string `json:"begin_date" binding:"Required"`
EndDate string `json:"end_date" binding:"Required"`
Contact string `json:"contact" binding:"Required"`
PhoneNumber string `json:"phone_number" binding:"Required"`
EmailAddress string `json:"email_address" binding:"Required;Email;MaxSize(254)"`
Org string `json:"org" binding:"MaxSize(500)"`
Description string `json:"description" binding:"MaxSize(3000)"`
Review string `json:"review"`
SpecIds []int64 `json:"spec_ids"`
}

type RequestSpecInfo struct {
ID int64
SourceSpecId string
AccCardsNum int
CpuCores int
MemGiB float32
GPUMemGiB float32
ShareMemGiB float32
UnitPrice int
Status int
UpdatedTime int64
RequestId int64
//queue
Cluster string
AiCenterCode string
AiCenterName string
QueueCode string
QueueId int64
ComputeResource string
AccCardType string
HasInternet int
}

+ 7
- 0
options/locale/locale_en-US.ini View File

@@ -338,6 +338,8 @@ robot = Robot
federated_learning = Federated learning
data_mining = Data mining
RISC-V_development = RISC-V development
computing_power = Computing power
computing_resources = Computing resources
domestic_computing_power = Domestic computing power

[auth]
@@ -852,6 +854,11 @@ email_notifications.onmention = Only Email on Mention
email_notifications.disable = Disable Email Notifications
email_notifications.submit = Set Email Preference

[card_request]
create_fail=Failed to create card requirement.
update_fail=Failed to update card requirement.
update_fail_no_record=The record does not exist.

[dataset]
alert = To initiate a cloud brain task, please upload the dataset in zip format.
dataset = Dataset


+ 7
- 0
options/locale/locale_zh-CN.ini View File

@@ -341,6 +341,8 @@ robot = 机器人
federated_learning = 联邦学习
data_mining = 数据挖掘
RISC-V_development = RISC-V开发
computing_power = 算力
computing_resources = 算力资源
domestic_computing_power = 国产算力

[auth]
@@ -855,6 +857,11 @@ email_notifications.enable=启用邮件通知
email_notifications.onmention=只在被提到时邮件通知
email_notifications.disable=停用邮件通知
email_notifications.submit=邮件通知设置
[card_request]
create_fail=创建算力需求失败。
update_fail=更新算力需求失败。
update_fail_no_record=修改的记录不存在。


[dataset]
alert=如果要发起云脑任务,请上传zip格式的数据集


BIN
public/img/ros-hmci/homepage1.png View File

Before After
Width: 498  |  Height: 352  |  Size: 148 KiB Width: 498  |  Height: 352  |  Size: 92 KiB

BIN
public/img/ros-hmci/mbz605.png View File

Before After
Width: 3840  |  Height: 1228  |  Size: 458 KiB Width: 3840  |  Height: 1228  |  Size: 2.0 MiB

+ 30
- 5
routers/admin/resources.go View File

@@ -41,18 +41,27 @@ func GetScenePage(ctx *context.Context) {

func GetResourceQueueList(ctx *context.Context) {
page := ctx.QueryInt("page")
pageSize := ctx.QueryInt("pageSize")
cluster := ctx.Query("cluster")
aiCenterCode := ctx.Query("center")
computeResource := ctx.Query("resource")
accCardType := ctx.Query("card")
hasInternet := ctx.QueryInt("hasInternet")
queueType := ctx.Query("queueType")

if pageSize > 1000 {
log.Error("GetResourceQueueList pageSize too large.")
ctx.JSON(http.StatusOK, response.ServerError("pageSize too large"))
return
}
list, err := resource.GetResourceQueueList(models.SearchResourceQueueOptions{
ListOptions: models.ListOptions{Page: page, PageSize: 10},
ListOptions: models.ListOptions{Page: page, PageSize: pageSize},
Cluster: cluster,
AiCenterCode: aiCenterCode,
ComputeResource: computeResource,
AccCardType: accCardType,
HasInternet: models.SpecInternetQuery(hasInternet),
QueueType: queueType,
})
if err != nil {
log.Error("GetResourceQueueList error.%v", err)
@@ -118,6 +127,7 @@ func SyncGrampusQueue(ctx *context.Context) {

func GetResourceSpecificationList(ctx *context.Context) {
page := ctx.QueryInt("page")
pageSize := ctx.QueryInt("pageSize")
queue := ctx.QueryInt64("queue")
status := ctx.QueryInt("status")
cluster := ctx.Query("cluster")
@@ -126,8 +136,14 @@ func GetResourceSpecificationList(ctx *context.Context) {
computeResource := ctx.Query("resource")
cardType := ctx.Query("cardType")
hasInternet := ctx.QueryInt("hasInternet")

if pageSize > 1000 {
log.Error("GetResourceSpecificationList pageSize too large.")
ctx.JSON(http.StatusOK, response.ServerError("pageSize too large"))
return
}
list, err := resource.GetResourceSpecificationList(models.SearchResourceSpecificationOptions{
ListOptions: models.ListOptions{Page: page, PageSize: 10},
ListOptions: models.ListOptions{Page: page, PageSize: pageSize},
QueueId: queue,
Status: status,
Cluster: cluster,
@@ -231,24 +247,33 @@ func SyncGrampusSpecs(ctx *context.Context) {

func GetResourceSceneList(ctx *context.Context) {
page := ctx.QueryInt("page")
pageSize := ctx.QueryInt("pageSize")
jobType := ctx.Query("jobType")
aiCenterCode := ctx.Query("center")
queueId := ctx.QueryInt64("queue")
isExclusive := ctx.QueryInt("IsExclusive")
isExclusive := ctx.Query("isSpecExclusive")
sceneType := ctx.Query("sceneType")
computeResource := ctx.Query("resource")
cardType := ctx.Query("cardType")
cluster := ctx.Query("cluster")
hasInternet := ctx.QueryInt("hasInternet")

if pageSize > 1000 {
log.Error("GetResourceSceneList pageSize too large.")
ctx.JSON(http.StatusOK, response.ServerError("pageSize too large"))
return
}
list, err := resource.GetResourceSceneList(models.SearchResourceSceneOptions{
ListOptions: models.ListOptions{Page: page, PageSize: 10},
ListOptions: models.ListOptions{Page: page, PageSize: pageSize},
JobType: jobType,
IsExclusive: isExclusive,
IsSpecExclusive: isExclusive,
AiCenterCode: aiCenterCode,
QueueId: queueId,
ComputeResource: computeResource,
AccCardType: cardType,
Cluster: cluster,
HasInternet: models.SpecInternetQuery(hasInternet),
SceneType: sceneType,
})
if err != nil {
log.Error("GetResourceSceneList error.%v", err)


+ 268
- 0
routers/card_request/card_request.go View File

@@ -0,0 +1,268 @@
package card_request

import (
"net/http"
"strings"
"time"

"code.gitea.io/gitea/modules/setting"

"code.gitea.io/gitea/modules/log"

api "code.gitea.io/gitea/modules/structs"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
cardrequestservice "code.gitea.io/gitea/services/card_request"
"golang.org/x/exp/slices"
)

func GetCreationInfo(ctx *context.Context) {

data, err := cardrequestservice.GetCreationInfo()

if err != nil {
log.Error("can not get creation info", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(err.Error()))
return
}

ctx.JSON(http.StatusOK, models.BaseMessageWithDataApi{Data: data})

}

func GetCardRequestList(ctx *context.Context) {

page := ctx.QueryInt("page")
if page < 1 {
page = 1
}
pageSize := ctx.QueryInt("pageSize")
if pageSize < 1 {
pageSize = setting.UI.DatasetPagingNum
}

opts := &models.CardRequestOptions{
OrderBy: models.OrderByIDDesc,
NeedSpec: false,
}
opts.ListOptions = models.ListOptions{
Page: page,
PageSize: pageSize,
}
getRequestShowList(ctx, opts, false)
}

func GetMyCardRequestList(ctx *context.Context) {

page := ctx.QueryInt("page")
if page < 1 {
page = 1
}
pageSize := ctx.QueryInt("pageSize")
if pageSize < 1 {
pageSize = setting.UI.DatasetPagingNum
}

opts := &models.CardRequestOptions{
UserID: ctx.User.ID,
OrderBy: models.OrderByIDDesc,
NeedSpec: true,
}
opts.ListOptions = models.ListOptions{
Page: page,
PageSize: pageSize,
}
getRequestShowList(ctx, opts, true)
}

func GetAdminCardRequestList(ctx *context.Context) {

page := ctx.QueryInt("page")
if page < 1 {
page = 1
}
pageSize := ctx.QueryInt("pageSize")
if pageSize < 1 {
pageSize = setting.UI.DatasetPagingNum
}
useBeginTime, _ := time.Parse(cardrequestservice.DATE_LAYOUT, ctx.Query("useBeginTime"))
useBeginTime.Unix()

opts := &models.CardRequestOptions{
OrderBy: models.OrderByStatus,
NeedSpec: true,
Keyword: strings.Trim(ctx.Query("q"), " "),
AiCenterCode: ctx.Query("center"),
Cluster: ctx.Query("cluster"),
ComputeResource: ctx.Query("resource"),
AccCardType: ctx.Query("cardType"),
QueueId: ctx.QueryInt64("queue"),
UseBeginTime: getTimeUnix(ctx.Query("useBeginTime")),
UseEndTime: getTimeUnix(ctx.Query("useEndTime")),
BeginTimeUnix: getTimeUnix(ctx.Query("beginTime")),
EndTimeUnix: getTimeUnix(ctx.Query("endTime")),
}
opts.ListOptions = models.ListOptions{
Page: page,
PageSize: pageSize,
}
getRequestShowList(ctx, opts, true)
}

func CreateCardRequest(ctx *context.Context, cardReq api.CardReq) {
data, err := cardrequestservice.GetCreationInfo()
if err != nil {
log.Error("can not get creation info", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(err.Error()))
return
}

if v, ok := data[cardReq.ComputeResource]; !ok {
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(ctx.Tr("repo.parameter_is_wrong")))
return
} else {
if !slices.Contains(v, cardReq.CardType) {
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(ctx.Tr("repo.parameter_is_wrong")))
return
}
}

if cardReq.ResourceType != models.RESOURCE_TYPE_SHARE && cardReq.ResourceType != models.RESOURCE_TYPE_EXCLUSIVE {
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(ctx.Tr("repo.parameter_is_wrong")))
return
}

err = cardrequestservice.CreateCardRequest(cardReq, ctx.User.ID)
if err != nil {
log.Error("can not create card request", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(ctx.Tr("card_request.create_fail")))
} else {
ctx.JSON(http.StatusOK, models.BaseOKMessageApi)
}

}

func UpdateCardRequestAndSpec(ctx *context.Context, cardReq api.CardReq) {

id := ctx.ParamsInt64(":id")
action := ctx.Query("action")

cardReq.ID = id
var err error
switch action {
case "agree":
err = cardrequestservice.AgreeRequest(cardReq)
case "disagree":
err = cardrequestservice.DisagreeRequest(cardReq)
case "modify":
err = cardrequestservice.UpdateCardRequestAdmin(cardReq)
}

if err != nil {
log.Error("Update error. %v", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(ctx.Tr("card_request.update_fail")))
return
}
ctx.JSON(http.StatusOK, models.BaseOKMessageApi)

}

func UpdateCardRequest(ctx *context.Context, cardReq api.CardReq) {
id := ctx.ParamsInt64(":id")

cardRequestInDB, err := models.GetCardRequestById(id)
if err != nil {
log.Error("can not get card request record", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(ctx.Tr("card_request.update_fail_no_record")))
return
}
if ctx.User.ID != cardRequestInDB.UID || cardRequestInDB.Status == models.CARD_REQEST_DISAGREE {
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(ctx.Tr("common_error.insufficient_permission")))
return
}

data, err := cardrequestservice.GetCreationInfo()
if err != nil {
log.Error("can not get creation info", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(err.Error()))
return
}

if v, ok := data[cardReq.ComputeResource]; !ok {
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(ctx.Tr("repo.parameter_is_wrong")))
return
} else {
if !slices.Contains(v, cardReq.CardType) {
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(ctx.Tr("repo.parameter_is_wrong")))
return
}
}

if cardReq.ResourceType != models.RESOURCE_TYPE_SHARE && cardReq.ResourceType != models.RESOURCE_TYPE_EXCLUSIVE {
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(ctx.Tr("repo.parameter_is_wrong")))
return
}
cardReq.ID = id
err = cardrequestservice.UpdateCardRequest(cardReq, cardRequestInDB)
if err != nil {
log.Error("Update card request failed", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(ctx.Tr("card_request.update_fail")))
return
}
ctx.JSON(http.StatusOK, models.BaseOKMessageApi)
}

func getRequestShowList(ctx *context.Context, opts *models.CardRequestOptions, containsAllParams bool) {
total, res, err := models.SearchCardRequest(opts)
if err != nil {
log.Error("search card request err", err)
ctx.JSON(http.StatusOK, models.BaseMessageWithDataApi{Data: models.CardRequestShowList{
CardRequestList: []*models.CardRequestSpecShow{},
}})
return
}

var show = make([]*models.CardRequestSpecShow, 0)
for _, v := range res {

customShow := &models.CardRequestSpecShow{
ID: v.ID,
ComputeResource: v.ComputeResource,
CardType: v.CardType,
AccCardsNum: v.AccCardsNum,
BeginDate: v.BeginDate,
EndDate: v.EndDate,
CreatedUnix: v.CreatedUnix,
Status: v.Status,
}
if containsAllParams {
customShow.UID = v.UID
customShow.UserName = v.UserName
customShow.Review = v.Review
customShow.PhoneNumber = v.PhoneNumber
customShow.EmailAddress = v.EmailAddress
customShow.Contact = v.Contact
customShow.Specs = v.Specs
customShow.Org = v.Org
customShow.Description = v.Description
customShow.ResourceType = v.ResourceType
customShow.DiskCapacity = v.DiskCapacity

}

show = append(show, customShow)
}
ctx.JSON(http.StatusOK, models.BaseMessageWithDataApi{Data: models.CardRequestShowList{
Total: total,
CardRequestList: show,
}})
}

func getTimeUnix(value string) int64 {
timeParse, err := time.Parse(cardrequestservice.DATE_LAYOUT, value)
if err != nil {
return 0
}
return timeParse.Unix()
}

+ 9
- 4
routers/home.go View File

@@ -55,9 +55,10 @@ const (
tplRepoSquare base.TplName = "explore/repos/square"
tplRepoSearch base.TplName = "explore/repos/search"
tplRoshmci base.TplName = "explore/ros-hmci"

tplExploreCenterMap base.TplName = "explore/center_map"
tplExploreDomestic base.TplName = "explore/domestic"

tplComputingPowerDemand base.TplName = "computingpower/demand"
tplComputingPowerDomestic base.TplName = "computingpower/domestic"
)

// Home render home page
@@ -821,8 +822,12 @@ func ExploreImages(ctx *context.Context) {
ctx.HTML(200, tplExploreImages)
}

func ExploreDomestic(ctx *context.Context) {
ctx.HTML(200, tplExploreDomestic)
func ComputingPowerDemand(ctx *context.Context) {
ctx.HTML(200, tplComputingPowerDemand)
}

func ComputingPowerDomestic(ctx *context.Context) {
ctx.HTML(200, tplComputingPowerDomestic)
}

func ExploreDataAnalysisUserTrend(ctx *context.Context) {


+ 1
- 1
routers/repo/cloudbrain.go View File

@@ -1495,7 +1495,7 @@ func DeleteJobsByRepoID(repoID int64) {
log.Warn("Failed to get cloudBrain info", err)
return
}
DeleteJobs(cloudBrains)
ai_task.DelCloudbrains(cloudBrains)
}

/*


+ 8
- 4
routers/repo/user_data_analysis.go View File

@@ -21,7 +21,7 @@ import (
const (
PAGE_SIZE = 2000
Excel_File_Path = "/useranalysis/"
USER_YEAR = 2022
USER_YEAR = 2023
)

func getUserMetricsExcelHeader(ctx *context.Context) map[string]string {
@@ -721,10 +721,14 @@ func TimingCountDataByDateAndReCount(date string, isReCount bool) {
log.Info("endTime time:" + endTime.Format("2006-01-02 15:04:05"))
warnEmailMessage := "用户统计信息入库失败,请尽快定位。"

//startYear := time.Date(USER_YEAR, 1, 1, 0, 0, 0, 1, t.Location())
//endYear := startYear.AddDate(1, 0, 0)
startYear := time.Date(USER_YEAR, 1, 1, 0, 0, 0, 1, t.Location())
endYear := startYear.AddDate(1, 0, 0)

//models.RefreshUserYearTable(startYear, endYear)
if time.Now().Year() == 2024 {
log.Info("the day is 2024,so not update.")
return
}
models.RefreshUserYearTable(startYear, endYear)

//query wiki data
log.Info("start to time count data")


+ 24
- 3
routers/routes/routes.go View File

@@ -12,6 +12,8 @@ import (
"text/template"
"time"

"code.gitea.io/gitea/routers/card_request"

"code.gitea.io/gitea/routers/super_compute"

"code.gitea.io/gitea/routers/tech"
@@ -398,6 +400,12 @@ func RegisterRoutes(m *macaron.Macaron) {
})
})

m.Group("/computingpower", func() {
m.Get("/demand", routers.ComputingPowerDemand)
m.Get("/domestic", routers.ComputingPowerDomestic)
}, ignSignIn)
operationReq := context.Toggle(&context.ToggleOptions{SignInRequired: true, OperationRequired: true})
m.Group("/explore", func() {
m.Get("", func(ctx *context.Context) {
ctx.Redirect(setting.AppSubURL + "/explore/repos")
@@ -431,7 +439,22 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/data_analysis/Overview", routers.ExploreDataAnalysisOverview)
m.Get("/data_analysis/BrainAnalysis", routers.ExploreDataAnalysisBrainAnalysis)
m.Get("/center_map", reqSignIn, routers.CenterMapUI)
m.Get("/domestic", routers.ExploreDomestic)

m.Group("/card_request", func() {
m.Get("/creation/required", card_request.GetCreationInfo)
m.Get("/list", card_request.GetCardRequestList)

}, ignSignIn)

m.Group("/card_request", func() {
m.Post("/create", binding.Bind(structs.CardReq{}), card_request.CreateCardRequest)
m.Get("/my_list", card_request.GetMyCardRequestList)
m.Get("/admin_list", operationReq, card_request.GetAdminCardRequestList)
m.Get("/specification/list", operationReq, admin.GetAllResourceSpecificationList)
m.Put("/update/:id", binding.Bind(structs.CardReq{}), card_request.UpdateCardRequest)
m.Put("/admin/update/:id", operationReq, bindIgnErr(structs.CardReq{}), card_request.UpdateCardRequestAndSpec)

}, reqSignIn)

}, ignSignIn)
m.Combo("/install", routers.InstallInit).Get(routers.Install).
@@ -720,8 +743,6 @@ func RegisterRoutes(m *macaron.Macaron) {
}, adminReq)
// ***** END: Admin *****

operationReq := context.Toggle(&context.ToggleOptions{SignInRequired: true, OperationRequired: true})

// ***** START: Operation *****
m.Group("/operation", func() {
m.Get("/config/recommend_org", operation.Organizations)


+ 148
- 19
services/ai_task_service/cluster/c2net.go View File

@@ -33,7 +33,11 @@ func init() {
}

func (c C2NetClusterAdapter) CreateNoteBook(req entity.CreateNoteBookTaskRequest) (*entity.CreateNoteBookTaskResponse, error) {
jobResult, err := grampus.CreateNotebookJob(convertNoteBookReq2Grampus(req))
newReq, err := convertNoteBookReq2Grampus(req)
if err != nil {
log.Error("CreateNoteBook err.req=%+v err=%v", req, err)
}
jobResult, err := grampus.CreateNotebookJob(newReq)
if err != nil {
log.Error("CreateNoteBook failed: %v", err.Error())
return nil, err
@@ -47,7 +51,12 @@ func (c C2NetClusterAdapter) CreateNoteBook(req entity.CreateNoteBookTaskRequest

func (c C2NetClusterAdapter) CreateOnlineInfer(req entity.CreateNoteBookTaskRequest) (*entity.CreateNoteBookTaskResponse, error) {
log.Info("start to CreateOnlineInfer ")
jobResult, err := grampus.CreateNotebookJob(convertOnlineInfer2Grampus(req))
newReq, err := convertOnlineInfer2Grampus(req)
if err != nil {
log.Error("CreateOnlineInfer err.req=%+v err=%v", req, err)
return nil, err
}
jobResult, err := grampus.CreateNotebookJob(newReq)
if err != nil {
log.Error("CreateNoteBook failed: %v", err.Error())
return nil, err
@@ -109,7 +118,7 @@ func ConvertGrampusImageToStandard(image models.GrampusImage) entity.ClusterImag
}
}

func convertNoteBookReq2Grampus(req entity.CreateNoteBookTaskRequest) models.CreateGrampusNotebookRequest {
func convertNoteBookReq2Grampus(req entity.CreateNoteBookTaskRequest) (models.CreateGrampusNotebookRequest, error) {
codePath := "/code"
if len(req.Tasks[0].Code) > 0 {
codePath = req.Tasks[0].Code[0].ContainerPath
@@ -129,23 +138,31 @@ func convertNoteBookReq2Grampus(req entity.CreateNoteBookTaskRequest) models.Cre
tasks := make([]models.GrampusNotebookTask, len(req.Tasks))
for i := 0; i < len(req.Tasks); i++ {
t := req.Tasks[i]
tasks[i] = convertNoteBookTask2Grampus(t, command)
task, err := convertNoteBookTask2Grampus(t, command)
if err != nil {
return models.CreateGrampusNotebookRequest{}, err
}
tasks[i] = task
}

return models.CreateGrampusNotebookRequest{Name: req.Name, Tasks: tasks}
return models.CreateGrampusNotebookRequest{Name: req.Name, Tasks: tasks}, nil
}

func convertOnlineInfer2Grampus(req entity.CreateNoteBookTaskRequest) models.CreateGrampusNotebookRequest {
func convertOnlineInfer2Grampus(req entity.CreateNoteBookTaskRequest) (models.CreateGrampusNotebookRequest, error) {

command := generateCommand(req.RepoName, req.Tasks[0].BootFile, req.PrimitiveDatasetName)

tasks := make([]models.GrampusNotebookTask, len(req.Tasks))
for i := 0; i < len(req.Tasks); i++ {
t := req.Tasks[i]
tasks[i] = convertNoteBookTask2Grampus(t, command)
task, err := convertNoteBookTask2Grampus(t, command)
if err != nil {
return models.CreateGrampusNotebookRequest{}, nil
}
tasks[i] = task
}

return models.CreateGrampusNotebookRequest{Name: req.Name, Tasks: tasks}
return models.CreateGrampusNotebookRequest{Name: req.Name, Tasks: tasks}, nil
}

func generateCommand(repoName, bootFile, datasetName string) string {
@@ -194,7 +211,7 @@ func getCopyCmd(datasetName, repoName, bootfilepath string) string {
return cmd
}

func convertNoteBookTask2Grampus(t entity.NoteBookTask, command string) models.GrampusNotebookTask {
func convertNoteBookTask2Grampus(t entity.NoteBookTask, command string) (models.GrampusNotebookTask, error) {
code := models.GrampusDataset{}
codeArray := convertContainerArray2GrampusArray(t.Code)
if codeArray != nil && len(codeArray) > 0 {
@@ -205,6 +222,10 @@ func convertNoteBookTask2Grampus(t entity.NoteBookTask, command string) models.G
if outputArray != nil && len(outputArray) > 0 {
output = outputArray[0]
}
centerIds, err := getGrampusAvailableCenterIds(t.Queues, t.ImageId, *models.GetComputeSourceInstance(t.Spec.ComputeResource), models.JobTypeDebug)
if err != nil {
return models.GrampusNotebookTask{}, err
}
return models.GrampusNotebookTask{
Name: t.Name,
ResourceSpecId: t.Spec.SourceSpecId,
@@ -216,8 +237,56 @@ func convertNoteBookTask2Grampus(t entity.NoteBookTask, command string) models.G
AutoStopDuration: t.AutoStopDuration,
Capacity: t.Capacity,
Command: command,
CenterID: t.CenterID,
CenterID: centerIds,
}, nil
}

func getGrampusAvailableCenterIds(queues []models.ResourceQueue, imageId string, computeSource models.ComputeSource, jobType models.JobType) ([]string, error) {
if len(queues) == 0 {
return []string{}, nil
}
var intersectionCenterIds []string
if imageId == "" {
for _, queue := range queues {
code := strings.TrimSuffix(queue.AiCenterCode+"+"+queue.QueueCode, "+")
intersectionCenterIds = append(intersectionCenterIds, code)
}
return intersectionCenterIds, nil
}

processType := computeSource.FullName
images, err := grampus.GetImages(processType, string(jobType))
if err != nil {
log.Warn("can not get image info from grampus", err)
return []string{}, err
}
var imageCenterIds []string
for _, image := range images.Infos {
if image.ID == imageId {
for _, centerInfo := range image.AICenterImage {
imageCenterIds = append(imageCenterIds, centerInfo.AICenterID)
}
break
}
}
if len(imageCenterIds) == 0 {
return []string{}, errors.New("image not available")
}

for _, queue := range queues {
for _, imageCenterId := range imageCenterIds {
if queue.AiCenterCode == imageCenterId {
code := strings.TrimSuffix(queue.AiCenterCode+"+"+queue.QueueCode, "+")
intersectionCenterIds = append(intersectionCenterIds, code)
break
}
}
}
if len(intersectionCenterIds) == 0 {
return intersectionCenterIds, errors.New("no center match")
}

return intersectionCenterIds, nil
}

func convertContainerArray2GrampusArray(containerDatas []entity.ContainerData) []models.GrampusDataset {
@@ -413,7 +482,12 @@ func parseC2NetEventsToOperationProfile(notebookEvents []models.GrampusJobEvents
}

func (c C2NetClusterAdapter) CreateTrainJob(req entity.CreateTrainTaskRequest) (*entity.CreateTrainTaskResponse, error) {
jobResult, err := grampus.CreateJob(convertTrainReq2Grampus(req))
newReq, err := convertTrainReq2Grampus(req)
if err != nil {
log.Error("CreateTrainJob err.req=%+v err=%v", req, err)
return nil, err
}
jobResult, err := grampus.CreateJob(newReq)
if err != nil {
log.Error("CreateNoteBook failed: %v", err.Error())
return nil, err
@@ -421,16 +495,20 @@ func (c C2NetClusterAdapter) CreateTrainJob(req entity.CreateTrainTaskRequest) (
return convertGrampus2TrainRes(jobResult), nil
}

func convertTrainReq2Grampus(req entity.CreateTrainTaskRequest) models.CreateGrampusJobRequest {
func convertTrainReq2Grampus(req entity.CreateTrainTaskRequest) (models.CreateGrampusJobRequest, error) {
command := generateGrampusTrainCommand(req)

tasks := make([]models.GrampusTasks, len(req.Tasks))
for i := 0; i < len(req.Tasks); i++ {
t := req.Tasks[i]
tasks[i] = convertTrainTask2Grampus(t, command)
task, err := convertTrainTask2Grampus(t, command)
if err != nil {
return models.CreateGrampusJobRequest{}, err
}
tasks[i] = task
}

return models.CreateGrampusJobRequest{Name: req.Name, Tasks: tasks}
return models.CreateGrampusJobRequest{Name: req.Name, Tasks: tasks}, nil
}

func generateGrampusTrainCommand(req entity.CreateTrainTaskRequest) string {
@@ -457,7 +535,7 @@ func generateGrampusTrainCommand(req entity.CreateTrainTaskRequest) string {
//export
Add(buildExportCommand(req.Name, computeResource)).
//exec code
Add(buildExecCodeCommand(path.Join(codePath, strings.ToLower(t.RepoName)), modelFilePath, t.BootFile, computeResource, req.Name, t.Params))
Add(buildExecCodeCommand(path.Join(codePath, strings.ToLower(t.RepoName)), modelFilePath, t.BootFile, computeResource, req.Name, t.Params, t.Datasets, datasetPath))

return builder.ToString()
}
@@ -526,6 +604,49 @@ func buildUnzipDatasetCommand(datasets []entity.ContainerData, datasetPath, comp
return builder
}

// buildDeleteUnzipDatasetCommand appends cleanup commands to builder that remove
// the unzipped contents of file-type datasets under datasetPath once training has
// finished, keeping only the original archive files. It is a no-op for NPU tasks
// (which do not unzip datasets into the container) and when no datasets were given.
func buildDeleteUnzipDatasetCommand(builder *entity.CommandBuilder, datasets []entity.ContainerData, datasetPath, computeSource string) {
	if computeSource == models.NPU {
		return
	}
	if len(datasets) == 0 {
		return
	}
	builder.Next(entity.NewCommand("cd", datasetPath)).
		Next(entity.NewCommand("echo", "'start to delete unzip datasets'"))

	// Only file datasets (archives) produce unzip output; directory datasets
	// are used in place and must not be deleted.
	fileDatasets := make([]entity.ContainerData, 0)
	for _, dataset := range datasets {
		if !dataset.IsDir {
			fileDatasets = append(fileDatasets, dataset)
		}
	}
	// Single dataset: delete everything under datasetPath except the archive
	// itself — first all other files, then all other directories.
	// NOTE(review): if a directory-type dataset is also mounted under
	// datasetPath, these find commands would remove it too — confirm that the
	// single-file-dataset case never coexists with directory datasets.
	if len(fileDatasets) == 1 {

		builder.Next(entity.NewCommand("find . ! -name", "'"+fileDatasets[0].Name+"'", "-type f -exec rm -f {} +"))
		builder.Next(entity.NewCommand("find . -type d ! -name", "'"+fileDatasets[0].Name+"'", "-and ! -name . -and ! -name .. -exec rm -rf {} +"))

	} else {
		// Multiple datasets: each archive unzips into a directory named after
		// the archive without its extension; remove exactly those directories.
		for i := 0; i < len(fileDatasets); i++ {

			builder.Next(entity.NewCommand("rm", "-rf", "'"+getZipFileNameExcludeExt(fileDatasets[i].Name)+"'"))

		}
	}
	builder.Next(entity.NewCommand("ls", "-l"))
	builder.Next(entity.NewCommand("echo", "'delete unzip datasets finished'"))
}

// getZipFileNameExcludeExt returns the archive file name with its compression
// extension stripped. Only the two archive formats the platform unzips are
// handled (".tar.gz" and ".zip"); any other name is returned unchanged.
func getZipFileNameExcludeExt(fileName string) string {
	// strings.TrimSuffix replaces the manual length arithmetic of the original
	// and returns the input unchanged when the suffix is absent, which also
	// covers the "no recognized extension" fall-through case.
	if strings.HasSuffix(fileName, ".tar.gz") {
		return strings.TrimSuffix(fileName, ".tar.gz")
	}
	return strings.TrimSuffix(fileName, ".zip")
}

func buildExportCommand(jobName, computeResource string) *entity.CommandBuilder {
builder := &entity.CommandBuilder{}

@@ -539,7 +660,7 @@ func buildExportCommand(jobName, computeResource string) *entity.CommandBuilder
return builder
}

func buildExecCodeCommand(codeDirPath, modelFilePath, bootFile, computeResource, jobName string, params models.Parameters) *entity.CommandBuilder {
func buildExecCodeCommand(codeDirPath, modelFilePath, bootFile, computeResource, jobName string, params models.Parameters, datasets []entity.ContainerData, datasetPath string) *entity.CommandBuilder {
builder := &entity.CommandBuilder{}
builder.Next(entity.NewCommand("echo", "'start to exec code'"))

@@ -575,6 +696,9 @@ func buildExecCodeCommand(codeDirPath, modelFilePath, bootFile, computeResource,
}

builder.Next(entity.NewCommand("result=$?"))
//delete unzip dataset
buildDeleteUnzipDatasetCommand(builder, datasets, datasetPath, computeResource)

builder.Next(entity.NewCommand("bash", "-c", "\"[[ $result -eq 0 ]] && exit 0 || exit -1\""))
return builder
}
@@ -603,7 +727,12 @@ func getNpuModelObjectKey(jobName string) string {
return setting.CodePathPrefix + jobName + RemoteModelPath + "/" + models.ModelSuffix
}

func convertTrainTask2Grampus(t entity.TrainTask, command string) models.GrampusTasks {
func convertTrainTask2Grampus(t entity.TrainTask, command string) (models.GrampusTasks, error) {
centerIds, err := getGrampusAvailableCenterIds(t.Queues, t.ImageId, *models.GetComputeSourceInstance(t.Spec.ComputeResource), models.JobTypeTrain)
if err != nil {
return models.GrampusTasks{}, err
}

return models.GrampusTasks{
Name: t.Name,
ResourceSpecId: t.ResourceSpecId,
@@ -612,13 +741,13 @@ func convertTrainTask2Grampus(t entity.TrainTask, command string) models.Grampus
Datasets: convertContainerArray2GrampusArray(t.Datasets),
Code: convertContainerArray2Grampus(t.Code),
Command: command,
CenterID: t.CenterID,
CenterID: centerIds,
ReplicaNum: 1,
Models: convertContainerArray2GrampusArray(t.PreTrainModel),
BootFile: t.BootFile,
OutPut: convertContainerArray2Grampus(t.OutPut),
WorkServerNumber: t.WorkServerNumber,
}
}, nil
}

func convertGrampus2TrainRes(res *models.CreateGrampusJobResponse) *entity.CreateTrainTaskResponse {


+ 1
- 10
services/ai_task_service/cluster/cloudbrain_two.go View File

@@ -29,9 +29,6 @@ func init() {
}

func (c CloudbrainTwoClusterAdapter) CreateNoteBook(req entity.CreateNoteBookTaskRequest) (*entity.CreateNoteBookTaskResponse, error) {
if poolInfos == nil {
json.Unmarshal([]byte(setting.PoolInfos), &poolInfos)
}
t := req.Tasks[0]

var jobResult *models.CreateNotebookResult
@@ -52,19 +49,13 @@ func (c CloudbrainTwoClusterAdapter) CreateNoteBook(req entity.CreateNoteBookTas
WorkspaceID: "0",
})
} else {
var poolId = poolInfos.PoolInfo[0].PoolId
for _, poolInfo := range poolInfos.PoolInfo {
if poolInfo.PoolName == t.Spec.QueueCode {
poolId = poolInfo.PoolId
}
}
jobResult, err = cloudbrain_two.CreateNotebook2(models.CreateNotebook2Params{
JobName: req.Name,
Description: req.Description,
Flavor: t.Spec.SourceSpecId,
Duration: t.AutoStopDuration,
ImageID: t.ImageId,
PoolID: poolId,
PoolID: t.Spec.QueueCode,
Feature: models.NotebookFeature,
Volume: models.VolumeReq{
Capacity: setting.Capacity,


+ 1
- 0
services/ai_task_service/context/context.go View File

@@ -13,6 +13,7 @@ type CreationContext struct {
GitRepo *git.Repository
Repository *models.Repository
Spec *models.Specification
Queues []models.ResourceQueue
User *models.User
CommitID string
Response *entity.CreationResponse


+ 3
- 11
services/ai_task_service/task/cloudbrain_one_notebook_task.go View File

@@ -90,7 +90,7 @@ func (t CloudbrainOneNotebookTaskTemplate) Create(ctx *context.CreationContext)
Next(t.CheckDatasets).
Next(t.CheckBranchExists).
Next(t.InsertCloudbrainRecord4Async).
AsyncNextWithErrFun(t.BuildContainerData, t.CallCreationAPI, t.AfterCallCreationAPI4Async, t.NotifyCreation, t.HandleErr4Async).
AsyncNextWithErrFun(t.BuildContainerData, t.GetAvailableQueues, t.CallCreationAPI, t.AfterCallCreationAPI4Async, t.NotifyCreation, t.HandleErr4Async).
Operate(ctx)
if err != nil {
log.Error("create CloudbrainOneNotebookTask err.%v", err)
@@ -111,6 +111,7 @@ func (t CloudbrainOneNotebookTaskTemplate) Restart(ctx *context.CreationContext)
Next(t.LoadSpec).
Next(t.CheckPointBalance).
Next(t.BuildContainerData).
Next(t.GetAvailableQueues).
Next(t.CallRestartAPI).
Next(t.CreateCloudbrainRecord4Restart).
Next(t.NotifyCreation).
@@ -134,15 +135,6 @@ func (g CloudbrainOneNotebookTaskTemplate) CallCreationAPI(ctx *context.Creation
}
form := ctx.Request

centerIds, bizErr := GetAvailableCenterIds(ctx.Spec, models.GetAvailableCenterIdOpts{
UserId: ctx.User.ID,
JobType: g.JobType,
HasInternet: form.HasInternet,
}, form.ComputeSource, form.ImageID, g.ClusterType)
if bizErr != nil {
return bizErr
}

req := entity.CreateNoteBookTaskRequest{
Name: form.JobName,
Tasks: []entity.NoteBookTask{
@@ -157,7 +149,7 @@ func (g CloudbrainOneNotebookTaskTemplate) CallCreationAPI(ctx *context.Creation
OutPut: ctx.GetContainerDataArray(entity.ContainerOutPutPath),
AutoStopDuration: autoStopDurationMs,
Capacity: setting.Capacity,
CenterID: centerIds,
Queues: ctx.Queues,
Spec: ctx.Spec,
},
},


+ 2
- 58
services/ai_task_service/task/cloudbrain_one_train_task.go View File

@@ -82,7 +82,7 @@ func (t CloudbrainOneTrainTaskTemplate) Create(ctx *context.CreationContext) (*e
Next(t.CheckDatasets).
Next(t.CheckModel).
Next(t.InsertCloudbrainRecord4Async).
AsyncNextWithErrFun(t.BuildContainerData, t.CallCreationAPI, t.AfterCallCreationAPI4Async, t.NotifyCreation, t.HandleErr4Async).
AsyncNextWithErrFun(t.BuildContainerData, t.GetAvailableQueues, t.CallCreationAPI, t.AfterCallCreationAPI4Async, t.NotifyCreation, t.HandleErr4Async).
Operate(ctx)
if err != nil {
log.Error("create GrampusNoteBookTask err.%v", err)
@@ -97,14 +97,6 @@ func (g CloudbrainOneTrainTaskTemplate) CallCreationAPI(ctx *context.CreationCon
return response.SYSTEM_ERROR
}
form := ctx.Request
centerIds, bizErr := GetAvailableCenterIds(ctx.Spec, models.GetAvailableCenterIdOpts{
UserId: ctx.User.ID,
JobType: g.JobType,
HasInternet: form.HasInternet,
}, form.ComputeSource, form.ImageID, g.ClusterType)
if bizErr != nil {
return bizErr
}
req := entity.CreateTrainTaskRequest{
Name: form.JobName,
DisplayJobName: form.DisplayJobName,
@@ -116,7 +108,7 @@ func (g CloudbrainOneTrainTaskTemplate) CallCreationAPI(ctx *context.CreationCon
ImageUrl: strings.TrimSpace(form.ImageUrl),
Datasets: ctx.GetContainerDataArray(entity.ContainerDataset),
Code: ctx.GetContainerDataArray(entity.ContainerCode),
CenterID: centerIds,
Queues: ctx.Queues,
PreTrainModel: ctx.GetContainerDataArray(entity.ContainerPreTrainModel),
BootFile: form.BootFile,
OutPut: ctx.GetContainerDataArray(entity.ContainerOutPutPath),
@@ -139,51 +131,3 @@ func (g CloudbrainOneTrainTaskTemplate) CallCreationAPI(ctx *context.CreationCon
}
return nil
}

// CallRestartAPI re-submits a cloudbrain-one train job for a restarted task.
// It resolves the AI centers the user may schedule onto, rebuilds the creation
// request from the context (spec, image, datasets, code, model, output, params)
// and calls the cluster's CreateTrainJob API, storing the resulting job id,
// status and creation time on ctx.Response.
func (g CloudbrainOneTrainTaskTemplate) CallRestartAPI(ctx *context.CreationContext) *response.BizError {
	c := g.GetMyCluster()
	if c == nil {
		// No cluster adapter registered for this template — configuration error.
		return response.SYSTEM_ERROR
	}
	form := ctx.Request
	// Filter candidate centers by user, job type, internet access, compute
	// source and image availability; fails if no center matches.
	centerIds, bizErr := GetAvailableCenterIds(ctx.Spec, models.GetAvailableCenterIdOpts{
		UserId:      ctx.User.ID,
		JobType:     g.JobType,
		HasInternet: form.HasInternet,
	}, form.ComputeSource, form.ImageID, g.ClusterType)
	if bizErr != nil {
		return bizErr
	}
	// Restart uses a single-task request mirroring the original submission.
	req := entity.CreateTrainTaskRequest{
		Name:           form.JobName,
		DisplayJobName: form.DisplayJobName,
		Tasks: []entity.TrainTask{
			{
				Name:           form.JobName,
				ResourceSpecId: ctx.Spec.SourceSpecId,
				ImageId:        form.ImageID,
				ImageUrl:       strings.TrimSpace(form.ImageUrl),
				Datasets:       ctx.GetContainerDataArray(entity.ContainerDataset),
				Code:           ctx.GetContainerDataArray(entity.ContainerCode),
				CenterID:       centerIds,
				PreTrainModel:  ctx.GetContainerDataArray(entity.ContainerPreTrainModel),
				BootFile:       form.BootFile,
				OutPut:         ctx.GetContainerDataArray(entity.ContainerOutPutPath),
				Params:         form.ParamArray,
				Spec:           ctx.Spec,
			},
		},
	}
	// Record creation time before the remote call so it reflects submission time.
	createTime := timeutil.TimeStampNow()
	res, err := c.CreateTrainJob(req)
	if err != nil {
		log.Error("CloudbrainOneTrainTaskTemplate CallRestartAPI err.req=%+v err=%v", req, err)
		return response.NewBizError(err)
	}
	ctx.Response = &entity.CreationResponse{
		JobID:      res.JobID,
		Status:     res.Status,
		CreateTime: createTime,
	}
	return nil
}

+ 3
- 28
services/ai_task_service/task/cloudbrain_two_train_task.go View File

@@ -1,14 +1,11 @@
package task

import (
"encoding/json"
"strings"

"code.gitea.io/gitea/entity"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/modelarts"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/routers/response"
"code.gitea.io/gitea/services/ai_task_service/context"
@@ -86,7 +83,7 @@ func (t CloudbrainTwoTrainTaskTemplate) Create(ctx *context.CreationContext) (*e
Next(t.CheckDatasets).
Next(t.CheckModel).
Next(t.InsertCloudbrainRecord4Async).
AsyncNextWithErrFun(t.BuildContainerData, t.CallCreationAPI, t.AfterCallCreationAPI4Async, t.NotifyCreation, t.HandleErr4Async).
AsyncNextWithErrFun(t.BuildContainerData, t.GetAvailableQueues, t.CallCreationAPI, t.AfterCallCreationAPI4Async, t.NotifyCreation, t.HandleErr4Async).
Operate(ctx)
if err != nil {
log.Error("create GrampusNoteBookTask err.%v", err)
@@ -100,29 +97,7 @@ func (g CloudbrainTwoTrainTaskTemplate) CallCreationAPI(ctx *context.CreationCon
if c == nil {
return response.SYSTEM_ERROR
}
var resourcePools modelarts.ResourcePool
if err := json.Unmarshal([]byte(setting.ResourcePools), &resourcePools); err != nil {
log.Error("Unmarshal error. %v", err)
return response.NewBizError(err)
} else if len(resourcePools.Info) == 0 {
log.Error("UresourcePools.Info is empty. %v", err)
return response.SYSTEM_ERROR
}
modelarts_poolid := resourcePools.Info[0].ID
for _, t := range resourcePools.Info {
if t.Value == ctx.Spec.QueueCode {
modelarts_poolid = t.ID
}
}
form := ctx.Request
centerIds, bizErr := GetAvailableCenterIds(ctx.Spec, models.GetAvailableCenterIdOpts{
UserId: ctx.User.ID,
JobType: g.JobType,
HasInternet: form.HasInternet,
}, form.ComputeSource, form.ImageID, g.ClusterType)
if bizErr != nil {
return bizErr
}
req := entity.CreateTrainTaskRequest{
Name: form.JobName,
DisplayJobName: form.DisplayJobName,
@@ -136,13 +111,13 @@ func (g CloudbrainTwoTrainTaskTemplate) CallCreationAPI(ctx *context.CreationCon
Datasets: ctx.GetContainerDataArray(entity.ContainerDataset),
Code: ctx.GetContainerDataArray(entity.ContainerCode),
LogPath: ctx.GetContainerDataArray(entity.ContainerLogPath),
CenterID: centerIds,
Queues: ctx.Queues,
PreTrainModel: ctx.GetContainerDataArray(entity.ContainerPreTrainModel),
BootFile: form.BootFile,
OutPut: ctx.GetContainerDataArray(entity.ContainerOutPutPath),
Params: form.ParamArray,
Spec: ctx.Spec,
PoolId: modelarts_poolid,
PoolId: ctx.Spec.QueueCode,
WorkServerNumber: form.WorkServerNumber,
},
},


+ 2
- 10
services/ai_task_service/task/grampus_notebook_task.go View File

@@ -156,7 +156,7 @@ func (t GrampusNoteBookTaskTemplate) Create(ctx *context.CreationContext) (*enti
Next(t.CheckBranchExists).
Next(t.CheckModel).
Next(t.InsertCloudbrainRecord4Async).
AsyncNextWithErrFun(t.BuildContainerData, t.CallCreationAPI, t.AfterCallCreationAPI4Async, t.NotifyCreation, t.HandleErr4Async).
AsyncNextWithErrFun(t.BuildContainerData, t.GetAvailableQueues, t.CallCreationAPI, t.AfterCallCreationAPI4Async, t.NotifyCreation, t.HandleErr4Async).
Operate(ctx)
if err != nil {
log.Error("create GrampusNoteBookTask err.%v", err)
@@ -199,14 +199,6 @@ func (g GrampusNoteBookTaskTemplate) CallCreationAPI(ctx *context.CreationContex
return response.SYSTEM_ERROR
}
form := ctx.Request
centerIds, bizErr := GetAvailableCenterIds(ctx.Spec, models.GetAvailableCenterIdOpts{
UserId: ctx.User.ID,
JobType: g.JobType,
HasInternet: form.HasInternet,
}, form.ComputeSource, form.ImageID, g.ClusterType)
if bizErr != nil {
return bizErr
}
imageUrl := strings.TrimSpace(form.ImageUrl)
if form.ImageID != "" {
imageUrl = ""
@@ -223,7 +215,7 @@ func (g GrampusNoteBookTaskTemplate) CallCreationAPI(ctx *context.CreationContex
Code: ctx.GetContainerDataArray(entity.ContainerCode),
AutoStopDuration: autoStopDurationMs,
Capacity: setting.Capacity,
CenterID: centerIds,
Queues: ctx.Queues,
Spec: ctx.Spec,
},
},


+ 2
- 10
services/ai_task_service/task/grampus_online_infer_task.go View File

@@ -80,7 +80,7 @@ func (t GrampusOnlineInferTaskTemplate) Create(ctx *context.CreationContext) (*e
Next(t.CheckBranchExists).
Next(t.CheckModel).
Next(t.InsertCloudbrainRecord4Async).
AsyncNextWithErrFun(t.BuildContainerData, t.CallCreationAPI, t.AfterCallCreationAPI4Async, t.NotifyCreation, t.HandleErr4Async).
AsyncNextWithErrFun(t.BuildContainerData, t.GetAvailableQueues, t.CallCreationAPI, t.AfterCallCreationAPI4Async, t.NotifyCreation, t.HandleErr4Async).
Operate(ctx)
if err != nil {
log.Error("create GrampusNoteBookTask err.%v", err)
@@ -97,14 +97,6 @@ func (g GrampusOnlineInferTaskTemplate) CallCreationAPI(ctx *context.CreationCon
return response.SYSTEM_ERROR
}
form := ctx.Request
centerIds, bizErr := GetAvailableCenterIds(ctx.Spec, models.GetAvailableCenterIdOpts{
UserId: ctx.User.ID,
JobType: g.JobType,
HasInternet: form.HasInternet,
}, form.ComputeSource, form.ImageID, g.ClusterType)
if bizErr != nil {
return bizErr
}
imageUrl := strings.TrimSpace(form.ImageUrl)
if form.ImageID != "" {
imageUrl = ""
@@ -129,7 +121,7 @@ func (g GrampusOnlineInferTaskTemplate) CallCreationAPI(ctx *context.CreationCon
OutPut: ctx.GetContainerDataArray(entity.ContainerOutPutPath),
AutoStopDuration: -1,
Capacity: setting.Capacity,
CenterID: centerIds,
Queues: ctx.Queues,
Spec: ctx.Spec,
BootFile: ctx.Request.BootFile,
},


+ 2
- 58
services/ai_task_service/task/grampus_train_task.go View File

@@ -116,7 +116,7 @@ func (t GrampusTrainTaskTemplate) Create(ctx *context.CreationContext) (*entity.
Next(t.CheckDatasets).
Next(t.CheckModel).
Next(t.InsertCloudbrainRecord4Async).
AsyncNextWithErrFun(t.BuildContainerData, t.CallCreationAPI, t.AfterCallCreationAPI4Async, t.NotifyCreation, t.HandleErr4Async).
AsyncNextWithErrFun(t.BuildContainerData, t.GetAvailableQueues, t.CallCreationAPI, t.AfterCallCreationAPI4Async, t.NotifyCreation, t.HandleErr4Async).
Operate(ctx)
if err != nil {
log.Error("create GrampusTrainTaskTemplate err.%v", err)
@@ -131,14 +131,6 @@ func (g GrampusTrainTaskTemplate) CallCreationAPI(ctx *context.CreationContext)
return response.SYSTEM_ERROR
}
form := ctx.Request
centerIds, bizErr := GetAvailableCenterIds(ctx.Spec, models.GetAvailableCenterIdOpts{
UserId: ctx.User.ID,
JobType: g.JobType,
HasInternet: form.HasInternet,
}, form.ComputeSource, form.ImageID, g.ClusterType)
if bizErr != nil {
return bizErr
}
imageUrl := strings.TrimSpace(form.ImageUrl)
if form.ImageID != "" {
imageUrl = ""
@@ -154,7 +146,7 @@ func (g GrampusTrainTaskTemplate) CallCreationAPI(ctx *context.CreationContext)
ImageUrl: imageUrl,
Datasets: ctx.GetContainerDataArray(entity.ContainerDataset),
Code: ctx.GetContainerDataArray(entity.ContainerCode),
CenterID: centerIds,
Queues: ctx.Queues,
PreTrainModel: ctx.GetContainerDataArray(entity.ContainerPreTrainModel),
BootFile: form.BootFile,
OutPut: ctx.GetContainerDataArray(entity.ContainerOutPutPath),
@@ -179,51 +171,3 @@ func (g GrampusTrainTaskTemplate) CallCreationAPI(ctx *context.CreationContext)
}
return nil
}

// CallRestartAPI re-submits a grampus (C2Net) train job for a restarted task.
// It resolves the AI centers available to the user, rebuilds the single-task
// creation request from the context and calls the cluster's CreateTrainJob API,
// recording job id, status and creation time on ctx.Response.
func (g GrampusTrainTaskTemplate) CallRestartAPI(ctx *context.CreationContext) *response.BizError {
	c := g.GetMyCluster()
	if c == nil {
		// No cluster adapter registered for this template — configuration error.
		return response.SYSTEM_ERROR
	}
	form := ctx.Request
	// Candidate centers filtered by user, job type, internet access, compute
	// source and image availability; fails if no center matches.
	centerIds, bizErr := GetAvailableCenterIds(ctx.Spec, models.GetAvailableCenterIdOpts{
		UserId:      ctx.User.ID,
		JobType:     g.JobType,
		HasInternet: form.HasInternet,
	}, form.ComputeSource, form.ImageID, g.ClusterType)
	if bizErr != nil {
		return bizErr
	}
	req := entity.CreateTrainTaskRequest{
		Name:           form.JobName,
		DisplayJobName: form.DisplayJobName,
		Tasks: []entity.TrainTask{
			{
				Name:           form.JobName,
				ResourceSpecId: ctx.Spec.SourceSpecId,
				ImageId:        form.ImageID,
				ImageUrl:       strings.TrimSpace(form.ImageUrl),
				Datasets:       ctx.GetContainerDataArray(entity.ContainerDataset),
				Code:           ctx.GetContainerDataArray(entity.ContainerCode),
				CenterID:       centerIds,
				PreTrainModel:  ctx.GetContainerDataArray(entity.ContainerPreTrainModel),
				BootFile:       form.BootFile,
				OutPut:         ctx.GetContainerDataArray(entity.ContainerOutPutPath),
				Params:         form.ParamArray,
				Spec:           ctx.Spec,
			},
		},
	}
	// Capture submission time before the remote call.
	createTime := timeutil.TimeStampNow()
	res, err := c.CreateTrainJob(req)
	if err != nil {
		log.Error("GrampusTrainTaskTemplate CallRestartAPI err.req=%+v err=%v", req, err)
		return response.NewBizError(err)
	}
	ctx.Response = &entity.CreationResponse{
		JobID:      res.JobID,
		Status:     res.Status,
		CreateTime: createTime,
	}
	return nil
}

+ 10
- 0
services/ai_task_service/task/opt_handler.go View File

@@ -43,6 +43,7 @@ type CreationHandler interface {
NotifyCreation(ctx *context.CreationContext) *response.BizError
HandleErr4Async(ctx *context.CreationContext) *response.BizError
CheckNotebookCount(ctx *context.CreationContext) *response.BizError
GetAvailableQueues(ctx *context.CreationContext) *response.BizError
}

//DefaultCreationHandler CreationHandler的默认实现,公共逻辑可以在此结构体中实现
@@ -633,6 +634,15 @@ func (DefaultCreationHandler) CheckPointBalance(ctx *context.CreationContext) *r
return nil
}

// GetAvailableQueues looks up the resource queues the requesting user may
// schedule this job onto (filtered by job type and internet requirement) and
// stores them on the creation context for the later CallCreationAPI step.
// It never fails: an empty result simply leaves ctx.Queues empty.
func (DefaultCreationHandler) GetAvailableQueues(ctx *context.CreationContext) *response.BizError {
	opts := models.GetAvailableCenterIdOpts{
		UserId:      ctx.User.ID,
		JobType:     ctx.Request.JobType,
		HasInternet: ctx.Request.HasInternet,
	}
	ctx.Queues = ctx.Spec.GetAvailableQueues(opts)
	return nil
}

func (DefaultCreationHandler) CallCreationAPI(ctx *context.CreationContext) *response.BizError {
log.Error("CallCreationAPI not implements")
return response.SYSTEM_ERROR


+ 9
- 2
services/ai_task_service/task/task_extend.go View File

@@ -139,6 +139,12 @@ func getCloudBrainDatasetInfo4Local(uuid string, datasetname string, isNeedDown

//根据实际调度的智算中心修正规格
func correctAITaskSpec(task *models.Cloudbrain) {
defer func() {
if err := recover(); err != nil {
combinedErr := fmt.Errorf("%s\n%s", err, log.Stack(2))
log.Error("PANIC:%v", combinedErr)
}
}()
if task.AiCenter == "" {
return
}
@@ -161,13 +167,14 @@ func correctAITaskSpec(task *models.Cloudbrain) {
log.Error("correctAITaskSpec GetCloudbrainSpecByID spec is empty.taskId=%d ", task.ID)
return
}
if oldSpec.AiCenterCode == realCenterCode {
if oldSpec.AiCenterCode == realCenterCode && oldSpec.QueueCode == task.QueueCode {
return
}
//智算中心不一样时才需要处理
//所属资源池队列不一样时才需要处理
r, err := models.FindSpecs(models.FindSpecsOptions{
SourceSpecId: oldSpec.SourceSpecId,
AiCenterCode: realCenterCode,
QueueCode: task.QueueCode,
})
if err != nil {
log.Error("correctAITaskSpec FindSpecs err.taskId=%d err=%v", task.ID, err)


+ 11
- 44
services/ai_task_service/task/task_service.go View File

@@ -11,7 +11,6 @@ import (
"strings"

"code.gitea.io/gitea/entity"
"code.gitea.io/gitea/manager/client/grampus"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/convert"
"code.gitea.io/gitea/modules/git"
@@ -260,6 +259,7 @@ func UpdateByQueryResponse(res *entity.QueryTaskResponse, task *models.Cloudbrai
if res.CenterId != "" && res.CenterName != "" {
task.AiCenter = res.CenterId + "+" + res.CenterName
}
task.QueueCode = res.QueueCode
oldStatus := task.Status
newStatus := TransAITaskStatus(res.Status)

@@ -733,49 +733,6 @@ func SyncAITaskStatus() {
}
}
}
// GetAvailableCenterIds returns the AI-center ids the given specification may be
// scheduled onto for this user/job. For C2Net tasks with an explicit image it
// additionally intersects the spec's centers with the centers that actually host
// the image (queried from grampus); an empty intersection is a business error.
// For other clusters, or when no image id is given, the spec's centers are
// returned unfiltered.
func GetAvailableCenterIds(specification *models.Specification, opts models.GetAvailableCenterIdOpts, computeSource *models.ComputeSource,
	imageId string, clusterType entity.ClusterType) ([]string, *response.BizError) {
	centerIds := specification.GetAvailableCenterIds(opts)

	// Image-based filtering only applies to C2Net with a concrete image id and
	// a non-empty candidate set.
	if len(centerIds) == 0 || imageId == "" || clusterType != entity.C2Net {
		return centerIds, nil
	}

	processType := computeSource.FullName
	images, err := grampus.GetImages(processType, string(opts.JobType))
	if err != nil {
		// Best-effort: if grampus cannot be queried, fall back to the
		// unfiltered center list instead of failing the submission.
		log.Warn("can not get image info from grampus", err)
		return centerIds, nil
	}
	// Collect the centers on which the requested image is available.
	var imageCenterIds []string
	for _, image := range images.Infos {
		if image.ID == imageId {
			for _, centerInfo := range image.AICenterImage {
				imageCenterIds = append(imageCenterIds, centerInfo.AICenterID)
			}
			break
		}
	}
	if len(imageCenterIds) == 0 {
		// Unknown image (or no center info) — do not filter.
		return centerIds, nil
	}

	// Intersect spec-available centers with image-available centers.
	var intersectionCenterIds []string
	for _, centerId := range centerIds {
		for _, imageCenterId := range imageCenterIds {
			if centerId == imageCenterId {
				intersectionCenterIds = append(intersectionCenterIds, centerId)
				break
			}
		}
	}
	if len(intersectionCenterIds) == 0 {
		// The image exists but on none of the user's available centers.
		return intersectionCenterIds, response.NO_CENTER_MATCH
	}

	return intersectionCenterIds, nil

}

func HandleNoJobIdAITasks() {
defer func() {
@@ -832,6 +789,16 @@ func DelCloudbrain(task *models.Cloudbrain) *response.BizError {
return t.Delete(task.ID)
}

// DelCloudbrains deletes every cloudbrain task in the given list.
// Deletion is best-effort: a failure on one task is logged and the loop
// moves on to the next, and the function always reports success.
func DelCloudbrains(tasks []*models.Cloudbrain) *response.BizError {
	for _, task := range tasks {
		if bizErr := DelCloudbrain(task); bizErr != nil {
			log.Error("delete cloudbrain err.id=%d err=%v", task.ID, bizErr)
		}
	}
	return nil
}

func HandleNewAITaskStop(cloudbrainId int64) (result *entity.AITaskBriefInfo, isHandled bool, err error) {
task, err := models.GetCloudbrainByCloudbrainID(cloudbrainId)
if err != nil {


+ 146
- 0
services/card_request/card_request.go View File

@@ -0,0 +1,146 @@
package card_request

import (
"time"

"code.gitea.io/gitea/models"
api "code.gitea.io/gitea/modules/structs"
)

const DATE_LAYOUT = "2006-01-02"

// GetCreationInfo returns the card types selectable when creating a card
// request, grouped by resource type: the key is the resource type and the
// value lists the display names of the card types available under it.
func GetCreationInfo() (map[string][]string, error) {
	xpuInfoBase, err := models.GetXPUInfos()
	if err != nil {
		return nil, err
	}

	xpuInfoMap := make(map[string][]string)
	for _, xpuInfo := range xpuInfoBase {
		// append on a missing key operates on the zero-value nil slice, so the
		// original exists/not-exists branch is unnecessary.
		xpuInfoMap[xpuInfo.ResourceType] = append(xpuInfoMap[xpuInfo.ResourceType], xpuInfo.CardTypeShow)
	}
	return xpuInfoMap, nil
}
// AgreeRequest approves a card request, recording the spec ids that were
// granted as part of the approval.
func AgreeRequest(cardReq api.CardReq) error {
	review := models.CardRequestReview{
		ID:      cardReq.ID,
		SpecIds: cardReq.SpecIds,
	}
	return models.AgreeCardRequest(review)
}

// DisagreeRequest rejects a card request, storing the reviewer's comment.
func DisagreeRequest(cardReq api.CardReq) error {
	review := models.CardRequestReview{
		ID:     cardReq.ID,
		Review: cardReq.Review,
	}
	return models.DisagreeCardRequest(review)
}

// UpdateCardRequestAdmin overwrites every editable field of an existing card
// request on behalf of an administrator. BeginDate and EndDate must be in
// "2006-01-02" form; an unparseable date aborts the update before anything is
// built or persisted.
func UpdateCardRequestAdmin(cardReq api.CardReq) error {
	// Validate the date range first so an invalid request fails fast and the
	// model struct is only constructed for valid input.
	beginTime, err := time.Parse(DATE_LAYOUT, cardReq.BeginDate)
	if err != nil {
		return err
	}
	endTime, err := time.Parse(DATE_LAYOUT, cardReq.EndDate)
	if err != nil {
		return err
	}
	request := models.CardRequest{
		ID:              cardReq.ID,
		ComputeResource: cardReq.ComputeResource,
		CardType:        cardReq.CardType,
		AccCardsNum:     cardReq.AccCardsNum,
		EmailAddress:    cardReq.EmailAddress,
		DiskCapacity:    cardReq.DiskCapacity,
		Contact:         cardReq.Contact,
		PhoneNumber:     cardReq.PhoneNumber,
		BeginDate:       cardReq.BeginDate,
		EndDate:         cardReq.EndDate,
		Description:     cardReq.Description,
		Org:             cardReq.Org,
		ResourceType:    cardReq.ResourceType,
		// Unix timestamps are derived from the validated dates above.
		BeginUnix: beginTime.Unix(),
		EndUnix:   endTime.Unix(),
	}
	return models.UpdateCardRequest(&request)
}

// UpdateCardRequest applies user-submitted changes to an existing card request.
// DiskCapacity, Org and Description are editable at any stage; the remaining
// fields (resource, card type, dates, contact details) may only be changed
// while the request is still in the committed (not yet reviewed) state.
// Dates must be in "2006-01-02" form.
//
// Fix over the original: dates are validated BEFORE any restricted field is
// copied onto the caller's request, so an invalid date no longer leaves the
// in-memory *models.CardRequest partially overwritten on error.
func UpdateCardRequest(cardReq api.CardReq, request *models.CardRequest) error {
	if request.Status == models.CARD_REQUEST_COMMIT {
		// Validate first: nothing restricted is mutated on a bad date.
		beginTime, err := time.Parse(DATE_LAYOUT, cardReq.BeginDate)
		if err != nil {
			return err
		}
		endTime, err := time.Parse(DATE_LAYOUT, cardReq.EndDate)
		if err != nil {
			return err
		}

		request.ComputeResource = cardReq.ComputeResource
		request.CardType = cardReq.CardType
		request.AccCardsNum = cardReq.AccCardsNum
		request.ResourceType = cardReq.ResourceType
		request.BeginDate = cardReq.BeginDate
		request.EndDate = cardReq.EndDate

		request.Contact = cardReq.Contact
		request.EmailAddress = cardReq.EmailAddress
		request.PhoneNumber = cardReq.PhoneNumber

		request.BeginUnix = beginTime.Unix()
		request.EndUnix = endTime.Unix()
	}

	// Always-editable fields.
	request.DiskCapacity = cardReq.DiskCapacity
	request.Org = cardReq.Org
	request.Description = cardReq.Description

	return models.UpdateCardRequest(request)
}

// CreateCardRequest persists a new card request submitted by user uid with the
// initial CARD_REQUEST_COMMIT status. BeginDate and EndDate must be in
// "2006-01-02" form; an unparseable date aborts creation before the record is
// built or stored.
func CreateCardRequest(cardReq api.CardReq, uid int64) error {
	// Validate the date range first so nothing is constructed for bad input.
	beginTime, err := time.Parse(DATE_LAYOUT, cardReq.BeginDate)
	if err != nil {
		return err
	}
	endTime, err := time.Parse(DATE_LAYOUT, cardReq.EndDate)
	if err != nil {
		return err
	}

	bean := &models.CardRequest{
		UID:             uid,
		ComputeResource: cardReq.ComputeResource,
		CardType:        cardReq.CardType,
		AccCardsNum:     cardReq.AccCardsNum,
		EmailAddress:    cardReq.EmailAddress,
		DiskCapacity:    cardReq.DiskCapacity,
		Contact:         cardReq.Contact,
		PhoneNumber:     cardReq.PhoneNumber,
		BeginDate:       cardReq.BeginDate,
		EndDate:         cardReq.EndDate,
		Description:     cardReq.Description,
		Org:             cardReq.Org,
		ResourceType:    cardReq.ResourceType,
		// New requests always start in the committed (awaiting review) state.
		Status:    models.CARD_REQUEST_COMMIT,
		BeginUnix: beginTime.Unix(),
		EndUnix:   endTime.Unix(),
	}

	return models.CreateCardRequest(bean)
}

+ 43
- 47
services/cloudbrain/resource/resource_queue.go View File

@@ -5,7 +5,6 @@ import (
"code.gitea.io/gitea/modules/grampus"
"code.gitea.io/gitea/modules/log"
"fmt"
"strings"
)

func AddResourceQueue(req models.ResourceQueueReq) error {
@@ -18,6 +17,8 @@ func AddResourceQueue(req models.ResourceQueueReq) error {
func UpdateResourceQueue(queueId int64, req models.ResourceQueueReq) error {
if _, err := models.UpdateResourceCardsTotalNumAndInternetStatus(queueId, models.ResourceQueue{
CardsTotalNum: req.CardsTotalNum,
QueueType: req.QueueType,
QueueName: req.QueueName,
Remark: req.Remark,
HasInternet: req.HasInternet,
}); err != nil {
@@ -54,8 +55,9 @@ func GetResourceAiCenters() ([]models.ResourceAiCenterRes, error) {
}

func SyncGrampusQueue(doerId int64) error {
r, err := grampus.GetAiCenters(1, 100)
r, err := grampus.GetResourceQueue()
if err != nil {
log.Error("Get grampus resource queue failed.err=%v", err)
return err
}
log.Info("SyncGrampusQueue result = %+v", r)
@@ -63,56 +65,50 @@ func SyncGrampusQueue(doerId int64) error {
queueInsertList := make([]models.ResourceQueue, 0)
existIds := make([]int64, 0)

for _, center := range r.Infos {
for _, device := range center.AccDevices {
computeResource := models.ParseComputeResourceFormGrampus(device.Kind)
accCardType := strings.ToUpper(device.Model)
if computeResource == "" {
continue
}
//Determine if this quque already exists.if exist,update params
//if not exist,insert a new record
oldQueue, err := models.GetResourceQueue(&models.ResourceQueue{
for _, queue := range r {
computeResource := queue.ComputeResource
accCardType := queue.AccCardType
oldQueue, err := models.GetResourceQueue(&models.ResourceQueue{
Cluster: models.C2NetCluster,
AiCenterCode: queue.AiCenterCode,
ComputeResource: computeResource,
AccCardType: accCardType,
QueueCode: queue.QueueCode,
})
if err != nil {
return err
}

hasInternet := queue.HasInternet
if oldQueue == nil {
queueInsertList = append(queueInsertList, models.ResourceQueue{
Cluster: models.C2NetCluster,
AiCenterCode: center.Id,
AiCenterCode: queue.AiCenterCode,
AiCenterName: queue.AiCenterName,
ComputeResource: computeResource,
AccCardType: accCardType,
IsAutomaticSync: true,
HasInternet: hasInternet,
CreatedBy: doerId,
UpdatedBy: doerId,
QueueCode: queue.QueueCode,
QueueName: queue.QueueName,
QueueType: queue.QueueType,
})
} else {
existIds = append(existIds, oldQueue.ID)
queueUpdateList = append(queueUpdateList, models.ResourceQueue{
ID: oldQueue.ID,
ComputeResource: computeResource,
AiCenterName: queue.AiCenterName,
AccCardType: accCardType,
UpdatedBy: doerId,
HasInternet: hasInternet,
QueueName: queue.QueueName,
QueueType: queue.QueueType,
})
if err != nil {
return err
}

var hasInternet int
if center.IsNetAccess {
hasInternet = 2
} else {
hasInternet = 1
}
if oldQueue == nil {
queueInsertList = append(queueInsertList, models.ResourceQueue{
Cluster: models.C2NetCluster,
AiCenterCode: center.Id,
AiCenterName: center.Name,
ComputeResource: computeResource,
AccCardType: accCardType,
IsAutomaticSync: true,
HasInternet: hasInternet,
CreatedBy: doerId,
UpdatedBy: doerId,
})
} else {
existIds = append(existIds, oldQueue.ID)
queueUpdateList = append(queueUpdateList, models.ResourceQueue{
ID: oldQueue.ID,
ComputeResource: computeResource,
AiCenterName: center.Name,
AccCardType: accCardType,
UpdatedBy: doerId,
HasInternet: hasInternet,
})
}

}

}
return models.SyncGrampusQueues(queueUpdateList, queueInsertList, existIds)
}


+ 1
- 0
services/cloudbrain/resource/resource_scene.go View File

@@ -5,6 +5,7 @@ import (
)

func AddResourceScene(req models.ResourceSceneReq) error {

if err := models.InsertResourceScene(req); err != nil {
return err
}


+ 8
- 0
services/cloudbrain/resource/resource_specification.go View File

@@ -255,6 +255,14 @@ func AddSpecOperateLog(doerId int64, operateType string, newValue, oldValue *mod

func FindAvailableSpecs(userId int64, opts models.FindSpecsOptions) ([]*models.Specification, error) {
opts.SpecStatus = models.SpecOnShelf

isUserSpecial := models.IsUserInExclusivePool(userId)
if isUserSpecial {
opts.SceneType = models.SceneTypeExclusive
} else {
opts.SceneType = models.SceneTypePublic
}

r, err := models.FindSpecs(opts)
if err != nil {
log.Error("FindAvailableSpecs error.%v", err)


+ 1
- 1
templates/admin/navbar.tmpl View File

@@ -1,5 +1,5 @@
<div class="ui secondary pointing tabular top attached borderless menu stackable new-menu navbar" style="margin-top:0">
<div class="item-container">
<div class="item-container" style="position:sticky;top:0;">
<a class="{{if .PageIsAdminDashboard}}active{{end}} item" href="{{AppSubUrl}}/admin">
{{.i18n.Tr "admin.dashboard"}}
</a>


+ 19
- 6
templates/base/head_navbar.tmpl View File

@@ -7,7 +7,7 @@
<i class="sidebar icon"></i>
</div>
</div>
<div class="item brand" style="padding-right:1.2rem">
<div class="item brand" style="padding-right:0.6rem">
<a href="/">
<!-- <img class="ui mini image" style="height: 1.3rem;" src="{{StaticUrlPrefix}}/img/git-logo.svg"> -->
<div>
@@ -46,7 +46,15 @@
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_base"}}</span></a>
<a class="item" href="{{AppSubUrl}}/extension/mind"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_pengcheng"}}</span></a>
</div>
</div>
</div>
<div class="ui simple dropdown item" >
<span class="menu-new-dot">{{.i18n.Tr "explore.computing_power"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/computingpower/demand"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.computing_resources"}}</span></a>
<a class="item" href="{{AppSubUrl}}/computingpower/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>
</div>
</div>
<div class="ui simple dropdown item" id='dropdown_explore'>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
@@ -54,7 +62,6 @@
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
<a class="item" href="{{AppSubUrl}}/explore/organizations">{{.i18n.Tr "explore.organizations"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/images">{{.i18n.Tr "explore.images"}}</a>
<!--<a class="item" href="{{AppSubUrl}}/explore/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>-->
{{if .IsOperator}}
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>
@@ -94,8 +101,15 @@
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_base"}}</span></a>
<a class="item" href="{{AppSubUrl}}/extension/mind"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_pengcheng"}}</span></a>
</div>
</div>
</div>
<div class="ui simple dropdown item" >
<span class="menu-new-dot">{{.i18n.Tr "explore.computing_power"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/computingpower/demand"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.computing_resources"}}</span></a>
<a class="item" href="{{AppSubUrl}}/computingpower/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>
</div>
</div>
<div class="ui simple dropdown item" id='dropdown_PageHome'>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
@@ -103,7 +117,6 @@
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
<a class="item" href="{{AppSubUrl}}/explore/organizations">{{.i18n.Tr "explore.organizations"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/images">{{.i18n.Tr "explore.images"}}</a>
<!--<a class="item" href="{{AppSubUrl}}/explore/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>-->
{{if .IsOperator}}
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>


+ 18
- 4
templates/base/head_navbar_fluid.tmpl View File

@@ -43,7 +43,15 @@
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_base"}}</span></a>
<a class="item" href="{{AppSubUrl}}/extension/mind"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_pengcheng"}}</span></a>
</div>
</div>
</div>
<div class="ui simple dropdown item" >
<span class="menu-new-dot">{{.i18n.Tr "explore.computing_power"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/computingpower/demand"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.computing_resources"}}</span></a>
<a class="item" href="{{AppSubUrl}}/computingpower/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>
</div>
</div>
<div class="ui dropdown item" id='dropdown_explore'>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
@@ -51,7 +59,6 @@
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
<a class="item" href="{{AppSubUrl}}/explore/organizations">{{.i18n.Tr "explore.organizations"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/images">{{.i18n.Tr "explore.images"}}</a>
<!--<a class="item" href="{{AppSubUrl}}/explore/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>-->
{{if .IsOperator}}
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>
@@ -89,7 +96,15 @@
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_base"}}</span></a>
<a class="item" href="{{AppSubUrl}}/extension/mind"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_pengcheng"}}</span></a>
</div>
</div>
</div>
<div class="ui simple dropdown item" >
<span class="menu-new-dot">{{.i18n.Tr "explore.computing_power"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/computingpower/demand"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.computing_resources"}}</span></a>
<a class="item" href="{{AppSubUrl}}/computingpower/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>
</div>
</div>
<div class="ui dropdown item" id='dropdown_PageHome'>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
@@ -97,7 +112,6 @@
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
<a class="item" href="{{AppSubUrl}}/explore/organizations">{{.i18n.Tr "explore.organizations"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/images">{{.i18n.Tr "explore.images"}}</a>
<!--<a class="item" href="{{AppSubUrl}}/explore/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>-->
{{if .IsOperator}}
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>


+ 18
- 4
templates/base/head_navbar_home.tmpl View File

@@ -35,7 +35,15 @@
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new">{{.i18n.Tr "repo.model_base"}}</span></a>
<a class="item" href="{{AppSubUrl}}/extension/mind"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_pengcheng"}}</span></a>
</div>
</div>
</div>
<div class="ui simple dropdown item" >
<span class="menu-new-dot">{{.i18n.Tr "explore.computing_power"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/computingpower/demand"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.computing_resources"}}</span></a>
<a class="item" href="{{AppSubUrl}}/computingpower/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>
</div>
</div>
<div class="ui dropdown item" id='dropdown_explore'>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
@@ -43,7 +51,6 @@
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
<a class="item" href="{{AppSubUrl}}/explore/organizations">{{.i18n.Tr "explore.organizations"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/images">{{.i18n.Tr "explore.images"}}</a>
<!--<a class="item" href="{{AppSubUrl}}/explore/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>-->
{{if .IsOperator}}
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>
@@ -83,7 +90,15 @@
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new" style="margin-right: 20px;" style="margin-right: 20px;">{{.i18n.Tr "repo.model_base"}}</span></a>
<a class="item" href="{{AppSubUrl}}/extension/mind"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_pengcheng"}}</span></a>
</div>
</div>
</div>
<div class="ui simple dropdown item" >
<span class="menu-new-dot">{{.i18n.Tr "explore.computing_power"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/computingpower/demand"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.computing_resources"}}</span></a>
<a class="item" href="{{AppSubUrl}}/computingpower/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>
</div>
</div>
<div class="ui dropdown item" id='dropdown_PageHome'>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
@@ -91,7 +106,6 @@
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
<a class="item" href="{{AppSubUrl}}/explore/organizations">{{.i18n.Tr "explore.organizations"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/images">{{.i18n.Tr "explore.images"}}</a>
<!--<a class="item" href="{{AppSubUrl}}/explore/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>-->
{{if .IsOperator}}
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>


+ 18
- 4
templates/base/head_navbar_pro.tmpl View File

@@ -45,7 +45,15 @@
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_base"}}</span></a>
<a class="item" href="{{AppSubUrl}}/extension/mind"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_pengcheng"}}</span></a>
</div>
</div>
</div>
<div class="ui simple dropdown item" >
<span class="menu-new-dot">{{.i18n.Tr "explore.computing_power"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/computingpower/demand"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.computing_resources"}}</span></a>
<a class="item" href="{{AppSubUrl}}/computingpower/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>
</div>
</div>
<div class="ui dropdown item" id='dropdown_explore'>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
@@ -53,7 +61,6 @@
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
<a class="item" href="{{AppSubUrl}}/explore/organizations">{{.i18n.Tr "explore.organizations"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/images">{{.i18n.Tr "explore.images"}}</a>
<!--<a class="item" href="{{AppSubUrl}}/explore/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>-->
{{if .IsOperator}}
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>
@@ -93,7 +100,15 @@
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_base"}}</span></a>
<a class="item" href="{{AppSubUrl}}/extension/mind"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_pengcheng"}}</span></a>
</div>
</div>
</div>
<div class="ui simple dropdown item" >
<span class="menu-new-dot">{{.i18n.Tr "explore.computing_power"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/computingpower/demand"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.computing_resources"}}</span></a>
<a class="item" href="{{AppSubUrl}}/computingpower/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>
</div>
</div>
<div class="ui dropdown item" id='dropdown_PageHome'>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
@@ -101,7 +116,6 @@
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
<a class="item" href="{{AppSubUrl}}/explore/organizations">{{.i18n.Tr "explore.organizations"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/images">{{.i18n.Tr "explore.images"}}</a>
<!--<a class="item" href="{{AppSubUrl}}/explore/domestic"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "explore.domestic_computing_power"}}</span></a>-->
{{if .IsOperator}}
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>


+ 8
- 0
templates/computingpower/demand.tmpl View File

@@ -0,0 +1,8 @@
{{template "base/head" .}}
<link rel="stylesheet" href="{{StaticUrlPrefix}}/css/vp-computingpower-demand.css?v={{MD5 AppVer}}"/>
{{if .IsOperator}}
<script>window.IS_OPERATOR = true;</script>
{{end}}
<div id="__vue-root"></div>
<script src="{{StaticUrlPrefix}}/js/vp-computingpower-demand.js?v={{MD5 AppVer}}"></script>
{{template "base/footer" .}}

+ 10
- 0
templates/computingpower/domestic.tmpl View File

@@ -0,0 +1,10 @@
{{template "base/head" .}}
<link rel="stylesheet" href="{{StaticUrlPrefix}}/css/vp-computingpower-domestic.css?v={{MD5 AppVer}}"/>
{{if .IsOperator}}
<script>window.IS_OPERATOR = true;</script>
{{end}}
<div>
<div id="__vue-root"></div>
</div>
<script src="{{StaticUrlPrefix}}/js/vp-computingpower-domestic.js?v={{MD5 AppVer}}"></script>
{{template "base/footer" .}}

+ 0
- 7
templates/explore/domestic.tmpl View File

@@ -1,7 +0,0 @@
{{template "base/head" .}}
<link rel="stylesheet" href="{{StaticUrlPrefix}}/css/vp-explore-domestic.css?v={{MD5 AppVer}}"/>
<div>
<div id="__vue-root"></div>
</div>
<script src="{{StaticUrlPrefix}}/js/vp-explore-domestic.js?v={{MD5 AppVer}}"></script>
{{template "base/footer" .}}

+ 0
- 1
vendor/golang.org/x/crypto/acme/version_go112.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build go1.12
// +build go1.12

package acme



+ 0
- 1
vendor/golang.org/x/crypto/argon2/blamka_amd64.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build amd64 && gc && !purego
// +build amd64,gc,!purego

package argon2



+ 0
- 1
vendor/golang.org/x/crypto/argon2/blamka_amd64.s View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build amd64 && gc && !purego
// +build amd64,gc,!purego

#include "textflag.h"



+ 0
- 1
vendor/golang.org/x/crypto/argon2/blamka_ref.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !amd64 || purego || !gc
// +build !amd64 purego !gc

package argon2



+ 0
- 1
vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build go1.7 && amd64 && gc && !purego
// +build go1.7,amd64,gc,!purego

package blake2b



+ 0
- 1
vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build go1.7 && amd64 && gc && !purego
// +build go1.7,amd64,gc,!purego

#include "textflag.h"



+ 0
- 1
vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !go1.7 && amd64 && gc && !purego
// +build !go1.7,amd64,gc,!purego

package blake2b



+ 0
- 1
vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build amd64 && gc && !purego
// +build amd64,gc,!purego

#include "textflag.h"



+ 0
- 1
vendor/golang.org/x/crypto/blake2b/blake2b_ref.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !amd64 || purego || !gc
// +build !amd64 purego !gc

package blake2b



+ 0
- 1
vendor/golang.org/x/crypto/blake2b/register.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build go1.9
// +build go1.9

package blake2b



+ 1
- 2
vendor/golang.org/x/crypto/chacha20/chacha_arm64.go View File

@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.11 && gc && !purego
// +build go1.11,gc,!purego
//go:build gc && !purego

package chacha20



+ 1
- 2
vendor/golang.org/x/crypto/chacha20/chacha_arm64.s View File

@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.11 && gc && !purego
// +build go1.11,gc,!purego
//go:build gc && !purego

#include "textflag.h"



+ 1
- 2
vendor/golang.org/x/crypto/chacha20/chacha_noasm.go View File

@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build (!arm64 && !s390x && !ppc64le) || (arm64 && !go1.11) || !gc || purego
// +build !arm64,!s390x,!ppc64le arm64,!go1.11 !gc purego
//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego

package chacha20



+ 0
- 1
vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

package chacha20



+ 0
- 1
vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s View File

@@ -20,7 +20,6 @@
// due to the calling conventions and initialization of constants.

//go:build gc && !purego
// +build gc,!purego

#include "textflag.h"



+ 0
- 1
vendor/golang.org/x/crypto/chacha20/chacha_s390x.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

package chacha20



+ 0
- 1
vendor/golang.org/x/crypto/chacha20/chacha_s390x.s View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

#include "go_asm.h"
#include "textflag.h"


+ 0
- 1
vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go View File

@@ -1,7 +1,6 @@
// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.

//go:build amd64 && gc && !purego
// +build amd64,gc,!purego

package field



+ 0
- 1
vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s View File

@@ -1,7 +1,6 @@
// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.

//go:build amd64 && gc && !purego
// +build amd64,gc,!purego

#include "textflag.h"



+ 0
- 1
vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !amd64 || !gc || purego
// +build !amd64 !gc purego

package field



+ 0
- 1
vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build arm64 && gc && !purego
// +build arm64,gc,!purego

package field



+ 0
- 1
vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build arm64 && gc && !purego
// +build arm64,gc,!purego

#include "textflag.h"



+ 0
- 1
vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !arm64 || !gc || purego
// +build !arm64 !gc purego

package field



+ 0
- 71
vendor/golang.org/x/crypto/ed25519/ed25519.go View File

@@ -1,71 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
//
// Beginning with Go 1.13, the functionality of this package was moved to the
// standard library as crypto/ed25519. This package only acts as a compatibility
// wrapper.
package ed25519

import (
"crypto/ed25519"
"io"
)

const (
// PublicKeySize is the size, in bytes, of public keys as used in this package.
PublicKeySize = 32
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
PrivateKeySize = 64
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
SignatureSize = 64
// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
SeedSize = 32
)

// PublicKey is the type of Ed25519 public keys.
//
// This type is an alias for crypto/ed25519's PublicKey type.
// See the crypto/ed25519 package for the methods on this type.
type PublicKey = ed25519.PublicKey

// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
//
// This type is an alias for crypto/ed25519's PrivateKey type.
// See the crypto/ed25519 package for the methods on this type.
type PrivateKey = ed25519.PrivateKey

// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
return ed25519.GenerateKey(rand)
}

// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not SeedSize. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
return ed25519.NewKeyFromSeed(seed)
}

// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
return ed25519.Sign(privateKey, message)
}

// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
return ed25519.Verify(publicKey, message, sig)
}

+ 3
- 1
vendor/golang.org/x/crypto/hkdf/hkdf.go View File

@@ -56,7 +56,9 @@ func (f *hkdf) Read(p []byte) (int, error) {

// Fill the rest of the buffer
for len(p) > 0 {
f.expander.Reset()
if f.counter > 1 {
f.expander.Reset()
}
f.expander.Write(f.prev)
f.expander.Write(f.info)
f.expander.Write([]byte{f.counter})


+ 0
- 1
vendor/golang.org/x/crypto/internal/alias/alias.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !purego
// +build !purego

// Package alias implements memory aliasing tests.
package alias


+ 0
- 1
vendor/golang.org/x/crypto/internal/alias/alias_purego.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build purego
// +build purego

// Package alias implements memory aliasing tests.
package alias


+ 0
- 1
vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !go1.13
// +build !go1.13

package poly1305



+ 0
- 1
vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build go1.13
// +build go1.13

package poly1305



+ 0
- 1
vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego
// +build !amd64,!ppc64le,!s390x !gc purego

package poly1305



+ 0
- 1
vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

package poly1305



+ 0
- 1
vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

#include "textflag.h"



+ 0
- 1
vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

package poly1305



+ 0
- 1
vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

#include "textflag.h"



+ 0
- 1
vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

package poly1305



+ 0
- 1
vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

#include "textflag.h"



+ 0
- 1
vendor/golang.org/x/crypto/sha3/hashes_generic.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !gc || purego || !s390x
// +build !gc purego !s390x

package sha3



+ 0
- 1
vendor/golang.org/x/crypto/sha3/keccakf.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !amd64 || purego || !gc
// +build !amd64 purego !gc

package sha3



+ 0
- 1
vendor/golang.org/x/crypto/sha3/keccakf_amd64.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build amd64 && !purego && gc
// +build amd64,!purego,gc

package sha3



+ 0
- 1
vendor/golang.org/x/crypto/sha3/keccakf_amd64.s View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build amd64 && !purego && gc
// +build amd64,!purego,gc

// This code was translated into a form compatible with 6a from the public
// domain sources at https://github.com/gvanas/KeccakCodePackage


+ 0
- 1
vendor/golang.org/x/crypto/sha3/register.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build go1.4
// +build go1.4

package sha3



+ 9
- 5
vendor/golang.org/x/crypto/sha3/sha3.go View File

@@ -121,11 +121,11 @@ func (d *state) padAndPermute(dsbyte byte) {
copyOut(d, d.buf)
}

// Write absorbs more data into the hash's state. It produces an error
// if more data is written to the ShakeHash after writing
// Write absorbs more data into the hash's state. It panics if any
// output has already been read.
func (d *state) Write(p []byte) (written int, err error) {
if d.state != spongeAbsorbing {
panic("sha3: write to sponge after read")
panic("sha3: Write after Read")
}
if d.buf == nil {
d.buf = d.storage.asBytes()[:0]
@@ -182,12 +182,16 @@ func (d *state) Read(out []byte) (n int, err error) {
}

// Sum applies padding to the hash state and then squeezes out the desired
// number of output bytes.
// number of output bytes. It panics if any output has already been read.
func (d *state) Sum(in []byte) []byte {
if d.state != spongeAbsorbing {
panic("sha3: Sum after Read")
}

// Make a copy of the original hash so that caller can keep writing
// and summing.
dup := d.clone()
hash := make([]byte, dup.outputLen)
hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation
dup.Read(hash)
return append(in, hash...)
}

+ 6
- 5
vendor/golang.org/x/crypto/sha3/sha3_s390x.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

package sha3

@@ -49,7 +48,7 @@ type asmState struct {
buf []byte // care must be taken to ensure cap(buf) is a multiple of rate
rate int // equivalent to block size
storage [3072]byte // underlying storage for buf
outputLen int // output length if fixed, 0 if not
outputLen int // output length for full security
function code // KIMD/KLMD function code
state spongeDirection // whether the sponge is absorbing or squeezing
}
@@ -72,8 +71,10 @@ func newAsmState(function code) *asmState {
s.outputLen = 64
case shake_128:
s.rate = 168
s.outputLen = 32
case shake_256:
s.rate = 136
s.outputLen = 64
default:
panic("sha3: unrecognized function code")
}
@@ -108,7 +109,7 @@ func (s *asmState) resetBuf() {
// It never returns an error.
func (s *asmState) Write(b []byte) (int, error) {
if s.state != spongeAbsorbing {
panic("sha3: write to sponge after read")
panic("sha3: Write after Read")
}
length := len(b)
for len(b) > 0 {
@@ -192,8 +193,8 @@ func (s *asmState) Read(out []byte) (n int, err error) {
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (s *asmState) Sum(b []byte) []byte {
if s.outputLen == 0 {
panic("sha3: cannot call Sum on SHAKE functions")
if s.state != spongeAbsorbing {
panic("sha3: Sum after Read")
}

// Copy the state to preserve the original.


+ 0
- 1
vendor/golang.org/x/crypto/sha3/sha3_s390x.s View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

#include "textflag.h"



+ 14
- 15
vendor/golang.org/x/crypto/sha3/shake.go View File

@@ -17,26 +17,25 @@ package sha3

import (
"encoding/binary"
"hash"
"io"
)

// ShakeHash defines the interface to hash functions that
// support arbitrary-length output.
// ShakeHash defines the interface to hash functions that support
// arbitrary-length output. When used as a plain [hash.Hash], it
// produces minimum-length outputs that provide full-strength generic
// security.
type ShakeHash interface {
// Write absorbs more data into the hash's state. It panics if input is
// written to it after output has been read from it.
io.Writer
hash.Hash

// Read reads more output from the hash; reading affects the hash's
// state. (ShakeHash.Read is thus very different from Hash.Sum)
// It never returns an error.
// It never returns an error, but subsequent calls to Write or Sum
// will panic.
io.Reader

// Clone returns a copy of the ShakeHash in its current state.
Clone() ShakeHash

// Reset resets the ShakeHash to its initial state.
Reset()
}

// cSHAKE specific context
@@ -81,8 +80,8 @@ func leftEncode(value uint64) []byte {
return b[i-1:]
}

func newCShake(N, S []byte, rate int, dsbyte byte) ShakeHash {
c := cshakeState{state: &state{rate: rate, dsbyte: dsbyte}}
func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash {
c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}}

// leftEncode returns max 9 bytes
c.initBlock = make([]byte, 0, 9*2+len(N)+len(S))
@@ -119,7 +118,7 @@ func NewShake128() ShakeHash {
if h := newShake128Asm(); h != nil {
return h
}
return &state{rate: rate128, dsbyte: dsbyteShake}
return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake}
}

// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
@@ -129,7 +128,7 @@ func NewShake256() ShakeHash {
if h := newShake256Asm(); h != nil {
return h
}
return &state{rate: rate256, dsbyte: dsbyteShake}
return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake}
}

// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash,
@@ -142,7 +141,7 @@ func NewCShake128(N, S []byte) ShakeHash {
if len(N) == 0 && len(S) == 0 {
return NewShake128()
}
return newCShake(N, S, rate128, dsbyteCShake)
return newCShake(N, S, rate128, 32, dsbyteCShake)
}

// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash,
@@ -155,7 +154,7 @@ func NewCShake256(N, S []byte) ShakeHash {
if len(N) == 0 && len(S) == 0 {
return NewShake256()
}
return newCShake(N, S, rate256, dsbyteCShake)
return newCShake(N, S, rate256, 64, dsbyteCShake)
}

// ShakeSum128 writes an arbitrary-length digest of data into hash.


+ 0
- 1
vendor/golang.org/x/crypto/sha3/shake_generic.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build !gc || purego || !s390x
// +build !gc purego !s390x

package sha3



+ 0
- 1
vendor/golang.org/x/crypto/sha3/xor.go View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.

//go:build (!amd64 && !386 && !ppc64le) || purego
// +build !amd64,!386,!ppc64le purego

package sha3



+ 0
- 2
vendor/golang.org/x/crypto/sha3/xor_unaligned.go View File

@@ -3,8 +3,6 @@
// license that can be found in the LICENSE file.

//go:build (amd64 || 386 || ppc64le) && !purego
// +build amd64 386 ppc64le
// +build !purego

package sha3



Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save