encapsulate object storage service operations

commit eab806fb9b (parent e31f95b943), 2025-01-17 18:42:36 +08:00
78 changed files with 4178 additions and 5275 deletions

.idea/GOHCache.xml (generated, 3817 lines changed)
File diff suppressed because it is too large.

app/ai/rpc/generate.go (new file)

@@ -0,0 +1,3 @@
package rpc
//go:generate goctl rpc protoc ai.proto --go_out=. --go-grpc_out=. --zrpc_out=. --client=true -m --style=go_zero


@@ -414,6 +414,40 @@ type (
)
service auth {
@handler uploadImage
post /upload (UploadRequest)
post /phone/upload (UploadRequest)
}
// Storage (file upload) configuration request parameters
type (
StorageConfigRequest {
Type string `json:"type"`
AccessKey string `json:"access_key"`
SecretKey string `json:"secret_key"`
Endpoint string `json:"endpoint"`
Bucket string `json:"bucket"`
Region string `json:"region"`
}
)
// File upload
@server (
group: storage // service group
prefix: /api/auth/storage // route prefix
timeout: 10s // request timeout
maxBytes: 104857600 // maximum request size
signature: false // enable signature verification
middleware: SecurityHeadersMiddleware,CasbinVerifyMiddleware,AuthorizationMiddleware,NonceMiddleware // registered middleware
MaxConns: true // enable max-connections limit
Recover: true // enable automatic panic recovery
jwt: Auth // enable JWT authentication
)
service auth {
// upload a file
@handler uploadFile
post /uploads returns (string)
// set storage configuration
@handler setStorageConfig
post /config (StorageConfigRequest) returns (string)
}
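Once goctl regenerates handlers from this block, both routes sit behind the listed middleware chain and JWT auth. A minimal client sketch for the new config endpoint follows, assuming a locally running gateway; the host, port, token and credential values are placeholders, not values defined by this commit:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Field names mirror the StorageConfigRequest JSON tags defined above.
	body, _ := json.Marshal(map[string]string{
		"type":       "minio", // assumed provider identifier
		"access_key": "AK_PLACEHOLDER",
		"secret_key": "SK_PLACEHOLDER",
		"endpoint":   "play.min.io",
		"bucket":     "albums",
		"region":     "us-east-1",
	})
	req, _ := http.NewRequest(http.MethodPost,
		"http://localhost:8888/api/auth/storage/config", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <jwt>") // route is registered with rest.WithJwt

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```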


@@ -96,6 +96,8 @@ Encrypt:
Key: p3380puliiep184buh8d5dvujeerqtem
# IV (16 bytes)
IV: spb7er04k2vz3dtk
PublicKey: api/etc/rsa_public_key.pem
PrivateKey: api/etc/rsa_private_key.pem
# Redis configuration
Redis:
# Redis address


@@ -0,0 +1,29 @@
package file
import (
"net/http"
"schisandra-album-cloud-microservices/app/auth/api/internal/logic/storage"
"github.com/zeromicro/go-zero/rest/httpx"
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
"schisandra-album-cloud-microservices/app/auth/api/internal/types"
"schisandra-album-cloud-microservices/common/xhttp"
)
func SetStorageConfigHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var req types.StorageConfigRequest
if err := httpx.Parse(r, &req); err != nil {
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
return
}
l := storage.NewSetStorageConfigLogic(r.Context(), svcCtx)
resp, err := l.SetStorageConfig(&req)
if err != nil {
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
} else {
xhttp.JsonBaseResponseCtx(r.Context(), w, resp)
}
}
}


@@ -0,0 +1,21 @@
package file
import (
"net/http"
"schisandra-album-cloud-microservices/app/auth/api/internal/logic/storage"
"schisandra-album-cloud-microservices/common/xhttp"
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
)
func UploadFileHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
l := storage.NewUploadFileLogic(r.Context(), svcCtx)
resp, err := l.UploadFile(r)
if err != nil {
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
} else {
xhttp.JsonBaseResponseCtx(r.Context(), w, resp)
}
}
}


@@ -12,6 +12,7 @@ import (
comment "schisandra-album-cloud-microservices/app/auth/api/internal/handler/comment"
oauth "schisandra-album-cloud-microservices/app/auth/api/internal/handler/oauth"
sms "schisandra-album-cloud-microservices/app/auth/api/internal/handler/sms"
storage "schisandra-album-cloud-microservices/app/auth/api/internal/handler/storage"
token "schisandra-album-cloud-microservices/app/auth/api/internal/handler/token"
upscale "schisandra-album-cloud-microservices/app/auth/api/internal/handler/upscale"
user "schisandra-album-cloud-microservices/app/auth/api/internal/handler/user"
@@ -183,6 +184,28 @@ func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) {
rest.WithMaxBytes(1048576),
)
server.AddRoutes(
rest.WithMiddlewares(
[]rest.Middleware{serverCtx.SecurityHeadersMiddleware, serverCtx.CasbinVerifyMiddleware, serverCtx.AuthorizationMiddleware, serverCtx.NonceMiddleware},
[]rest.Route{
{
Method: http.MethodPost,
Path: "/config",
Handler: storage.SetStorageConfigHandler(serverCtx),
},
{
Method: http.MethodPost,
Path: "/uploads",
Handler: storage.UploadFileHandler(serverCtx),
},
}...,
),
rest.WithJwt(serverCtx.Config.Auth.AccessSecret),
rest.WithPrefix("/api/auth/storage"),
rest.WithTimeout(10000*time.Millisecond),
rest.WithMaxBytes(104857600),
)
server.AddRoutes(
rest.WithMiddlewares(
[]rest.Middleware{serverCtx.SecurityHeadersMiddleware, serverCtx.CasbinVerifyMiddleware, serverCtx.NonceMiddleware},
@@ -206,7 +229,7 @@ func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) {
[]rest.Route{
{
Method: http.MethodPost,
Path: "/upload",
Path: "/phone/upload",
Handler: upscale.UploadImageHandler(serverCtx),
},
}...,


@@ -0,0 +1,29 @@
package storage
import (
"net/http"
"github.com/zeromicro/go-zero/rest/httpx"
"schisandra-album-cloud-microservices/app/auth/api/internal/logic/storage"
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
"schisandra-album-cloud-microservices/app/auth/api/internal/types"
"schisandra-album-cloud-microservices/common/xhttp"
)
func SetStorageConfigHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var req types.StorageConfigRequest
if err := httpx.Parse(r, &req); err != nil {
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
return
}
l := storage.NewSetStorageConfigLogic(r.Context(), svcCtx)
resp, err := l.SetStorageConfig(&req)
if err != nil {
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
} else {
xhttp.JsonBaseResponseCtx(r.Context(), w, resp)
}
}
}


@@ -0,0 +1,21 @@
package storage
import (
"net/http"
"schisandra-album-cloud-microservices/app/auth/api/internal/logic/storage"
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
"schisandra-album-cloud-microservices/common/xhttp"
)
func UploadFileHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
l := storage.NewUploadFileLogic(r.Context(), svcCtx)
resp, err := l.UploadFile(r)
if err != nil {
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
} else {
xhttp.JsonBaseResponseCtx(r.Context(), w, resp)
}
}
}


@@ -97,11 +97,11 @@ func (l *WechatOffiaccountCallbackLogic) WechatOffiaccountCallback(r *http.Reque
// SendMessage sends a message to the client
func (l *WechatOffiaccountCallbackLogic) SendMessage(openId string, clientId string) error {
encryptClientId, err := encrypt.Encrypt(clientId, l.svcCtx.Config.Encrypt.Key, l.svcCtx.Config.Encrypt.IV)
encryptClientId, err := encrypt.Encrypt(clientId, l.svcCtx.Config.Encrypt.Key)
if err != nil {
return err
}
encryptOpenId, err := encrypt.Encrypt(openId, l.svcCtx.Config.Encrypt.Key, l.svcCtx.Config.Encrypt.IV)
encryptOpenId, err := encrypt.Encrypt(openId, l.svcCtx.Config.Encrypt.Key)
if err != nil {
return err
}
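Encrypt and Decrypt now take only the data and the key; the fixed IV argument is gone. The common/encrypt implementation is not shown in this diff, but a two-argument API of this shape is typically backed by a mode that carries its own nonce. Below is a sketch under that assumption (AES-GCM with a random nonce prepended, base64 output), not the project's actual code:

```go
package encrypt

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"errors"
	"io"
)

// Encrypt seals plaintext with AES-GCM; the random nonce is prepended to the
// ciphertext, so no separate IV needs to be configured or passed around.
func Encrypt(plaintext, key string) (string, error) {
	block, err := aes.NewCipher([]byte(key)) // key must be 16/24/32 bytes
	if err != nil {
		return "", err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return "", err
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return "", err
	}
	sealed := gcm.Seal(nonce, nonce, []byte(plaintext), nil) // nonce || ciphertext
	return base64.StdEncoding.EncodeToString(sealed), nil
}

// Decrypt reverses Encrypt: split off the nonce, then open the ciphertext.
func Decrypt(ciphertext, key string) (string, error) {
	raw, err := base64.StdEncoding.DecodeString(ciphertext)
	if err != nil {
		return "", err
	}
	block, err := aes.NewCipher([]byte(key))
	if err != nil {
		return "", err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return "", err
	}
	if len(raw) < gcm.NonceSize() {
		return "", errors.New("ciphertext too short")
	}
	nonce, data := raw[:gcm.NonceSize()], raw[gcm.NonceSize():]
	plain, err := gcm.Open(nil, nonce, data, nil)
	if err != nil {
		return "", err
	}
	return string(plain), nil
}
```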


@@ -0,0 +1,55 @@
package storage
import (
"context"
"errors"
"github.com/zeromicro/go-zero/core/logx"
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
"schisandra-album-cloud-microservices/app/auth/api/internal/types"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"schisandra-album-cloud-microservices/common/encrypt"
)
type SetStorageConfigLogic struct {
logx.Logger
ctx context.Context
svcCtx *svc.ServiceContext
}
func NewSetStorageConfigLogic(ctx context.Context, svcCtx *svc.ServiceContext) *SetStorageConfigLogic {
return &SetStorageConfigLogic{
Logger: logx.WithContext(ctx),
ctx: ctx,
svcCtx: svcCtx,
}
}
func (l *SetStorageConfigLogic) SetStorageConfig(req *types.StorageConfigRequest) (resp string, err error) {
uid, ok := l.ctx.Value("user_id").(string)
if !ok {
return "", errors.New("user_id not found")
}
accessKey, err := encrypt.Encrypt(req.AccessKey, l.svcCtx.Config.Encrypt.Key)
if err != nil {
return "", err
}
secretKey, err := encrypt.Encrypt(req.SecretKey, l.svcCtx.Config.Encrypt.Key)
if err != nil {
return "", err
}
ossConfig := &model.ScaStorageConfig{
UserID: uid,
Type: req.Type,
Endpoint: req.Endpoint,
Bucket: req.Bucket,
AccessKey: accessKey,
SecretKey: secretKey,
Region: req.Region,
}
err = l.svcCtx.DB.ScaStorageConfig.Create(ossConfig)
if err != nil {
return "", err
}
return "success", nil
}


@@ -0,0 +1,75 @@
package storage
import (
"context"
"errors"
"github.com/zeromicro/go-zero/core/logx"
"net/http"
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
"schisandra-album-cloud-microservices/common/encrypt"
"schisandra-album-cloud-microservices/common/storage/config"
)
type UploadFileLogic struct {
logx.Logger
ctx context.Context
svcCtx *svc.ServiceContext
}
func NewUploadFileLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UploadFileLogic {
return &UploadFileLogic{
Logger: logx.WithContext(ctx),
ctx: ctx,
svcCtx: svcCtx,
}
}
func (l *UploadFileLogic) UploadFile(r *http.Request) (resp string, err error) {
uid, ok := l.ctx.Value("user_id").(string)
if !ok {
return "", errors.New("user_id not found")
}
file, header, err := r.FormFile("file")
if err != nil {
return "", errors.New("file not found")
}
defer file.Close()
//formValue := r.PostFormValue("result")
//
//var result types.File
//err = json.Unmarshal([]byte(formValue), &result)
//if err != nil {
// return "", errors.New("invalid result")
//}
//fmt.Println(result)
ossConfig := l.svcCtx.DB.ScaStorageConfig
dbConfig, err := ossConfig.Where(ossConfig.UserID.Eq(uid)).First()
if err != nil {
return "", errors.New("oss config not found")
}
accessKey, err := encrypt.Decrypt(dbConfig.AccessKey, l.svcCtx.Config.Encrypt.Key)
if err != nil {
return "", errors.New("decrypt access key failed")
}
secretKey, err := encrypt.Decrypt(dbConfig.SecretKey, l.svcCtx.Config.Encrypt.Key)
if err != nil {
return "", errors.New("decrypt secret key failed")
}
storageConfig := &config.StorageConfig{
Provider: dbConfig.Type,
Endpoint: dbConfig.Endpoint,
AccessKey: accessKey,
SecretKey: secretKey,
BucketName: dbConfig.Bucket,
Region: dbConfig.Region,
}
service, err := l.svcCtx.StorageManager.GetStorage(uid, storageConfig)
if err != nil {
return "", errors.New("get storage failed")
}
result, err := service.UploadFileSimple(l.ctx, dbConfig.Bucket, header.Filename, file, map[string]string{})
if err != nil {
return "", errors.New("upload file failed")
}
return *result.ContentMD5, nil
}
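UploadFile leans on two things this diff does not show: StorageManager.GetStorage and the service's UploadFileSimple method. Inferred from the call sites alone, the contract looks roughly like the sketch below; the names and fields are assumptions, not the actual common/storage API:

```go
package storage

import (
	"context"
	"io"
)

// UploadResult must expose at least ContentMD5, since UploadFileLogic
// dereferences it for the response.
type UploadResult struct {
	ContentMD5 *string
	Key        string
	Size       int64
}

// Service is the per-provider abstraction (S3, OSS, MinIO, ...) that the
// upload logic receives from the manager.
type Service interface {
	// UploadFileSimple streams a single object into bucket/objectName,
	// attaching the optional metadata map.
	UploadFileSimple(ctx context.Context, bucket, objectName string,
		body io.Reader, metadata map[string]string) (*UploadResult, error)
}
```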


@@ -36,7 +36,7 @@ func NewWechatOffiaccountLoginLogic(ctx context.Context, svcCtx *svc.ServiceCont
}
func (l *WechatOffiaccountLoginLogic) WechatOffiaccountLogin(r *http.Request, req *types.WechatOffiaccountLoginRequest) (resp *types.LoginResponse, err error) {
decryptedClientId, err := encrypt.Decrypt(req.ClientId, l.svcCtx.Config.Encrypt.Key, l.svcCtx.Config.Encrypt.IV)
decryptedClientId, err := encrypt.Decrypt(req.ClientId, l.svcCtx.Config.Encrypt.Key)
if err != nil {
return nil, err
}
@@ -44,7 +44,7 @@ func (l *WechatOffiaccountLoginLogic) WechatOffiaccountLogin(r *http.Request, re
if clientId == "" {
return nil, errors2.New(http.StatusUnauthorized, i18n.FormatText(l.ctx, "login.loginFailed"))
}
Openid, err := encrypt.Decrypt(req.Openid, l.svcCtx.Config.Encrypt.Key, l.svcCtx.Config.Encrypt.IV)
Openid, err := encrypt.Decrypt(req.Openid, l.svcCtx.Config.Encrypt.Key)
if err != nil {
return nil, err
}


@@ -20,6 +20,8 @@ import (
"schisandra-album-cloud-microservices/common/ip2region"
"schisandra-album-cloud-microservices/common/redisx"
"schisandra-album-cloud-microservices/common/sensitivex"
"schisandra-album-cloud-microservices/common/storage"
storage2 "schisandra-album-cloud-microservices/common/storage/manager"
"schisandra-album-cloud-microservices/common/wechat_official"
)
@@ -38,6 +40,7 @@ type ServiceContext struct {
RotateCaptcha rotate.Captcha
SlideCaptcha slide.Captcha
Sensitive *sensitive.Manager
StorageManager *storage2.Manager
}
func NewServiceContext(c config.Config) *ServiceContext {
@@ -59,5 +62,6 @@ func NewServiceContext(c config.Config) *ServiceContext {
SlideCaptcha: initialize.NewSlideCaptcha(),
MongoClient: mongodb.NewMongoDB(c.Mongo.Uri, c.Mongo.Username, c.Mongo.Password, c.Mongo.AuthSource, c.Mongo.Database),
Sensitive: sensitivex.NewSensitive(),
StorageManager: storage.InitStorageManager(),
}
}
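StorageManager is created once per process and then asked for a client per user and per config. A plausible shape for common/storage/manager is a concurrency-safe cache keyed by user ID, sketched below; the factory indirection, package layout, and field names are assumptions:

```go
package manager

import "sync"

// StorageConfig mirrors the fields UploadFileLogic fills in from the database row.
type StorageConfig struct {
	Provider, Endpoint, AccessKey, SecretKey, BucketName, Region string
}

// Service stands in for the provider-specific client (upload, delete, presign, ...).
type Service interface{}

type Manager struct {
	clients sync.Map // uid -> Service
	factory func(cfg *StorageConfig) (Service, error)
}

func New(factory func(cfg *StorageConfig) (Service, error)) *Manager {
	return &Manager{factory: factory}
}

// GetStorage returns the cached client for uid, building one from cfg on first use.
func (m *Manager) GetStorage(uid string, cfg *StorageConfig) (Service, error) {
	if v, ok := m.clients.Load(uid); ok {
		return v.(Service), nil
	}
	svc, err := m.factory(cfg)
	if err != nil {
		return nil, err
	}
	actual, _ := m.clients.LoadOrStore(uid, svc)
	return actual.(Service), nil
}
```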


@@ -0,0 +1,14 @@
package types
// File represents a file uploaded by the user.
type File struct {
UID string `json:"uid"`
FileName string `json:"fileName"`
FileType string `json:"fileType"`
DetectionResult struct {
IsAnime bool `json:"isAnime"`
HasFace bool `json:"hasFace"`
ObjectArray []string `json:"objectArray"`
Landscape string `json:"landscape"`
}
}
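The commented-out block in UploadFileLogic suggests the client is expected to send these detection results as a "result" form field alongside the file. Assuming that wiring, the JSON would decode into types.File as in this sketch; the sample values are invented:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type File struct {
	UID             string `json:"uid"`
	FileName        string `json:"fileName"`
	FileType        string `json:"fileType"`
	DetectionResult struct {
		IsAnime     bool     `json:"isAnime"`
		HasFace     bool     `json:"hasFace"`
		ObjectArray []string `json:"objectArray"`
		Landscape   string   `json:"landscape"`
	}
}

func main() {
	// Example payload for the "result" multipart field (values are made up).
	payload := `{
	  "uid": "1874",
	  "fileName": "IMG_0001.jpg",
	  "fileType": "image/jpeg",
	  "DetectionResult": {"isAnime": false, "hasFace": true,
	    "objectArray": ["person", "dog"], "landscape": "beach"}
	}`
	var f File
	if err := json.Unmarshal([]byte(payload), &f); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", f)
}
```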


@@ -174,6 +174,15 @@ type SmsSendRequest struct {
Key string `json:"key"`
}
type StorageConfigRequest struct {
Type string `json:"type"`
AccessKey string `json:"access_key"`
SecretKey string `json:"secret_key"`
Endpoint string `json:"endpoint"`
Bucket string `json:"bucket"`
Region string `json:"region"`
}
type UploadRequest struct {
Image string `json:"image"`
AccessToken string `json:"access_token"`


@@ -1,6 +1,6 @@
# to build this docker image:
# docker build -f Dockerfile -t schisandra-cloud-album-server .
# docker build --build-arg OPENCV_VERSION="4.x" --build-arg OPENCV_FILE="https://github.com/opencv/opencv/archive/refs/heads/4.x.zip" --build-arg OPENCV_CONTRIB_FILE="https://github.com/opencv/opencv_contrib/archive/refs/heads/4.x.zip" -f Dockerfile -t schisandra-cloud-album-server .
# docker build -f opencv.Dockerfile -t schisandra-cloud-album-server .
# docker build --build-arg OPENCV_VERSION="4.x" --build-arg OPENCV_FILE="https://github.com/opencv/opencv/archive/refs/heads/4.x.zip" --build-arg OPENCV_CONTRIB_FILE="https://github.com/opencv/opencv_contrib/archive/refs/heads/4.x.zip" -f opencv.Dockerfile -t schisandra-cloud-album-server .
FROM ubuntu:20.04 AS opencv-builder


@@ -11,7 +11,7 @@ import (
"gorm.io/gorm"
)
const MySQLDSN = "root:1611@(localhost:3306)/schisandra-cloud-album?charset=utf8mb4&parseTime=True&loc=Local"
const MySQLDSN = "root:LDQ20020618xxx@tcp(1.95.0.111:3306)/schisandra-cloud-album?charset=utf8mb4&parseTime=True&loc=Local"
func main() {
@@ -25,7 +25,7 @@ func main() {
if err != nil {
panic(err)
}
path := filepath.Join(dir, "app/auth/api/repository/mysql/", "query")
path := filepath.Join(dir, "app/auth/model/mysql/", "query")
// create the generator instance
g := gen.NewGenerator(gen.Config{
// path relative to where `go run` is executed; the directory is created automatically


@@ -14,19 +14,19 @@ const TableNameScaAuthMenu = "sca_auth_menu"
// ScaAuthMenu mapped from table <sca_auth_menu>
type ScaAuthMenu struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
MenuName string `gorm:"column:menu_name;type:varchar(64);comment:名称" json:"menu_name"` // 名称
ParentID int64 `gorm:"column:parent_id;type:bigint;comment:父ID" json:"parent_id"` // 父ID
Type int64 `gorm:"column:type;type:tinyint;comment:类型" json:"type"` // 类型
Path string `gorm:"column:path;type:varchar(30);comment:路径" json:"path"` // 路径
Status int64 `gorm:"column:status;type:tinyint;comment:状态 0 启用 1 停用" json:"status"` // 状态 0 启用 1 停用
Icon string `gorm:"column:icon;type:varchar(128);comment:图标" json:"icon"` // 图标
MenuKey string `gorm:"column:menu_key;type:varchar(64);comment:关键字" json:"menu_key"` // 关键字
Order_ int64 `gorm:"column:order;type:int;comment:排序" json:"order"` // 排序
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
Remark string `gorm:"column:remark;type:varchar(255);comment:备注 描述" json:"remark"` // 备注 描述
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
MenuName string `gorm:"column:menu_name;type:varchar(64);comment:名称" json:"menu_name"` // 名称
ParentID int64 `gorm:"column:parent_id;type:bigint(20);comment:父ID" json:"parent_id"` // 父ID
Type int64 `gorm:"column:type;type:tinyint(4);comment:类型" json:"type"` // 类型
Path string `gorm:"column:path;type:varchar(30);comment:路径" json:"path"` // 路径
Status int64 `gorm:"column:status;type:tinyint(4);comment:状态 0 启用 1 停用" json:"status"` // 状态 0 启用 1 停用
Icon string `gorm:"column:icon;type:varchar(128);comment:图标" json:"icon"` // 图标
MenuKey string `gorm:"column:menu_key;type:varchar(64);comment:关键字" json:"menu_key"` // 关键字
Order_ int64 `gorm:"column:order;type:bigint(20);comment:排序" json:"order"` // 排序
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
Remark string `gorm:"column:remark;type:varchar(255);comment:备注 描述" json:"remark"` // 备注 描述
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}
// TableName ScaAuthMenu's table name


@@ -8,7 +8,7 @@ const TableNameScaAuthPermissionRule = "sca_auth_permission_rule"
// ScaAuthPermissionRule mapped from table <sca_auth_permission_rule>
type ScaAuthPermissionRule struct {
ID int64 `gorm:"column:id;type:int;primaryKey;autoIncrement:true;primary_key" json:"id,string"`
ID int64 `gorm:"column:id;type:int(11);primaryKey;autoIncrement:true;primary_key" json:"id,string"`
Ptype string `gorm:"column:ptype;type:varchar(100);uniqueIndex:idx_sca_auth_permission_rule,priority:1;index:IDX_sca_auth_permission_rule_ptype,priority:1" json:"ptype"`
V0 string `gorm:"column:v0;type:varchar(100);uniqueIndex:idx_sca_auth_permission_rule,priority:2;index:IDX_sca_auth_permission_rule_v0,priority:1" json:"v0"`
V1 string `gorm:"column:v1;type:varchar(100);uniqueIndex:idx_sca_auth_permission_rule,priority:3;index:IDX_sca_auth_permission_rule_v1,priority:1" json:"v1"`


@@ -14,12 +14,12 @@ const TableNameScaAuthRole = "sca_auth_role"
// ScaAuthRole mapped from table <sca_auth_role>
type ScaAuthRole struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
RoleName string `gorm:"column:role_name;type:varchar(32);not null;comment:角色名称" json:"role_name"` // 角色名称
RoleKey string `gorm:"column:role_key;type:varchar(64);not null;comment:角色关键字" json:"role_key"` // 角色关键字
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
RoleName string `gorm:"column:role_name;type:varchar(32);not null;comment:角色名称" json:"role_name"` // 角色名称
RoleKey string `gorm:"column:role_key;type:varchar(64);not null;comment:角色关键字" json:"role_key"` // 角色关键字
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}
// TableName ScaAuthRole's table name


@@ -14,23 +14,23 @@ const TableNameScaAuthUser = "sca_auth_user"
// ScaAuthUser mapped from table <sca_auth_user>
type ScaAuthUser struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;uniqueIndex:id,priority:1;comment:自增ID;primary_key" json:"id,string"` // 自增ID
UID string `gorm:"column:uid;type:varchar(50);not null;uniqueIndex:uid,priority:1;comment:唯一ID" json:"uid"` // 唯一ID
Username string `gorm:"column:username;type:varchar(32);comment:用户名" json:"username"` // 用户名
Nickname string `gorm:"column:nickname;type:varchar(32);comment:昵称" json:"nickname"` // 昵称
Email string `gorm:"column:email;type:varchar(32);comment:邮箱" json:"email"` // 邮箱
Phone string `gorm:"column:phone;type:varchar(32);comment:电话" json:"phone"` // 电话
Password string `gorm:"column:password;type:varchar(64);comment:密码" json:"password"` // 密码
Gender int64 `gorm:"column:gender;type:tinyint;comment:性别" json:"gender"` // 性别
Avatar string `gorm:"column:avatar;type:longtext;comment:头像" json:"avatar"` // 头像
Status int64 `gorm:"column:status;type:tinyint;comment:状态 0 正常 1 封禁" json:"status"` // 状态 0 正常 1 封禁
Introduce string `gorm:"column:introduce;type:varchar(255);comment:介绍" json:"introduce"` // 介绍
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
Blog string `gorm:"column:blog;type:varchar(30);comment:博客" json:"blog"` // 博客
Location string `gorm:"column:location;type:varchar(50);comment:地址" json:"location"` // 地址
Company string `gorm:"column:company;type:varchar(50);comment:公司" json:"company"` // 公司
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;uniqueIndex:id,priority:1;comment:自增ID;primary_key" json:"id,string"` // 自增ID
UID string `gorm:"column:uid;type:varchar(50);not null;uniqueIndex:uid,priority:1;comment:唯一ID" json:"uid"` // 唯一ID
Username string `gorm:"column:username;type:varchar(32);comment:用户名" json:"username"` // 用户名
Nickname string `gorm:"column:nickname;type:varchar(32);comment:昵称" json:"nickname"` // 昵称
Email string `gorm:"column:email;type:varchar(32);comment:邮箱" json:"email"` // 邮箱
Phone string `gorm:"column:phone;type:varchar(32);comment:电话" json:"phone"` // 电话
Password string `gorm:"column:password;type:varchar(64);comment:密码" json:"password"` // 密码
Gender int64 `gorm:"column:gender;type:tinyint(4);comment:性别" json:"gender"` // 性别
Avatar string `gorm:"column:avatar;type:longtext;comment:头像" json:"avatar"` // 头像
Status int64 `gorm:"column:status;type:tinyint(4);comment:状态 0 正常 1 封禁" json:"status"` // 状态 0 正常 1 封禁
Introduce string `gorm:"column:introduce;type:varchar(255);comment:介绍" json:"introduce"` // 介绍
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
Blog string `gorm:"column:blog;type:varchar(30);comment:博客" json:"blog"` // 博客
Location string `gorm:"column:location;type:varchar(50);comment:地址" json:"location"` // 地址
Company string `gorm:"column:company;type:varchar(50);comment:公司" json:"company"` // 公司
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}
// TableName ScaAuthUser's table name


@@ -14,23 +14,23 @@ const TableNameScaAuthUserDevice = "sca_auth_user_device"
// ScaAuthUserDevice mapped from table <sca_auth_user_device>
type ScaAuthUserDevice struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
UserID string `gorm:"column:user_id;type:varchar(20);not null;comment:用户ID" json:"user_id"` // 用户ID
IP string `gorm:"column:ip;type:varchar(20);comment:登录IP" json:"ip"` // 登录IP
Location string `gorm:"column:location;type:varchar(20);comment:地址" json:"location"` // 地址
Agent string `gorm:"column:agent;type:varchar(255);comment:设备信息" json:"agent"` // 设备信息
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
Browser string `gorm:"column:browser;type:varchar(20);comment:浏览器" json:"browser"` // 浏览器
OperatingSystem string `gorm:"column:operating_system;type:varchar(20);comment:操作系统" json:"operating_system"` // 操作系统
BrowserVersion string `gorm:"column:browser_version;type:varchar(20);comment:浏览器版本" json:"browser_version"` // 浏览器版本
Mobile int64 `gorm:"column:mobile;type:tinyint(1);comment:是否为手机 0否1是" json:"mobile"` // 是否为手机 0否1是
Bot int64 `gorm:"column:bot;type:tinyint(1);comment:是否为bot 0否1是" json:"bot"` // 是否为bot 0否1是
Mozilla string `gorm:"column:mozilla;type:varchar(10);comment:火狐版本" json:"mozilla"` // 火狐版本
Platform string `gorm:"column:platform;type:varchar(20);comment:平台" json:"platform"` // 平台
EngineName string `gorm:"column:engine_name;type:varchar(20);comment:引擎名称" json:"engine_name"` // 引擎名称
EngineVersion string `gorm:"column:engine_version;type:varchar(20);comment:引擎版本" json:"engine_version"` // 引擎版本
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
UserID string `gorm:"column:user_id;type:varchar(20);not null;comment:用户ID" json:"user_id"` // 用户ID
IP string `gorm:"column:ip;type:varchar(20);comment:登录IP" json:"ip"` // 登录IP
Location string `gorm:"column:location;type:varchar(20);comment:地址" json:"location"` // 地址
Agent string `gorm:"column:agent;type:varchar(255);comment:设备信息" json:"agent"` // 设备信息
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
Browser string `gorm:"column:browser;type:varchar(20);comment:浏览器" json:"browser"` // 浏览器
OperatingSystem string `gorm:"column:operating_system;type:varchar(20);comment:操作系统" json:"operating_system"` // 操作系统
BrowserVersion string `gorm:"column:browser_version;type:varchar(20);comment:浏览器版本" json:"browser_version"` // 浏览器版本
Mobile int64 `gorm:"column:mobile;type:tinyint(1);comment:是否为手机 0否1是" json:"mobile"` // 是否为手机 0否1是
Bot int64 `gorm:"column:bot;type:tinyint(1);comment:是否为bot 0否1是" json:"bot"` // 是否为bot 0否1是
Mozilla string `gorm:"column:mozilla;type:varchar(10);comment:火狐版本" json:"mozilla"` // 火狐版本
Platform string `gorm:"column:platform;type:varchar(20);comment:平台" json:"platform"` // 平台
EngineName string `gorm:"column:engine_name;type:varchar(20);comment:引擎名称" json:"engine_name"` // 引擎名称
EngineVersion string `gorm:"column:engine_version;type:varchar(20);comment:引擎版本" json:"engine_version"` // 引擎版本
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}
// TableName ScaAuthUserDevice's table name


@@ -14,14 +14,14 @@ const TableNameScaAuthUserSocial = "sca_auth_user_social"
// ScaAuthUserSocial mapped from table <sca_auth_user_social>
type ScaAuthUserSocial struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:用户ID" json:"user_id"` // 用户ID
OpenID string `gorm:"column:open_id;type:varchar(50);not null;comment:第三方用户的 open id" json:"open_id"` // 第三方用户的 open id
Source string `gorm:"column:source;type:varchar(10);comment:第三方用户来源" json:"source"` // 第三方用户来源
Status int64 `gorm:"column:status;type:bigint;comment:状态 0正常 1 封禁" json:"status"` // 状态 0正常 1 封禁
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:用户ID" json:"user_id"` // 用户ID
OpenID string `gorm:"column:open_id;type:varchar(50);not null;comment:第三方用户的 open id" json:"open_id"` // 第三方用户的 open id
Source string `gorm:"column:source;type:varchar(10);comment:第三方用户来源" json:"source"` // 第三方用户来源
Status int64 `gorm:"column:status;type:bigint(20);comment:状态 0正常 1 封禁" json:"status"` // 状态 0正常 1 封禁
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}
// TableName ScaAuthUserSocial's table name


@@ -12,11 +12,11 @@ const TableNameScaCommentLike = "sca_comment_likes"
// ScaCommentLike mapped from table <sca_comment_likes>
type ScaCommentLike struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键id;primary_key" json:"id,string"` // 主键id
TopicID string `gorm:"column:topic_id;type:varchar(50);not null;comment:话题ID" json:"topic_id"` // 话题ID
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:用户ID" json:"user_id"` // 用户ID
CommentID int64 `gorm:"column:comment_id;type:bigint;not null;comment:评论ID" json:"comment_id"` // 评论ID
LikeTime time.Time `gorm:"column:like_time;type:timestamp;comment:点赞时间" json:"like_time"` // 点赞时间
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键id;primary_key" json:"id,string"` // 主键id
TopicID string `gorm:"column:topic_id;type:varchar(50);not null;comment:话题ID" json:"topic_id"` // 话题ID
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:用户ID" json:"user_id"` // 用户ID
CommentID int64 `gorm:"column:comment_id;type:bigint(20);not null;comment:评论ID" json:"comment_id"` // 评论ID
LikeTime time.Time `gorm:"column:like_time;type:timestamp;comment:点赞时间" json:"like_time"` // 点赞时间
}
// TableName ScaCommentLike's table name


@@ -15,27 +15,27 @@ const TableNameScaCommentReply = "sca_comment_reply"
// ScaCommentReply mapped from table <sca_comment_reply>
type ScaCommentReply struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;uniqueIndex:id,priority:1;comment:主键id;primary_key" json:"id,string"` // 主键id
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:评论用户id" json:"user_id"` // 评论用户id
TopicID string `gorm:"column:topic_id;type:varchar(50);comment:评论话题id" json:"topic_id"` // 评论话题id
TopicType int64 `gorm:"column:topic_type;type:tinyint;comment:话题类型" json:"topic_type"` // 话题类型
Content string `gorm:"column:content;type:text;comment:评论内容" json:"content"` // 评论内容
CommentType int64 `gorm:"column:comment_type;type:bigint;comment:评论类型 0评论 1 回复" json:"comment_type"` // 评论类型 0评论 1 回复
ReplyTo int64 `gorm:"column:reply_to;type:bigint;comment:回复子评论ID" json:"reply_to"` // 回复子评论ID
ReplyID int64 `gorm:"column:reply_id;type:bigint;comment:回复父评论Id" json:"reply_id"` // 回复父评论Id
ReplyUser string `gorm:"column:reply_user;type:varchar(50);comment:回复人id" json:"reply_user"` // 回复人id
Author int64 `gorm:"column:author;type:tinyint;comment:评论回复是否作者 0否 1是" json:"author"` // 评论回复是否作者 0否 1是
Likes int64 `gorm:"column:likes;type:bigint;comment:点赞数" json:"likes"` // 点赞数
ReplyCount int64 `gorm:"column:reply_count;type:bigint;comment:回复数量" json:"reply_count"` // 回复数量
Browser string `gorm:"column:browser;type:varchar(50);comment:浏览器" json:"browser"` // 浏览器
OperatingSystem string `gorm:"column:operating_system;type:varchar(50);comment:操作系统" json:"operating_system"` // 操作系统
CommentIP string `gorm:"column:comment_ip;type:varchar(50);comment:IP地址" json:"comment_ip"` // IP地址
Location string `gorm:"column:location;type:varchar(50);comment:地址" json:"location"` // 地址
Agent string `gorm:"column:agent;type:varchar(255);comment:设备信息" json:"agent"` // 设备信息
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
Version optimisticlock.Version `gorm:"column:version;type:bigint;comment:版本" json:"version"` // 版本
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:datetime;comment:删除时间" json:"deleted_at"` // 删除时间
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;uniqueIndex:id,priority:1;comment:主键id;primary_key" json:"id,string"` // 主键id
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:评论用户id" json:"user_id"` // 评论用户id
TopicID string `gorm:"column:topic_id;type:varchar(50);comment:评论话题id" json:"topic_id"` // 评论话题id
TopicType int64 `gorm:"column:topic_type;type:tinyint(4);comment:话题类型" json:"topic_type"` // 话题类型
Content string `gorm:"column:content;type:text;comment:评论内容" json:"content"` // 评论内容
CommentType int64 `gorm:"column:comment_type;type:bigint(20);comment:评论类型 0评论 1 回复" json:"comment_type"` // 评论类型 0评论 1 回复
ReplyTo int64 `gorm:"column:reply_to;type:bigint(20);comment:回复子评论ID" json:"reply_to"` // 回复子评论ID
ReplyID int64 `gorm:"column:reply_id;type:bigint(20);comment:回复父评论Id" json:"reply_id"` // 回复父评论Id
ReplyUser string `gorm:"column:reply_user;type:varchar(50);comment:回复人id" json:"reply_user"` // 回复人id
Author int64 `gorm:"column:author;type:tinyint(4);comment:评论回复是否作者 0否 1是" json:"author"` // 评论回复是否作者 0否 1是
Likes int64 `gorm:"column:likes;type:bigint(20);comment:点赞数" json:"likes"` // 点赞数
ReplyCount int64 `gorm:"column:reply_count;type:bigint(20);comment:回复数量" json:"reply_count"` // 回复数量
Browser string `gorm:"column:browser;type:varchar(50);comment:浏览器" json:"browser"` // 浏览器
OperatingSystem string `gorm:"column:operating_system;type:varchar(50);comment:操作系统" json:"operating_system"` // 操作系统
CommentIP string `gorm:"column:comment_ip;type:varchar(50);comment:IP地址" json:"comment_ip"` // IP地址
Location string `gorm:"column:location;type:varchar(50);comment:地址" json:"location"` // 地址
Agent string `gorm:"column:agent;type:varchar(255);comment:设备信息" json:"agent"` // 设备信息
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
Version optimisticlock.Version `gorm:"column:version;type:bigint(20);comment:版本" json:"version"` // 版本
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:datetime;comment:删除时间" json:"deleted_at"` // 删除时间
}
// TableName ScaCommentReply's table name


@@ -1,31 +0,0 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
import (
"time"
"gorm.io/gorm"
)
const TableNameScaFileFolder = "sca_file_folder"
// ScaFileFolder mapped from table <sca_file_folder>
type ScaFileFolder struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
FolderName string `gorm:"column:folder_name;type:varchar(512);comment:文件夹名称" json:"folder_name"` // 文件夹名称
ParentFolderID int64 `gorm:"column:parent_folder_id;type:bigint;comment:父文件夹编号" json:"parent_folder_id"` // 父文件夹编号
FolderAddr string `gorm:"column:folder_addr;type:varchar(1024);comment:文件夹名称" json:"folder_addr"` // 文件夹名称
UserID string `gorm:"column:user_id;type:varchar(20);comment:用户编号" json:"user_id"` // 用户编号
FolderSource int64 `gorm:"column:folder_source;type:int;comment:文件夹来源 0相册 1 评论" json:"folder_source"` // 文件夹来源 0相册 1 评论
CreatedAt *time.Time `gorm:"column:created_at;type:datetime;default:CURRENT_TIMESTAMP;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt *time.Time `gorm:"column:updated_at;type:datetime;default:CURRENT_TIMESTAMP;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:datetime;comment:删除时间" json:"deleted_at"` // 删除时间
}
// TableName ScaFileFolder's table name
func (*ScaFileFolder) TableName() string {
return TableNameScaFileFolder
}


@@ -1,34 +0,0 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
import (
"time"
"gorm.io/gorm"
)
const TableNameScaFileInfo = "sca_file_info"
// ScaFileInfo mapped from table <sca_file_info>
type ScaFileInfo struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
FileName string `gorm:"column:file_name;type:varchar(50);comment:文件名" json:"file_name"` // 文件名
FileSize float64 `gorm:"column:file_size;type:double;comment:文件大小" json:"file_size"` // 文件大小
FileTypeID int64 `gorm:"column:file_type_id;type:bigint;comment:文件类型编号" json:"file_type_id"` // 文件类型编号
UploadTime time.Time `gorm:"column:upload_time;type:datetime;comment:上传时间" json:"upload_time"` // 上传时间
FolderID int64 `gorm:"column:folder_id;type:bigint;comment:文件夹编号" json:"folder_id"` // 文件夹编号
UserID string `gorm:"column:user_id;type:varchar(20);comment:用户编号" json:"user_id"` // 用户编号
FileSource int64 `gorm:"column:file_source;type:int;comment:文件来源 0 相册 1 评论" json:"file_source"` // 文件来源 0 相册 1 评论
Status int64 `gorm:"column:status;type:int;comment:文件状态" json:"status"` // 文件状态
CreatedAt *time.Time `gorm:"column:created_at;type:datetime;default:CURRENT_TIMESTAMP;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt *time.Time `gorm:"column:updated_at;type:datetime;default:CURRENT_TIMESTAMP;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:datetime;comment:删除时间" json:"deleted_at"` // 删除时间
}
// TableName ScaFileInfo's table name
func (*ScaFileInfo) TableName() string {
return TableNameScaFileInfo
}


@@ -1,28 +0,0 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
import (
"gorm.io/gorm"
)
const TableNameScaFileRecycle = "sca_file_recycle"
// ScaFileRecycle mapped from table <sca_file_recycle>
type ScaFileRecycle struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
FileID int64 `gorm:"column:file_id;type:bigint;comment:文件编号" json:"file_id"` // 文件编号
FolderID int64 `gorm:"column:folder_id;type:bigint;comment:文件夹编号" json:"folder_id"` // 文件夹编号
Type int64 `gorm:"column:type;type:int;comment:类型 0 文件 1 文件夹" json:"type"` // 类型 0 文件 1 文件夹
UserID string `gorm:"column:user_id;type:varchar(20);comment:用户编号" json:"user_id"` // 用户编号
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:datetime;comment:删除时间" json:"deleted_at"` // 删除时间
OriginalPath string `gorm:"column:original_path;type:varchar(1024);comment:原始路径" json:"original_path"` // 原始路径
FileSource int64 `gorm:"column:file_source;type:int;comment:文件来源 0 相册 1 评论" json:"file_source"` // 文件来源 0 相册 1 评论
}
// TableName ScaFileRecycle's table name
func (*ScaFileRecycle) TableName() string {
return TableNameScaFileRecycle
}


@@ -1,29 +0,0 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
import (
"time"
"gorm.io/gorm"
)
const TableNameScaFileType = "sca_file_type"
// ScaFileType mapped from table <sca_file_type>
type ScaFileType struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
TypeName string `gorm:"column:type_name;type:varchar(100);comment:类型名称" json:"type_name"` // 类型名称
MimeType string `gorm:"column:mime_type;type:varchar(50);comment:MIME 类型" json:"mime_type"` // MIME 类型
Status int64 `gorm:"column:status;type:int;comment:类型状态" json:"status"` // 类型状态
CreatedAt *time.Time `gorm:"column:created_at;type:datetime;default:CURRENT_TIMESTAMP;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt *time.Time `gorm:"column:updated_at;type:datetime;default:CURRENT_TIMESTAMP;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:datetime;comment:删除时间" json:"deleted_at"` // 删除时间
}
// TableName ScaFileType's table name
func (*ScaFileType) TableName() string {
return TableNameScaFileType
}


@@ -14,15 +14,15 @@ const TableNameScaMessageReport = "sca_message_report"
// ScaMessageReport mapped from table <sca_message_report>
type ScaMessageReport struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
UserID string `gorm:"column:user_id;type:varchar(20);comment:用户Id" json:"user_id"` // 用户Id
Type int64 `gorm:"column:type;type:tinyint;comment:举报类型 0评论 1 相册" json:"type"` // 举报类型 0评论 1 相册
CommentID int64 `gorm:"column:comment_id;type:bigint;comment:评论Id" json:"comment_id"` // 评论Id
Type int64 `gorm:"column:type;type:tinyint(4);comment:举报类型 0评论 1 相册" json:"type"` // 举报类型 0评论 1 相册
CommentID int64 `gorm:"column:comment_id;type:bigint(20);comment:评论Id" json:"comment_id"` // 评论Id
TopicID string `gorm:"column:topic_id;type:varchar(20);comment:话题Id" json:"topic_id"` // 话题Id
ReportType int64 `gorm:"column:report_type;type:tinyint;comment:举报" json:"report_type"` // 举报
ReportType int64 `gorm:"column:report_type;type:tinyint(4);comment:举报" json:"report_type"` // 举报
ReportContent string `gorm:"column:report_content;type:text;comment:举报说明内容" json:"report_content"` // 举报说明内容
ReportTag string `gorm:"column:report_tag;type:varchar(255);comment:举报标签" json:"report_tag"` // 举报标签
Status int64 `gorm:"column:status;type:tinyint;comment:状态0 未处理 1 已处理)" json:"status"` // 状态0 未处理 1 已处理)
Status int64 `gorm:"column:status;type:tinyint(4);comment:状态0 未处理 1 已处理)" json:"status"` // 状态0 未处理 1 已处理)
CreatedAt *time.Time `gorm:"column:created_at;type:timestamp;default:CURRENT_TIMESTAMP;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt *time.Time `gorm:"column:updated_at;type:timestamp;default:CURRENT_TIMESTAMP;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间


@@ -0,0 +1,33 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
import (
"time"
"gorm.io/gorm"
)
const TableNameScaStorageConfig = "sca_storage_config"
// ScaStorageConfig mapped from table <sca_storage_config>
type ScaStorageConfig struct {
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:用户ID" json:"user_id"` // 用户ID
Type string `gorm:"column:type;type:varchar(50);comment:类型" json:"type"` // 类型
Endpoint string `gorm:"column:endpoint;type:varchar(50);comment:地址" json:"endpoint"` // 地址
AccessKey string `gorm:"column:access_key;type:varchar(100);comment:密钥key" json:"access_key"` // 密钥key
SecretKey string `gorm:"column:secret_key;type:varchar(100);comment:密钥" json:"secret_key"` // 密钥
Bucket string `gorm:"column:bucket;type:varchar(50);comment:存储桶" json:"bucket"` // 存储桶
Region string `gorm:"column:region;type:varchar(50);comment:地域" json:"region"` // 地域
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}
// TableName ScaStorageConfig's table name
func (*ScaStorageConfig) TableName() string {
return TableNameScaStorageConfig
}


@@ -0,0 +1,40 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
import (
"time"
"gorm.io/gorm"
)
const TableNameScaStorageInfo = "sca_storage_info"
// ScaStorageInfo mapped from table <sca_storage_info>
type ScaStorageInfo struct {
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:用户ID" json:"user_id"` // 用户ID
Storage string `gorm:"column:storage;type:varchar(50);comment:存储空间" json:"storage"` // 存储空间
Bucket string `gorm:"column:bucket;type:varchar(50);comment:存储桶" json:"bucket"` // 存储桶
Type string `gorm:"column:type;type:varchar(50);comment:类型" json:"type"` // 类型
Path string `gorm:"column:path;type:varchar(255);comment:路径" json:"path"` // 路径
FileName string `gorm:"column:file_name;type:varchar(100);comment:名称" json:"file_name"` // 名称
Category string `gorm:"column:category;type:varchar(50);comment:分类" json:"category"` // 分类
Loaction string `gorm:"column:loaction;type:varchar(100);comment:地址" json:"loaction"` // 地址
Hash string `gorm:"column:hash;type:varchar(255);comment:哈希值" json:"hash"` // 哈希值
Anime string `gorm:"column:anime;type:varchar(50);comment:是否是动漫图片" json:"anime"` // 是否是动漫图片
HasFace string `gorm:"column:has_face;type:varchar(50);comment:是否人像" json:"has_face"` // 是否人像
FaceID int64 `gorm:"column:face_id;type:bigint(20);comment:人像ID" json:"face_id"` // 人像ID
Landscape string `gorm:"column:landscape;type:varchar(50);comment:风景类型" json:"landscape"` // 风景类型
Objects string `gorm:"column:objects;type:varchar(50);comment:对象识别" json:"objects"` // 对象识别
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}
// TableName ScaStorageInfo's table name
func (*ScaStorageInfo) TableName() string {
return TableNameScaStorageInfo
}


@@ -0,0 +1,28 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
import (
"time"
"gorm.io/gorm"
)
const TableNameScaStorageTag = "sca_storage_tag"
// ScaStorageTag mapped from table <sca_storage_tag>
type ScaStorageTag struct {
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
FileID int64 `gorm:"column:file_id;type:bigint(20);comment:文件ID" json:"file_id"` // 文件ID
TagID int64 `gorm:"column:tag_id;type:bigint(20);comment:标签ID" json:"tag_id"` // 标签ID
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}
// TableName ScaStorageTag's table name
func (*ScaStorageTag) TableName() string {
return TableNameScaStorageTag
}


@@ -0,0 +1,28 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
import (
"time"
"gorm.io/gorm"
)
const TableNameScaStorageTagInfo = "sca_storage_tag_info"
// ScaStorageTagInfo mapped from table <sca_storage_tag_info>
type ScaStorageTagInfo struct {
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
TagName string `gorm:"column:tag_name;type:varchar(50);not null;comment:标签名称" json:"tag_name"` // 标签名称
TagKey string `gorm:"column:tag_key;type:varchar(50);comment:标签关键字" json:"tag_key"` // 标签关键字
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}
// TableName ScaStorageTagInfo's table name
func (*ScaStorageTagInfo) TableName() string {
return TableNameScaStorageTagInfo
}


@@ -16,10 +16,10 @@ const TableNameScaUserFollow = "sca_user_follows"
type ScaUserFollow struct {
FollowerID string `gorm:"column:follower_id;type:varchar(50);not null;comment:关注者" json:"follower_id"` // 关注者
FolloweeID string `gorm:"column:followee_id;type:varchar(50);not null;comment:被关注者" json:"followee_id"` // 被关注者
Status int64 `gorm:"column:status;type:tinyint unsigned;not null;comment:关注状态0 未互关 1 互关)" json:"status"` // 关注状态0 未互关 1 互关)
Status int64 `gorm:"column:status;type:tinyint(3) unsigned;not null;comment:关注状态0 未互关 1 互关)" json:"status"` // 关注状态0 未互关 1 互关)
CreatedAt *time.Time `gorm:"column:created_at;type:timestamp;default:CURRENT_TIMESTAMP;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt *time.Time `gorm:"column:updated_at;type:timestamp;default:CURRENT_TIMESTAMP;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;primary_key" json:"id,string"`
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;primary_key" json:"id,string"`
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}


@@ -14,13 +14,13 @@ const TableNameScaUserLevel = "sca_user_level"
// ScaUserLevel mapped from table <sca_user_level>
type ScaUserLevel struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;comment:主键;primary_key" json:"id,string"` // 主键
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;comment:主键;primary_key" json:"id,string"` // 主键
UserID string `gorm:"column:user_id;type:varchar(50);comment:用户Id" json:"user_id"` // 用户Id
LevelType int64 `gorm:"column:level_type;type:tinyint unsigned;comment:等级类型" json:"level_type"` // 等级类型
Level int64 `gorm:"column:level;type:int;comment:等级" json:"level"` // 等级
LevelType int64 `gorm:"column:level_type;type:tinyint(3) unsigned;comment:等级类型" json:"level_type"` // 等级类型
Level int64 `gorm:"column:level;type:bigint(20);comment:等级" json:"level"` // 等级
LevelName string `gorm:"column:level_name;type:varchar(50);comment:等级名称" json:"level_name"` // 等级名称
ExpStart int64 `gorm:"column:exp_start;type:bigint;comment:开始经验值" json:"exp_start"` // 开始经验值
ExpEnd int64 `gorm:"column:exp_end;type:bigint;comment:结束经验值" json:"exp_end"` // 结束经验值
ExpStart int64 `gorm:"column:exp_start;type:bigint(20);comment:开始经验值" json:"exp_start"` // 开始经验值
ExpEnd int64 `gorm:"column:exp_end;type:bigint(20);comment:结束经验值" json:"exp_end"` // 结束经验值
Description string `gorm:"column:description;type:text;comment:等级描述" json:"description"` // 等级描述
CreatedAt *time.Time `gorm:"column:created_at;type:timestamp;default:CURRENT_TIMESTAMP;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt *time.Time `gorm:"column:updated_at;type:timestamp;default:CURRENT_TIMESTAMP;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间


@@ -14,15 +14,15 @@ const TableNameScaUserMessage = "sca_user_message"
// ScaUserMessage mapped from table <sca_user_message>
type ScaUserMessage struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
TopicID string `gorm:"column:topic_id;type:varchar(50);comment:话题Id" json:"topic_id"` // 话题Id
FromID string `gorm:"column:from_id;type:varchar(50);comment:来自人" json:"from_id"` // 来自人
ToID string `gorm:"column:to_id;type:varchar(50);comment:送达人" json:"to_id"` // 送达人
Content string `gorm:"column:content;type:text;comment:消息内容" json:"content"` // 消息内容
IsRead int64 `gorm:"column:is_read;type:tinyint;comment:是否已读" json:"is_read"` // 是否已读
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
TopicID string `gorm:"column:topic_id;type:varchar(50);comment:话题Id" json:"topic_id"` // 话题Id
FromID string `gorm:"column:from_id;type:varchar(50);comment:来自人" json:"from_id"` // 来自人
ToID string `gorm:"column:to_id;type:varchar(50);comment:送达人" json:"to_id"` // 送达人
Content string `gorm:"column:content;type:text;comment:消息内容" json:"content"` // 消息内容
IsRead int64 `gorm:"column:is_read;type:tinyint(4);comment:是否已读" json:"is_read"` // 是否已读
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}
// TableName ScaUserMessage's table name


@@ -3,6 +3,7 @@ package mysql
import (
"log"
"os"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"schisandra-album-cloud-microservices/app/auth/model/mysql/query"
"time"
@@ -39,6 +40,8 @@ func NewMySQL(url string, maxOpenConn int, maxIdleConn int, client *redis.Client
sqlDB.SetMaxOpenConns(maxOpenConn)
sqlDB.SetMaxIdleConns(maxIdleConn)
useDB := query.Use(db)
// migrate
Migrate(db)
// cache
gormCache, err := cache.NewGorm2Cache(&config.CacheConfig{
CacheLevel: config.CacheLevelAll,
@@ -63,3 +66,26 @@ func NewMySQL(url string, maxOpenConn int, maxIdleConn int, client *redis.Client
return db, useDB
}
func Migrate(db *gorm.DB) {
err := db.AutoMigrate(
&model.ScaAuthUser{},
&model.ScaAuthRole{},
&model.ScaAuthPermissionRule{},
&model.ScaAuthMenu{},
&model.ScaAuthUserDevice{},
&model.ScaAuthUserSocial{},
&model.ScaCommentLike{},
&model.ScaCommentReply{},
&model.ScaStorageInfo{},
&model.ScaStorageTag{},
&model.ScaStorageTagInfo{},
&model.ScaMessageReport{},
&model.ScaStorageConfig{},
&model.ScaUserFollow{},
&model.ScaUserLevel{},
&model.ScaUserMessage{})
if err != nil {
panic(err)
}
}
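AutoMigrate creates or alters the listed tables but never drops anything, so the retired sca_file_* tables whose models were removed in this commit will stay in the database until they are dropped explicitly. If that cleanup is wanted, a separate step along these lines would do it; this is a suggested sketch, not part of the commit:

```go
package mysql

import "gorm.io/gorm"

// DropLegacyFileTables is an optional one-off cleanup; GORM's Migrator accepts
// table names directly, so the deleted model structs are not needed.
func DropLegacyFileTables(db *gorm.DB) error {
	return db.Migrator().DropTable(
		"sca_file_folder",
		"sca_file_info",
		"sca_file_recycle",
		"sca_file_type",
	)
}
```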


@@ -25,11 +25,11 @@ var (
ScaAuthUserSocial *scaAuthUserSocial
ScaCommentLike *scaCommentLike
ScaCommentReply *scaCommentReply
ScaFileFolder *scaFileFolder
ScaFileInfo *scaFileInfo
ScaFileRecycle *scaFileRecycle
ScaFileType *scaFileType
ScaMessageReport *scaMessageReport
ScaStorageConfig *scaStorageConfig
ScaStorageInfo *scaStorageInfo
ScaStorageTag *scaStorageTag
ScaStorageTagInfo *scaStorageTagInfo
ScaUserFollow *scaUserFollow
ScaUserLevel *scaUserLevel
ScaUserMessage *scaUserMessage
@@ -45,11 +45,11 @@ func SetDefault(db *gorm.DB, opts ...gen.DOOption) {
ScaAuthUserSocial = &Q.ScaAuthUserSocial
ScaCommentLike = &Q.ScaCommentLike
ScaCommentReply = &Q.ScaCommentReply
ScaFileFolder = &Q.ScaFileFolder
ScaFileInfo = &Q.ScaFileInfo
ScaFileRecycle = &Q.ScaFileRecycle
ScaFileType = &Q.ScaFileType
ScaMessageReport = &Q.ScaMessageReport
ScaStorageConfig = &Q.ScaStorageConfig
ScaStorageInfo = &Q.ScaStorageInfo
ScaStorageTag = &Q.ScaStorageTag
ScaStorageTagInfo = &Q.ScaStorageTagInfo
ScaUserFollow = &Q.ScaUserFollow
ScaUserLevel = &Q.ScaUserLevel
ScaUserMessage = &Q.ScaUserMessage
@@ -66,11 +66,11 @@ func Use(db *gorm.DB, opts ...gen.DOOption) *Query {
ScaAuthUserSocial: newScaAuthUserSocial(db, opts...),
ScaCommentLike: newScaCommentLike(db, opts...),
ScaCommentReply: newScaCommentReply(db, opts...),
ScaFileFolder: newScaFileFolder(db, opts...),
ScaFileInfo: newScaFileInfo(db, opts...),
ScaFileRecycle: newScaFileRecycle(db, opts...),
ScaFileType: newScaFileType(db, opts...),
ScaMessageReport: newScaMessageReport(db, opts...),
ScaStorageConfig: newScaStorageConfig(db, opts...),
ScaStorageInfo: newScaStorageInfo(db, opts...),
ScaStorageTag: newScaStorageTag(db, opts...),
ScaStorageTagInfo: newScaStorageTagInfo(db, opts...),
ScaUserFollow: newScaUserFollow(db, opts...),
ScaUserLevel: newScaUserLevel(db, opts...),
ScaUserMessage: newScaUserMessage(db, opts...),
@@ -88,11 +88,11 @@ type Query struct {
ScaAuthUserSocial scaAuthUserSocial
ScaCommentLike scaCommentLike
ScaCommentReply scaCommentReply
ScaFileFolder scaFileFolder
ScaFileInfo scaFileInfo
ScaFileRecycle scaFileRecycle
ScaFileType scaFileType
ScaMessageReport scaMessageReport
ScaStorageConfig scaStorageConfig
ScaStorageInfo scaStorageInfo
ScaStorageTag scaStorageTag
ScaStorageTagInfo scaStorageTagInfo
ScaUserFollow scaUserFollow
ScaUserLevel scaUserLevel
ScaUserMessage scaUserMessage
@@ -111,11 +111,11 @@ func (q *Query) clone(db *gorm.DB) *Query {
ScaAuthUserSocial: q.ScaAuthUserSocial.clone(db),
ScaCommentLike: q.ScaCommentLike.clone(db),
ScaCommentReply: q.ScaCommentReply.clone(db),
ScaFileFolder: q.ScaFileFolder.clone(db),
ScaFileInfo: q.ScaFileInfo.clone(db),
ScaFileRecycle: q.ScaFileRecycle.clone(db),
ScaFileType: q.ScaFileType.clone(db),
ScaMessageReport: q.ScaMessageReport.clone(db),
ScaStorageConfig: q.ScaStorageConfig.clone(db),
ScaStorageInfo: q.ScaStorageInfo.clone(db),
ScaStorageTag: q.ScaStorageTag.clone(db),
ScaStorageTagInfo: q.ScaStorageTagInfo.clone(db),
ScaUserFollow: q.ScaUserFollow.clone(db),
ScaUserLevel: q.ScaUserLevel.clone(db),
ScaUserMessage: q.ScaUserMessage.clone(db),
@@ -141,11 +141,11 @@ func (q *Query) ReplaceDB(db *gorm.DB) *Query {
ScaAuthUserSocial: q.ScaAuthUserSocial.replaceDB(db),
ScaCommentLike: q.ScaCommentLike.replaceDB(db),
ScaCommentReply: q.ScaCommentReply.replaceDB(db),
ScaFileFolder: q.ScaFileFolder.replaceDB(db),
ScaFileInfo: q.ScaFileInfo.replaceDB(db),
ScaFileRecycle: q.ScaFileRecycle.replaceDB(db),
ScaFileType: q.ScaFileType.replaceDB(db),
ScaMessageReport: q.ScaMessageReport.replaceDB(db),
ScaStorageConfig: q.ScaStorageConfig.replaceDB(db),
ScaStorageInfo: q.ScaStorageInfo.replaceDB(db),
ScaStorageTag: q.ScaStorageTag.replaceDB(db),
ScaStorageTagInfo: q.ScaStorageTagInfo.replaceDB(db),
ScaUserFollow: q.ScaUserFollow.replaceDB(db),
ScaUserLevel: q.ScaUserLevel.replaceDB(db),
ScaUserMessage: q.ScaUserMessage.replaceDB(db),
@@ -161,11 +161,11 @@ type queryCtx struct {
ScaAuthUserSocial IScaAuthUserSocialDo
ScaCommentLike IScaCommentLikeDo
ScaCommentReply IScaCommentReplyDo
ScaFileFolder IScaFileFolderDo
ScaFileInfo IScaFileInfoDo
ScaFileRecycle IScaFileRecycleDo
ScaFileType IScaFileTypeDo
ScaMessageReport IScaMessageReportDo
ScaStorageConfig IScaStorageConfigDo
ScaStorageInfo IScaStorageInfoDo
ScaStorageTag IScaStorageTagDo
ScaStorageTagInfo IScaStorageTagInfoDo
ScaUserFollow IScaUserFollowDo
ScaUserLevel IScaUserLevelDo
ScaUserMessage IScaUserMessageDo
@@ -181,11 +181,11 @@ func (q *Query) WithContext(ctx context.Context) *queryCtx {
ScaAuthUserSocial: q.ScaAuthUserSocial.WithContext(ctx),
ScaCommentLike: q.ScaCommentLike.WithContext(ctx),
ScaCommentReply: q.ScaCommentReply.WithContext(ctx),
ScaFileFolder: q.ScaFileFolder.WithContext(ctx),
ScaFileInfo: q.ScaFileInfo.WithContext(ctx),
ScaFileRecycle: q.ScaFileRecycle.WithContext(ctx),
ScaFileType: q.ScaFileType.WithContext(ctx),
ScaMessageReport: q.ScaMessageReport.WithContext(ctx),
ScaStorageConfig: q.ScaStorageConfig.WithContext(ctx),
ScaStorageInfo: q.ScaStorageInfo.WithContext(ctx),
ScaStorageTag: q.ScaStorageTag.WithContext(ctx),
ScaStorageTagInfo: q.ScaStorageTagInfo.WithContext(ctx),
ScaUserFollow: q.ScaUserFollow.WithContext(ctx),
ScaUserLevel: q.ScaUserLevel.WithContext(ctx),
ScaUserMessage: q.ScaUserMessage.WithContext(ctx),
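In the regenerated Query/queryCtx types the ScaFile* DAOs are dropped and the ScaStorage* DAOs take their place. A hedged usage sketch of the regenerated API follows; getStorageConfig, ctx, db and uid are illustrative names, not code from this diff.

import (
	"context"

	"gorm.io/gorm"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/query"
)

// getStorageConfig is a hypothetical read path using the typed DAO.
func getStorageConfig(ctx context.Context, db *gorm.DB, uid string) (*model.ScaStorageConfig, error) {
	q := query.Use(db)
	return q.ScaStorageConfig.WithContext(ctx).
		Where(q.ScaStorageConfig.UserID.Eq(uid)).
		First() // gorm.ErrRecordNotFound when the user has no config yet
}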

View File

@@ -6,15 +6,17 @@ package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaAuthMenu(db *gorm.DB, opts ...gen.DOOption) scaAuthMenu {

View File

@@ -6,15 +6,17 @@ package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaAuthPermissionRule(db *gorm.DB, opts ...gen.DOOption) scaAuthPermissionRule {

View File

@@ -6,15 +6,17 @@ package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaAuthRole(db *gorm.DB, opts ...gen.DOOption) scaAuthRole {

View File

@@ -6,15 +6,17 @@ package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaAuthUser(db *gorm.DB, opts ...gen.DOOption) scaAuthUser {

View File

@@ -6,15 +6,17 @@ package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaAuthUserDevice(db *gorm.DB, opts ...gen.DOOption) scaAuthUserDevice {

View File

@@ -6,15 +6,17 @@ package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaAuthUserSocial(db *gorm.DB, opts ...gen.DOOption) scaAuthUserSocial {

View File

@@ -6,15 +6,17 @@ package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaCommentLike(db *gorm.DB, opts ...gen.DOOption) scaCommentLike {

View File

@@ -6,15 +6,17 @@ package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaCommentReply(db *gorm.DB, opts ...gen.DOOption) scaCommentReply {

View File

@@ -1,410 +0,0 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
)
func newScaFileFolder(db *gorm.DB, opts ...gen.DOOption) scaFileFolder {
_scaFileFolder := scaFileFolder{}
_scaFileFolder.scaFileFolderDo.UseDB(db, opts...)
_scaFileFolder.scaFileFolderDo.UseModel(&model.ScaFileFolder{})
tableName := _scaFileFolder.scaFileFolderDo.TableName()
_scaFileFolder.ALL = field.NewAsterisk(tableName)
_scaFileFolder.ID = field.NewInt64(tableName, "id")
_scaFileFolder.FolderName = field.NewString(tableName, "folder_name")
_scaFileFolder.ParentFolderID = field.NewInt64(tableName, "parent_folder_id")
_scaFileFolder.FolderAddr = field.NewString(tableName, "folder_addr")
_scaFileFolder.UserID = field.NewString(tableName, "user_id")
_scaFileFolder.FolderSource = field.NewInt64(tableName, "folder_source")
_scaFileFolder.CreatedAt = field.NewTime(tableName, "created_at")
_scaFileFolder.UpdatedAt = field.NewTime(tableName, "updated_at")
_scaFileFolder.DeletedAt = field.NewField(tableName, "deleted_at")
_scaFileFolder.fillFieldMap()
return _scaFileFolder
}
type scaFileFolder struct {
scaFileFolderDo
ALL field.Asterisk
ID field.Int64 // primary key
FolderName field.String // folder name
ParentFolderID field.Int64 // parent folder ID
FolderAddr field.String // folder address
UserID field.String // user ID
FolderSource field.Int64 // folder source: 0 album, 1 comment
CreatedAt field.Time // created at
UpdatedAt field.Time // updated at
DeletedAt field.Field // deleted at
fieldMap map[string]field.Expr
}
func (s scaFileFolder) Table(newTableName string) *scaFileFolder {
s.scaFileFolderDo.UseTable(newTableName)
return s.updateTableName(newTableName)
}
func (s scaFileFolder) As(alias string) *scaFileFolder {
s.scaFileFolderDo.DO = *(s.scaFileFolderDo.As(alias).(*gen.DO))
return s.updateTableName(alias)
}
func (s *scaFileFolder) updateTableName(table string) *scaFileFolder {
s.ALL = field.NewAsterisk(table)
s.ID = field.NewInt64(table, "id")
s.FolderName = field.NewString(table, "folder_name")
s.ParentFolderID = field.NewInt64(table, "parent_folder_id")
s.FolderAddr = field.NewString(table, "folder_addr")
s.UserID = field.NewString(table, "user_id")
s.FolderSource = field.NewInt64(table, "folder_source")
s.CreatedAt = field.NewTime(table, "created_at")
s.UpdatedAt = field.NewTime(table, "updated_at")
s.DeletedAt = field.NewField(table, "deleted_at")
s.fillFieldMap()
return s
}
func (s *scaFileFolder) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := s.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (s *scaFileFolder) fillFieldMap() {
s.fieldMap = make(map[string]field.Expr, 9)
s.fieldMap["id"] = s.ID
s.fieldMap["folder_name"] = s.FolderName
s.fieldMap["parent_folder_id"] = s.ParentFolderID
s.fieldMap["folder_addr"] = s.FolderAddr
s.fieldMap["user_id"] = s.UserID
s.fieldMap["folder_source"] = s.FolderSource
s.fieldMap["created_at"] = s.CreatedAt
s.fieldMap["updated_at"] = s.UpdatedAt
s.fieldMap["deleted_at"] = s.DeletedAt
}
func (s scaFileFolder) clone(db *gorm.DB) scaFileFolder {
s.scaFileFolderDo.ReplaceConnPool(db.Statement.ConnPool)
return s
}
func (s scaFileFolder) replaceDB(db *gorm.DB) scaFileFolder {
s.scaFileFolderDo.ReplaceDB(db)
return s
}
type scaFileFolderDo struct{ gen.DO }
type IScaFileFolderDo interface {
gen.SubQuery
Debug() IScaFileFolderDo
WithContext(ctx context.Context) IScaFileFolderDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IScaFileFolderDo
WriteDB() IScaFileFolderDo
As(alias string) gen.Dao
Session(config *gorm.Session) IScaFileFolderDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IScaFileFolderDo
Not(conds ...gen.Condition) IScaFileFolderDo
Or(conds ...gen.Condition) IScaFileFolderDo
Select(conds ...field.Expr) IScaFileFolderDo
Where(conds ...gen.Condition) IScaFileFolderDo
Order(conds ...field.Expr) IScaFileFolderDo
Distinct(cols ...field.Expr) IScaFileFolderDo
Omit(cols ...field.Expr) IScaFileFolderDo
Join(table schema.Tabler, on ...field.Expr) IScaFileFolderDo
LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileFolderDo
RightJoin(table schema.Tabler, on ...field.Expr) IScaFileFolderDo
Group(cols ...field.Expr) IScaFileFolderDo
Having(conds ...gen.Condition) IScaFileFolderDo
Limit(limit int) IScaFileFolderDo
Offset(offset int) IScaFileFolderDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileFolderDo
Unscoped() IScaFileFolderDo
Create(values ...*model.ScaFileFolder) error
CreateInBatches(values []*model.ScaFileFolder, batchSize int) error
Save(values ...*model.ScaFileFolder) error
First() (*model.ScaFileFolder, error)
Take() (*model.ScaFileFolder, error)
Last() (*model.ScaFileFolder, error)
Find() ([]*model.ScaFileFolder, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileFolder, err error)
FindInBatches(result *[]*model.ScaFileFolder, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.ScaFileFolder) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IScaFileFolderDo
Assign(attrs ...field.AssignExpr) IScaFileFolderDo
Joins(fields ...field.RelationField) IScaFileFolderDo
Preload(fields ...field.RelationField) IScaFileFolderDo
FirstOrInit() (*model.ScaFileFolder, error)
FirstOrCreate() (*model.ScaFileFolder, error)
FindByPage(offset int, limit int) (result []*model.ScaFileFolder, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IScaFileFolderDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (s scaFileFolderDo) Debug() IScaFileFolderDo {
return s.withDO(s.DO.Debug())
}
func (s scaFileFolderDo) WithContext(ctx context.Context) IScaFileFolderDo {
return s.withDO(s.DO.WithContext(ctx))
}
func (s scaFileFolderDo) ReadDB() IScaFileFolderDo {
return s.Clauses(dbresolver.Read)
}
func (s scaFileFolderDo) WriteDB() IScaFileFolderDo {
return s.Clauses(dbresolver.Write)
}
func (s scaFileFolderDo) Session(config *gorm.Session) IScaFileFolderDo {
return s.withDO(s.DO.Session(config))
}
func (s scaFileFolderDo) Clauses(conds ...clause.Expression) IScaFileFolderDo {
return s.withDO(s.DO.Clauses(conds...))
}
func (s scaFileFolderDo) Returning(value interface{}, columns ...string) IScaFileFolderDo {
return s.withDO(s.DO.Returning(value, columns...))
}
func (s scaFileFolderDo) Not(conds ...gen.Condition) IScaFileFolderDo {
return s.withDO(s.DO.Not(conds...))
}
func (s scaFileFolderDo) Or(conds ...gen.Condition) IScaFileFolderDo {
return s.withDO(s.DO.Or(conds...))
}
func (s scaFileFolderDo) Select(conds ...field.Expr) IScaFileFolderDo {
return s.withDO(s.DO.Select(conds...))
}
func (s scaFileFolderDo) Where(conds ...gen.Condition) IScaFileFolderDo {
return s.withDO(s.DO.Where(conds...))
}
func (s scaFileFolderDo) Order(conds ...field.Expr) IScaFileFolderDo {
return s.withDO(s.DO.Order(conds...))
}
func (s scaFileFolderDo) Distinct(cols ...field.Expr) IScaFileFolderDo {
return s.withDO(s.DO.Distinct(cols...))
}
func (s scaFileFolderDo) Omit(cols ...field.Expr) IScaFileFolderDo {
return s.withDO(s.DO.Omit(cols...))
}
func (s scaFileFolderDo) Join(table schema.Tabler, on ...field.Expr) IScaFileFolderDo {
return s.withDO(s.DO.Join(table, on...))
}
func (s scaFileFolderDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileFolderDo {
return s.withDO(s.DO.LeftJoin(table, on...))
}
func (s scaFileFolderDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaFileFolderDo {
return s.withDO(s.DO.RightJoin(table, on...))
}
func (s scaFileFolderDo) Group(cols ...field.Expr) IScaFileFolderDo {
return s.withDO(s.DO.Group(cols...))
}
func (s scaFileFolderDo) Having(conds ...gen.Condition) IScaFileFolderDo {
return s.withDO(s.DO.Having(conds...))
}
func (s scaFileFolderDo) Limit(limit int) IScaFileFolderDo {
return s.withDO(s.DO.Limit(limit))
}
func (s scaFileFolderDo) Offset(offset int) IScaFileFolderDo {
return s.withDO(s.DO.Offset(offset))
}
func (s scaFileFolderDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileFolderDo {
return s.withDO(s.DO.Scopes(funcs...))
}
func (s scaFileFolderDo) Unscoped() IScaFileFolderDo {
return s.withDO(s.DO.Unscoped())
}
func (s scaFileFolderDo) Create(values ...*model.ScaFileFolder) error {
if len(values) == 0 {
return nil
}
return s.DO.Create(values)
}
func (s scaFileFolderDo) CreateInBatches(values []*model.ScaFileFolder, batchSize int) error {
return s.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (s scaFileFolderDo) Save(values ...*model.ScaFileFolder) error {
if len(values) == 0 {
return nil
}
return s.DO.Save(values)
}
func (s scaFileFolderDo) First() (*model.ScaFileFolder, error) {
if result, err := s.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileFolder), nil
}
}
func (s scaFileFolderDo) Take() (*model.ScaFileFolder, error) {
if result, err := s.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileFolder), nil
}
}
func (s scaFileFolderDo) Last() (*model.ScaFileFolder, error) {
if result, err := s.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileFolder), nil
}
}
func (s scaFileFolderDo) Find() ([]*model.ScaFileFolder, error) {
result, err := s.DO.Find()
return result.([]*model.ScaFileFolder), err
}
func (s scaFileFolderDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileFolder, err error) {
buf := make([]*model.ScaFileFolder, 0, batchSize)
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (s scaFileFolderDo) FindInBatches(result *[]*model.ScaFileFolder, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return s.DO.FindInBatches(result, batchSize, fc)
}
func (s scaFileFolderDo) Attrs(attrs ...field.AssignExpr) IScaFileFolderDo {
return s.withDO(s.DO.Attrs(attrs...))
}
func (s scaFileFolderDo) Assign(attrs ...field.AssignExpr) IScaFileFolderDo {
return s.withDO(s.DO.Assign(attrs...))
}
func (s scaFileFolderDo) Joins(fields ...field.RelationField) IScaFileFolderDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Joins(_f))
}
return &s
}
func (s scaFileFolderDo) Preload(fields ...field.RelationField) IScaFileFolderDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Preload(_f))
}
return &s
}
func (s scaFileFolderDo) FirstOrInit() (*model.ScaFileFolder, error) {
if result, err := s.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileFolder), nil
}
}
func (s scaFileFolderDo) FirstOrCreate() (*model.ScaFileFolder, error) {
if result, err := s.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileFolder), nil
}
}
func (s scaFileFolderDo) FindByPage(offset int, limit int) (result []*model.ScaFileFolder, count int64, err error) {
result, err = s.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = s.Offset(-1).Limit(-1).Count()
return
}
func (s scaFileFolderDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = s.Count()
if err != nil {
return
}
err = s.Offset(offset).Limit(limit).Scan(result)
return
}
func (s scaFileFolderDo) Scan(result interface{}) (err error) {
return s.DO.Scan(result)
}
func (s scaFileFolderDo) Delete(models ...*model.ScaFileFolder) (result gen.ResultInfo, err error) {
return s.DO.Delete(models)
}
func (s *scaFileFolderDo) withDO(do gen.Dao) *scaFileFolderDo {
s.DO = *do.(*gen.DO)
return s
}

View File

@@ -1,422 +0,0 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
)
func newScaFileInfo(db *gorm.DB, opts ...gen.DOOption) scaFileInfo {
_scaFileInfo := scaFileInfo{}
_scaFileInfo.scaFileInfoDo.UseDB(db, opts...)
_scaFileInfo.scaFileInfoDo.UseModel(&model.ScaFileInfo{})
tableName := _scaFileInfo.scaFileInfoDo.TableName()
_scaFileInfo.ALL = field.NewAsterisk(tableName)
_scaFileInfo.ID = field.NewInt64(tableName, "id")
_scaFileInfo.FileName = field.NewString(tableName, "file_name")
_scaFileInfo.FileSize = field.NewFloat64(tableName, "file_size")
_scaFileInfo.FileTypeID = field.NewInt64(tableName, "file_type_id")
_scaFileInfo.UploadTime = field.NewTime(tableName, "upload_time")
_scaFileInfo.FolderID = field.NewInt64(tableName, "folder_id")
_scaFileInfo.UserID = field.NewString(tableName, "user_id")
_scaFileInfo.FileSource = field.NewInt64(tableName, "file_source")
_scaFileInfo.Status = field.NewInt64(tableName, "status")
_scaFileInfo.CreatedAt = field.NewTime(tableName, "created_at")
_scaFileInfo.UpdatedAt = field.NewTime(tableName, "updated_at")
_scaFileInfo.DeletedAt = field.NewField(tableName, "deleted_at")
_scaFileInfo.fillFieldMap()
return _scaFileInfo
}
type scaFileInfo struct {
scaFileInfoDo
ALL field.Asterisk
ID field.Int64 // primary key
FileName field.String // file name
FileSize field.Float64 // file size
FileTypeID field.Int64 // file type ID
UploadTime field.Time // upload time
FolderID field.Int64 // folder ID
UserID field.String // user ID
FileSource field.Int64 // file source: 0 album, 1 comment
Status field.Int64 // file status
CreatedAt field.Time // created at
UpdatedAt field.Time // updated at
DeletedAt field.Field // deleted at
fieldMap map[string]field.Expr
}
func (s scaFileInfo) Table(newTableName string) *scaFileInfo {
s.scaFileInfoDo.UseTable(newTableName)
return s.updateTableName(newTableName)
}
func (s scaFileInfo) As(alias string) *scaFileInfo {
s.scaFileInfoDo.DO = *(s.scaFileInfoDo.As(alias).(*gen.DO))
return s.updateTableName(alias)
}
func (s *scaFileInfo) updateTableName(table string) *scaFileInfo {
s.ALL = field.NewAsterisk(table)
s.ID = field.NewInt64(table, "id")
s.FileName = field.NewString(table, "file_name")
s.FileSize = field.NewFloat64(table, "file_size")
s.FileTypeID = field.NewInt64(table, "file_type_id")
s.UploadTime = field.NewTime(table, "upload_time")
s.FolderID = field.NewInt64(table, "folder_id")
s.UserID = field.NewString(table, "user_id")
s.FileSource = field.NewInt64(table, "file_source")
s.Status = field.NewInt64(table, "status")
s.CreatedAt = field.NewTime(table, "created_at")
s.UpdatedAt = field.NewTime(table, "updated_at")
s.DeletedAt = field.NewField(table, "deleted_at")
s.fillFieldMap()
return s
}
func (s *scaFileInfo) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := s.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (s *scaFileInfo) fillFieldMap() {
s.fieldMap = make(map[string]field.Expr, 12)
s.fieldMap["id"] = s.ID
s.fieldMap["file_name"] = s.FileName
s.fieldMap["file_size"] = s.FileSize
s.fieldMap["file_type_id"] = s.FileTypeID
s.fieldMap["upload_time"] = s.UploadTime
s.fieldMap["folder_id"] = s.FolderID
s.fieldMap["user_id"] = s.UserID
s.fieldMap["file_source"] = s.FileSource
s.fieldMap["status"] = s.Status
s.fieldMap["created_at"] = s.CreatedAt
s.fieldMap["updated_at"] = s.UpdatedAt
s.fieldMap["deleted_at"] = s.DeletedAt
}
func (s scaFileInfo) clone(db *gorm.DB) scaFileInfo {
s.scaFileInfoDo.ReplaceConnPool(db.Statement.ConnPool)
return s
}
func (s scaFileInfo) replaceDB(db *gorm.DB) scaFileInfo {
s.scaFileInfoDo.ReplaceDB(db)
return s
}
type scaFileInfoDo struct{ gen.DO }
type IScaFileInfoDo interface {
gen.SubQuery
Debug() IScaFileInfoDo
WithContext(ctx context.Context) IScaFileInfoDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IScaFileInfoDo
WriteDB() IScaFileInfoDo
As(alias string) gen.Dao
Session(config *gorm.Session) IScaFileInfoDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IScaFileInfoDo
Not(conds ...gen.Condition) IScaFileInfoDo
Or(conds ...gen.Condition) IScaFileInfoDo
Select(conds ...field.Expr) IScaFileInfoDo
Where(conds ...gen.Condition) IScaFileInfoDo
Order(conds ...field.Expr) IScaFileInfoDo
Distinct(cols ...field.Expr) IScaFileInfoDo
Omit(cols ...field.Expr) IScaFileInfoDo
Join(table schema.Tabler, on ...field.Expr) IScaFileInfoDo
LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileInfoDo
RightJoin(table schema.Tabler, on ...field.Expr) IScaFileInfoDo
Group(cols ...field.Expr) IScaFileInfoDo
Having(conds ...gen.Condition) IScaFileInfoDo
Limit(limit int) IScaFileInfoDo
Offset(offset int) IScaFileInfoDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileInfoDo
Unscoped() IScaFileInfoDo
Create(values ...*model.ScaFileInfo) error
CreateInBatches(values []*model.ScaFileInfo, batchSize int) error
Save(values ...*model.ScaFileInfo) error
First() (*model.ScaFileInfo, error)
Take() (*model.ScaFileInfo, error)
Last() (*model.ScaFileInfo, error)
Find() ([]*model.ScaFileInfo, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileInfo, err error)
FindInBatches(result *[]*model.ScaFileInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.ScaFileInfo) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IScaFileInfoDo
Assign(attrs ...field.AssignExpr) IScaFileInfoDo
Joins(fields ...field.RelationField) IScaFileInfoDo
Preload(fields ...field.RelationField) IScaFileInfoDo
FirstOrInit() (*model.ScaFileInfo, error)
FirstOrCreate() (*model.ScaFileInfo, error)
FindByPage(offset int, limit int) (result []*model.ScaFileInfo, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IScaFileInfoDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (s scaFileInfoDo) Debug() IScaFileInfoDo {
return s.withDO(s.DO.Debug())
}
func (s scaFileInfoDo) WithContext(ctx context.Context) IScaFileInfoDo {
return s.withDO(s.DO.WithContext(ctx))
}
func (s scaFileInfoDo) ReadDB() IScaFileInfoDo {
return s.Clauses(dbresolver.Read)
}
func (s scaFileInfoDo) WriteDB() IScaFileInfoDo {
return s.Clauses(dbresolver.Write)
}
func (s scaFileInfoDo) Session(config *gorm.Session) IScaFileInfoDo {
return s.withDO(s.DO.Session(config))
}
func (s scaFileInfoDo) Clauses(conds ...clause.Expression) IScaFileInfoDo {
return s.withDO(s.DO.Clauses(conds...))
}
func (s scaFileInfoDo) Returning(value interface{}, columns ...string) IScaFileInfoDo {
return s.withDO(s.DO.Returning(value, columns...))
}
func (s scaFileInfoDo) Not(conds ...gen.Condition) IScaFileInfoDo {
return s.withDO(s.DO.Not(conds...))
}
func (s scaFileInfoDo) Or(conds ...gen.Condition) IScaFileInfoDo {
return s.withDO(s.DO.Or(conds...))
}
func (s scaFileInfoDo) Select(conds ...field.Expr) IScaFileInfoDo {
return s.withDO(s.DO.Select(conds...))
}
func (s scaFileInfoDo) Where(conds ...gen.Condition) IScaFileInfoDo {
return s.withDO(s.DO.Where(conds...))
}
func (s scaFileInfoDo) Order(conds ...field.Expr) IScaFileInfoDo {
return s.withDO(s.DO.Order(conds...))
}
func (s scaFileInfoDo) Distinct(cols ...field.Expr) IScaFileInfoDo {
return s.withDO(s.DO.Distinct(cols...))
}
func (s scaFileInfoDo) Omit(cols ...field.Expr) IScaFileInfoDo {
return s.withDO(s.DO.Omit(cols...))
}
func (s scaFileInfoDo) Join(table schema.Tabler, on ...field.Expr) IScaFileInfoDo {
return s.withDO(s.DO.Join(table, on...))
}
func (s scaFileInfoDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileInfoDo {
return s.withDO(s.DO.LeftJoin(table, on...))
}
func (s scaFileInfoDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaFileInfoDo {
return s.withDO(s.DO.RightJoin(table, on...))
}
func (s scaFileInfoDo) Group(cols ...field.Expr) IScaFileInfoDo {
return s.withDO(s.DO.Group(cols...))
}
func (s scaFileInfoDo) Having(conds ...gen.Condition) IScaFileInfoDo {
return s.withDO(s.DO.Having(conds...))
}
func (s scaFileInfoDo) Limit(limit int) IScaFileInfoDo {
return s.withDO(s.DO.Limit(limit))
}
func (s scaFileInfoDo) Offset(offset int) IScaFileInfoDo {
return s.withDO(s.DO.Offset(offset))
}
func (s scaFileInfoDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileInfoDo {
return s.withDO(s.DO.Scopes(funcs...))
}
func (s scaFileInfoDo) Unscoped() IScaFileInfoDo {
return s.withDO(s.DO.Unscoped())
}
func (s scaFileInfoDo) Create(values ...*model.ScaFileInfo) error {
if len(values) == 0 {
return nil
}
return s.DO.Create(values)
}
func (s scaFileInfoDo) CreateInBatches(values []*model.ScaFileInfo, batchSize int) error {
return s.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (s scaFileInfoDo) Save(values ...*model.ScaFileInfo) error {
if len(values) == 0 {
return nil
}
return s.DO.Save(values)
}
func (s scaFileInfoDo) First() (*model.ScaFileInfo, error) {
if result, err := s.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileInfo), nil
}
}
func (s scaFileInfoDo) Take() (*model.ScaFileInfo, error) {
if result, err := s.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileInfo), nil
}
}
func (s scaFileInfoDo) Last() (*model.ScaFileInfo, error) {
if result, err := s.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileInfo), nil
}
}
func (s scaFileInfoDo) Find() ([]*model.ScaFileInfo, error) {
result, err := s.DO.Find()
return result.([]*model.ScaFileInfo), err
}
func (s scaFileInfoDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileInfo, err error) {
buf := make([]*model.ScaFileInfo, 0, batchSize)
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (s scaFileInfoDo) FindInBatches(result *[]*model.ScaFileInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return s.DO.FindInBatches(result, batchSize, fc)
}
func (s scaFileInfoDo) Attrs(attrs ...field.AssignExpr) IScaFileInfoDo {
return s.withDO(s.DO.Attrs(attrs...))
}
func (s scaFileInfoDo) Assign(attrs ...field.AssignExpr) IScaFileInfoDo {
return s.withDO(s.DO.Assign(attrs...))
}
func (s scaFileInfoDo) Joins(fields ...field.RelationField) IScaFileInfoDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Joins(_f))
}
return &s
}
func (s scaFileInfoDo) Preload(fields ...field.RelationField) IScaFileInfoDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Preload(_f))
}
return &s
}
func (s scaFileInfoDo) FirstOrInit() (*model.ScaFileInfo, error) {
if result, err := s.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileInfo), nil
}
}
func (s scaFileInfoDo) FirstOrCreate() (*model.ScaFileInfo, error) {
if result, err := s.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileInfo), nil
}
}
func (s scaFileInfoDo) FindByPage(offset int, limit int) (result []*model.ScaFileInfo, count int64, err error) {
result, err = s.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = s.Offset(-1).Limit(-1).Count()
return
}
func (s scaFileInfoDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = s.Count()
if err != nil {
return
}
err = s.Offset(offset).Limit(limit).Scan(result)
return
}
func (s scaFileInfoDo) Scan(result interface{}) (err error) {
return s.DO.Scan(result)
}
func (s scaFileInfoDo) Delete(models ...*model.ScaFileInfo) (result gen.ResultInfo, err error) {
return s.DO.Delete(models)
}
func (s *scaFileInfoDo) withDO(do gen.Dao) *scaFileInfoDo {
s.DO = *do.(*gen.DO)
return s
}

View File

@@ -1,406 +0,0 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
)
func newScaFileRecycle(db *gorm.DB, opts ...gen.DOOption) scaFileRecycle {
_scaFileRecycle := scaFileRecycle{}
_scaFileRecycle.scaFileRecycleDo.UseDB(db, opts...)
_scaFileRecycle.scaFileRecycleDo.UseModel(&model.ScaFileRecycle{})
tableName := _scaFileRecycle.scaFileRecycleDo.TableName()
_scaFileRecycle.ALL = field.NewAsterisk(tableName)
_scaFileRecycle.ID = field.NewInt64(tableName, "id")
_scaFileRecycle.FileID = field.NewInt64(tableName, "file_id")
_scaFileRecycle.FolderID = field.NewInt64(tableName, "folder_id")
_scaFileRecycle.Type = field.NewInt64(tableName, "type")
_scaFileRecycle.UserID = field.NewString(tableName, "user_id")
_scaFileRecycle.DeletedAt = field.NewField(tableName, "deleted_at")
_scaFileRecycle.OriginalPath = field.NewString(tableName, "original_path")
_scaFileRecycle.FileSource = field.NewInt64(tableName, "file_source")
_scaFileRecycle.fillFieldMap()
return _scaFileRecycle
}
type scaFileRecycle struct {
scaFileRecycleDo
ALL field.Asterisk
ID field.Int64 // primary key
FileID field.Int64 // file ID
FolderID field.Int64 // folder ID
Type field.Int64 // type: 0 file, 1 folder
UserID field.String // user ID
DeletedAt field.Field // deleted at
OriginalPath field.String // original path
FileSource field.Int64 // file source: 0 album, 1 comment
fieldMap map[string]field.Expr
}
func (s scaFileRecycle) Table(newTableName string) *scaFileRecycle {
s.scaFileRecycleDo.UseTable(newTableName)
return s.updateTableName(newTableName)
}
func (s scaFileRecycle) As(alias string) *scaFileRecycle {
s.scaFileRecycleDo.DO = *(s.scaFileRecycleDo.As(alias).(*gen.DO))
return s.updateTableName(alias)
}
func (s *scaFileRecycle) updateTableName(table string) *scaFileRecycle {
s.ALL = field.NewAsterisk(table)
s.ID = field.NewInt64(table, "id")
s.FileID = field.NewInt64(table, "file_id")
s.FolderID = field.NewInt64(table, "folder_id")
s.Type = field.NewInt64(table, "type")
s.UserID = field.NewString(table, "user_id")
s.DeletedAt = field.NewField(table, "deleted_at")
s.OriginalPath = field.NewString(table, "original_path")
s.FileSource = field.NewInt64(table, "file_source")
s.fillFieldMap()
return s
}
func (s *scaFileRecycle) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := s.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (s *scaFileRecycle) fillFieldMap() {
s.fieldMap = make(map[string]field.Expr, 8)
s.fieldMap["id"] = s.ID
s.fieldMap["file_id"] = s.FileID
s.fieldMap["folder_id"] = s.FolderID
s.fieldMap["type"] = s.Type
s.fieldMap["user_id"] = s.UserID
s.fieldMap["deleted_at"] = s.DeletedAt
s.fieldMap["original_path"] = s.OriginalPath
s.fieldMap["file_source"] = s.FileSource
}
func (s scaFileRecycle) clone(db *gorm.DB) scaFileRecycle {
s.scaFileRecycleDo.ReplaceConnPool(db.Statement.ConnPool)
return s
}
func (s scaFileRecycle) replaceDB(db *gorm.DB) scaFileRecycle {
s.scaFileRecycleDo.ReplaceDB(db)
return s
}
type scaFileRecycleDo struct{ gen.DO }
type IScaFileRecycleDo interface {
gen.SubQuery
Debug() IScaFileRecycleDo
WithContext(ctx context.Context) IScaFileRecycleDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IScaFileRecycleDo
WriteDB() IScaFileRecycleDo
As(alias string) gen.Dao
Session(config *gorm.Session) IScaFileRecycleDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IScaFileRecycleDo
Not(conds ...gen.Condition) IScaFileRecycleDo
Or(conds ...gen.Condition) IScaFileRecycleDo
Select(conds ...field.Expr) IScaFileRecycleDo
Where(conds ...gen.Condition) IScaFileRecycleDo
Order(conds ...field.Expr) IScaFileRecycleDo
Distinct(cols ...field.Expr) IScaFileRecycleDo
Omit(cols ...field.Expr) IScaFileRecycleDo
Join(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo
LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo
RightJoin(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo
Group(cols ...field.Expr) IScaFileRecycleDo
Having(conds ...gen.Condition) IScaFileRecycleDo
Limit(limit int) IScaFileRecycleDo
Offset(offset int) IScaFileRecycleDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileRecycleDo
Unscoped() IScaFileRecycleDo
Create(values ...*model.ScaFileRecycle) error
CreateInBatches(values []*model.ScaFileRecycle, batchSize int) error
Save(values ...*model.ScaFileRecycle) error
First() (*model.ScaFileRecycle, error)
Take() (*model.ScaFileRecycle, error)
Last() (*model.ScaFileRecycle, error)
Find() ([]*model.ScaFileRecycle, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileRecycle, err error)
FindInBatches(result *[]*model.ScaFileRecycle, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.ScaFileRecycle) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IScaFileRecycleDo
Assign(attrs ...field.AssignExpr) IScaFileRecycleDo
Joins(fields ...field.RelationField) IScaFileRecycleDo
Preload(fields ...field.RelationField) IScaFileRecycleDo
FirstOrInit() (*model.ScaFileRecycle, error)
FirstOrCreate() (*model.ScaFileRecycle, error)
FindByPage(offset int, limit int) (result []*model.ScaFileRecycle, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IScaFileRecycleDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (s scaFileRecycleDo) Debug() IScaFileRecycleDo {
return s.withDO(s.DO.Debug())
}
func (s scaFileRecycleDo) WithContext(ctx context.Context) IScaFileRecycleDo {
return s.withDO(s.DO.WithContext(ctx))
}
func (s scaFileRecycleDo) ReadDB() IScaFileRecycleDo {
return s.Clauses(dbresolver.Read)
}
func (s scaFileRecycleDo) WriteDB() IScaFileRecycleDo {
return s.Clauses(dbresolver.Write)
}
func (s scaFileRecycleDo) Session(config *gorm.Session) IScaFileRecycleDo {
return s.withDO(s.DO.Session(config))
}
func (s scaFileRecycleDo) Clauses(conds ...clause.Expression) IScaFileRecycleDo {
return s.withDO(s.DO.Clauses(conds...))
}
func (s scaFileRecycleDo) Returning(value interface{}, columns ...string) IScaFileRecycleDo {
return s.withDO(s.DO.Returning(value, columns...))
}
func (s scaFileRecycleDo) Not(conds ...gen.Condition) IScaFileRecycleDo {
return s.withDO(s.DO.Not(conds...))
}
func (s scaFileRecycleDo) Or(conds ...gen.Condition) IScaFileRecycleDo {
return s.withDO(s.DO.Or(conds...))
}
func (s scaFileRecycleDo) Select(conds ...field.Expr) IScaFileRecycleDo {
return s.withDO(s.DO.Select(conds...))
}
func (s scaFileRecycleDo) Where(conds ...gen.Condition) IScaFileRecycleDo {
return s.withDO(s.DO.Where(conds...))
}
func (s scaFileRecycleDo) Order(conds ...field.Expr) IScaFileRecycleDo {
return s.withDO(s.DO.Order(conds...))
}
func (s scaFileRecycleDo) Distinct(cols ...field.Expr) IScaFileRecycleDo {
return s.withDO(s.DO.Distinct(cols...))
}
func (s scaFileRecycleDo) Omit(cols ...field.Expr) IScaFileRecycleDo {
return s.withDO(s.DO.Omit(cols...))
}
func (s scaFileRecycleDo) Join(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo {
return s.withDO(s.DO.Join(table, on...))
}
func (s scaFileRecycleDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo {
return s.withDO(s.DO.LeftJoin(table, on...))
}
func (s scaFileRecycleDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo {
return s.withDO(s.DO.RightJoin(table, on...))
}
func (s scaFileRecycleDo) Group(cols ...field.Expr) IScaFileRecycleDo {
return s.withDO(s.DO.Group(cols...))
}
func (s scaFileRecycleDo) Having(conds ...gen.Condition) IScaFileRecycleDo {
return s.withDO(s.DO.Having(conds...))
}
func (s scaFileRecycleDo) Limit(limit int) IScaFileRecycleDo {
return s.withDO(s.DO.Limit(limit))
}
func (s scaFileRecycleDo) Offset(offset int) IScaFileRecycleDo {
return s.withDO(s.DO.Offset(offset))
}
func (s scaFileRecycleDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileRecycleDo {
return s.withDO(s.DO.Scopes(funcs...))
}
func (s scaFileRecycleDo) Unscoped() IScaFileRecycleDo {
return s.withDO(s.DO.Unscoped())
}
func (s scaFileRecycleDo) Create(values ...*model.ScaFileRecycle) error {
if len(values) == 0 {
return nil
}
return s.DO.Create(values)
}
func (s scaFileRecycleDo) CreateInBatches(values []*model.ScaFileRecycle, batchSize int) error {
return s.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (s scaFileRecycleDo) Save(values ...*model.ScaFileRecycle) error {
if len(values) == 0 {
return nil
}
return s.DO.Save(values)
}
func (s scaFileRecycleDo) First() (*model.ScaFileRecycle, error) {
if result, err := s.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileRecycle), nil
}
}
func (s scaFileRecycleDo) Take() (*model.ScaFileRecycle, error) {
if result, err := s.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileRecycle), nil
}
}
func (s scaFileRecycleDo) Last() (*model.ScaFileRecycle, error) {
if result, err := s.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileRecycle), nil
}
}
func (s scaFileRecycleDo) Find() ([]*model.ScaFileRecycle, error) {
result, err := s.DO.Find()
return result.([]*model.ScaFileRecycle), err
}
func (s scaFileRecycleDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileRecycle, err error) {
buf := make([]*model.ScaFileRecycle, 0, batchSize)
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (s scaFileRecycleDo) FindInBatches(result *[]*model.ScaFileRecycle, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return s.DO.FindInBatches(result, batchSize, fc)
}
func (s scaFileRecycleDo) Attrs(attrs ...field.AssignExpr) IScaFileRecycleDo {
return s.withDO(s.DO.Attrs(attrs...))
}
func (s scaFileRecycleDo) Assign(attrs ...field.AssignExpr) IScaFileRecycleDo {
return s.withDO(s.DO.Assign(attrs...))
}
func (s scaFileRecycleDo) Joins(fields ...field.RelationField) IScaFileRecycleDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Joins(_f))
}
return &s
}
func (s scaFileRecycleDo) Preload(fields ...field.RelationField) IScaFileRecycleDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Preload(_f))
}
return &s
}
func (s scaFileRecycleDo) FirstOrInit() (*model.ScaFileRecycle, error) {
if result, err := s.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileRecycle), nil
}
}
func (s scaFileRecycleDo) FirstOrCreate() (*model.ScaFileRecycle, error) {
if result, err := s.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileRecycle), nil
}
}
func (s scaFileRecycleDo) FindByPage(offset int, limit int) (result []*model.ScaFileRecycle, count int64, err error) {
result, err = s.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = s.Offset(-1).Limit(-1).Count()
return
}
func (s scaFileRecycleDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = s.Count()
if err != nil {
return
}
err = s.Offset(offset).Limit(limit).Scan(result)
return
}
func (s scaFileRecycleDo) Scan(result interface{}) (err error) {
return s.DO.Scan(result)
}
func (s scaFileRecycleDo) Delete(models ...*model.ScaFileRecycle) (result gen.ResultInfo, err error) {
return s.DO.Delete(models)
}
func (s *scaFileRecycleDo) withDO(do gen.Dao) *scaFileRecycleDo {
s.DO = *do.(*gen.DO)
return s
}

View File

@@ -1,402 +0,0 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
)
func newScaFileType(db *gorm.DB, opts ...gen.DOOption) scaFileType {
_scaFileType := scaFileType{}
_scaFileType.scaFileTypeDo.UseDB(db, opts...)
_scaFileType.scaFileTypeDo.UseModel(&model.ScaFileType{})
tableName := _scaFileType.scaFileTypeDo.TableName()
_scaFileType.ALL = field.NewAsterisk(tableName)
_scaFileType.ID = field.NewInt64(tableName, "id")
_scaFileType.TypeName = field.NewString(tableName, "type_name")
_scaFileType.MimeType = field.NewString(tableName, "mime_type")
_scaFileType.Status = field.NewInt64(tableName, "status")
_scaFileType.CreatedAt = field.NewTime(tableName, "created_at")
_scaFileType.UpdatedAt = field.NewTime(tableName, "updated_at")
_scaFileType.DeletedAt = field.NewField(tableName, "deleted_at")
_scaFileType.fillFieldMap()
return _scaFileType
}
type scaFileType struct {
scaFileTypeDo
ALL field.Asterisk
ID field.Int64 // primary key
TypeName field.String // type name
MimeType field.String // MIME type
Status field.Int64 // type status
CreatedAt field.Time // created at
UpdatedAt field.Time // updated at
DeletedAt field.Field // deleted at
fieldMap map[string]field.Expr
}
func (s scaFileType) Table(newTableName string) *scaFileType {
s.scaFileTypeDo.UseTable(newTableName)
return s.updateTableName(newTableName)
}
func (s scaFileType) As(alias string) *scaFileType {
s.scaFileTypeDo.DO = *(s.scaFileTypeDo.As(alias).(*gen.DO))
return s.updateTableName(alias)
}
func (s *scaFileType) updateTableName(table string) *scaFileType {
s.ALL = field.NewAsterisk(table)
s.ID = field.NewInt64(table, "id")
s.TypeName = field.NewString(table, "type_name")
s.MimeType = field.NewString(table, "mime_type")
s.Status = field.NewInt64(table, "status")
s.CreatedAt = field.NewTime(table, "created_at")
s.UpdatedAt = field.NewTime(table, "updated_at")
s.DeletedAt = field.NewField(table, "deleted_at")
s.fillFieldMap()
return s
}
func (s *scaFileType) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := s.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (s *scaFileType) fillFieldMap() {
s.fieldMap = make(map[string]field.Expr, 7)
s.fieldMap["id"] = s.ID
s.fieldMap["type_name"] = s.TypeName
s.fieldMap["mime_type"] = s.MimeType
s.fieldMap["status"] = s.Status
s.fieldMap["created_at"] = s.CreatedAt
s.fieldMap["updated_at"] = s.UpdatedAt
s.fieldMap["deleted_at"] = s.DeletedAt
}
func (s scaFileType) clone(db *gorm.DB) scaFileType {
s.scaFileTypeDo.ReplaceConnPool(db.Statement.ConnPool)
return s
}
func (s scaFileType) replaceDB(db *gorm.DB) scaFileType {
s.scaFileTypeDo.ReplaceDB(db)
return s
}
type scaFileTypeDo struct{ gen.DO }
type IScaFileTypeDo interface {
gen.SubQuery
Debug() IScaFileTypeDo
WithContext(ctx context.Context) IScaFileTypeDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IScaFileTypeDo
WriteDB() IScaFileTypeDo
As(alias string) gen.Dao
Session(config *gorm.Session) IScaFileTypeDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IScaFileTypeDo
Not(conds ...gen.Condition) IScaFileTypeDo
Or(conds ...gen.Condition) IScaFileTypeDo
Select(conds ...field.Expr) IScaFileTypeDo
Where(conds ...gen.Condition) IScaFileTypeDo
Order(conds ...field.Expr) IScaFileTypeDo
Distinct(cols ...field.Expr) IScaFileTypeDo
Omit(cols ...field.Expr) IScaFileTypeDo
Join(table schema.Tabler, on ...field.Expr) IScaFileTypeDo
LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileTypeDo
RightJoin(table schema.Tabler, on ...field.Expr) IScaFileTypeDo
Group(cols ...field.Expr) IScaFileTypeDo
Having(conds ...gen.Condition) IScaFileTypeDo
Limit(limit int) IScaFileTypeDo
Offset(offset int) IScaFileTypeDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileTypeDo
Unscoped() IScaFileTypeDo
Create(values ...*model.ScaFileType) error
CreateInBatches(values []*model.ScaFileType, batchSize int) error
Save(values ...*model.ScaFileType) error
First() (*model.ScaFileType, error)
Take() (*model.ScaFileType, error)
Last() (*model.ScaFileType, error)
Find() ([]*model.ScaFileType, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileType, err error)
FindInBatches(result *[]*model.ScaFileType, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.ScaFileType) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IScaFileTypeDo
Assign(attrs ...field.AssignExpr) IScaFileTypeDo
Joins(fields ...field.RelationField) IScaFileTypeDo
Preload(fields ...field.RelationField) IScaFileTypeDo
FirstOrInit() (*model.ScaFileType, error)
FirstOrCreate() (*model.ScaFileType, error)
FindByPage(offset int, limit int) (result []*model.ScaFileType, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IScaFileTypeDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (s scaFileTypeDo) Debug() IScaFileTypeDo {
return s.withDO(s.DO.Debug())
}
func (s scaFileTypeDo) WithContext(ctx context.Context) IScaFileTypeDo {
return s.withDO(s.DO.WithContext(ctx))
}
func (s scaFileTypeDo) ReadDB() IScaFileTypeDo {
return s.Clauses(dbresolver.Read)
}
func (s scaFileTypeDo) WriteDB() IScaFileTypeDo {
return s.Clauses(dbresolver.Write)
}
func (s scaFileTypeDo) Session(config *gorm.Session) IScaFileTypeDo {
return s.withDO(s.DO.Session(config))
}
func (s scaFileTypeDo) Clauses(conds ...clause.Expression) IScaFileTypeDo {
return s.withDO(s.DO.Clauses(conds...))
}
func (s scaFileTypeDo) Returning(value interface{}, columns ...string) IScaFileTypeDo {
return s.withDO(s.DO.Returning(value, columns...))
}
func (s scaFileTypeDo) Not(conds ...gen.Condition) IScaFileTypeDo {
return s.withDO(s.DO.Not(conds...))
}
func (s scaFileTypeDo) Or(conds ...gen.Condition) IScaFileTypeDo {
return s.withDO(s.DO.Or(conds...))
}
func (s scaFileTypeDo) Select(conds ...field.Expr) IScaFileTypeDo {
return s.withDO(s.DO.Select(conds...))
}
func (s scaFileTypeDo) Where(conds ...gen.Condition) IScaFileTypeDo {
return s.withDO(s.DO.Where(conds...))
}
func (s scaFileTypeDo) Order(conds ...field.Expr) IScaFileTypeDo {
return s.withDO(s.DO.Order(conds...))
}
func (s scaFileTypeDo) Distinct(cols ...field.Expr) IScaFileTypeDo {
return s.withDO(s.DO.Distinct(cols...))
}
func (s scaFileTypeDo) Omit(cols ...field.Expr) IScaFileTypeDo {
return s.withDO(s.DO.Omit(cols...))
}
func (s scaFileTypeDo) Join(table schema.Tabler, on ...field.Expr) IScaFileTypeDo {
return s.withDO(s.DO.Join(table, on...))
}
func (s scaFileTypeDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileTypeDo {
return s.withDO(s.DO.LeftJoin(table, on...))
}
func (s scaFileTypeDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaFileTypeDo {
return s.withDO(s.DO.RightJoin(table, on...))
}
func (s scaFileTypeDo) Group(cols ...field.Expr) IScaFileTypeDo {
return s.withDO(s.DO.Group(cols...))
}
func (s scaFileTypeDo) Having(conds ...gen.Condition) IScaFileTypeDo {
return s.withDO(s.DO.Having(conds...))
}
func (s scaFileTypeDo) Limit(limit int) IScaFileTypeDo {
return s.withDO(s.DO.Limit(limit))
}
func (s scaFileTypeDo) Offset(offset int) IScaFileTypeDo {
return s.withDO(s.DO.Offset(offset))
}
func (s scaFileTypeDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileTypeDo {
return s.withDO(s.DO.Scopes(funcs...))
}
func (s scaFileTypeDo) Unscoped() IScaFileTypeDo {
return s.withDO(s.DO.Unscoped())
}
func (s scaFileTypeDo) Create(values ...*model.ScaFileType) error {
if len(values) == 0 {
return nil
}
return s.DO.Create(values)
}
func (s scaFileTypeDo) CreateInBatches(values []*model.ScaFileType, batchSize int) error {
return s.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (s scaFileTypeDo) Save(values ...*model.ScaFileType) error {
if len(values) == 0 {
return nil
}
return s.DO.Save(values)
}
func (s scaFileTypeDo) First() (*model.ScaFileType, error) {
if result, err := s.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileType), nil
}
}
func (s scaFileTypeDo) Take() (*model.ScaFileType, error) {
if result, err := s.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileType), nil
}
}
func (s scaFileTypeDo) Last() (*model.ScaFileType, error) {
if result, err := s.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileType), nil
}
}
func (s scaFileTypeDo) Find() ([]*model.ScaFileType, error) {
result, err := s.DO.Find()
return result.([]*model.ScaFileType), err
}
func (s scaFileTypeDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileType, err error) {
buf := make([]*model.ScaFileType, 0, batchSize)
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (s scaFileTypeDo) FindInBatches(result *[]*model.ScaFileType, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return s.DO.FindInBatches(result, batchSize, fc)
}
func (s scaFileTypeDo) Attrs(attrs ...field.AssignExpr) IScaFileTypeDo {
return s.withDO(s.DO.Attrs(attrs...))
}
func (s scaFileTypeDo) Assign(attrs ...field.AssignExpr) IScaFileTypeDo {
return s.withDO(s.DO.Assign(attrs...))
}
func (s scaFileTypeDo) Joins(fields ...field.RelationField) IScaFileTypeDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Joins(_f))
}
return &s
}
func (s scaFileTypeDo) Preload(fields ...field.RelationField) IScaFileTypeDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Preload(_f))
}
return &s
}
func (s scaFileTypeDo) FirstOrInit() (*model.ScaFileType, error) {
if result, err := s.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileType), nil
}
}
func (s scaFileTypeDo) FirstOrCreate() (*model.ScaFileType, error) {
if result, err := s.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.ScaFileType), nil
}
}
func (s scaFileTypeDo) FindByPage(offset int, limit int) (result []*model.ScaFileType, count int64, err error) {
result, err = s.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = s.Offset(-1).Limit(-1).Count()
return
}
func (s scaFileTypeDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = s.Count()
if err != nil {
return
}
err = s.Offset(offset).Limit(limit).Scan(result)
return
}
func (s scaFileTypeDo) Scan(result interface{}) (err error) {
return s.DO.Scan(result)
}
func (s scaFileTypeDo) Delete(models ...*model.ScaFileType) (result gen.ResultInfo, err error) {
return s.DO.Delete(models)
}
func (s *scaFileTypeDo) withDO(do gen.Dao) *scaFileTypeDo {
s.DO = *do.(*gen.DO)
return s
}

View File

@@ -6,15 +6,17 @@ package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaMessageReport(db *gorm.DB, opts ...gen.DOOption) scaMessageReport {

View File

@@ -0,0 +1,420 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaStorageConfig(db *gorm.DB, opts ...gen.DOOption) scaStorageConfig {
_scaStorageConfig := scaStorageConfig{}
_scaStorageConfig.scaStorageConfigDo.UseDB(db, opts...)
_scaStorageConfig.scaStorageConfigDo.UseModel(&model.ScaStorageConfig{})
tableName := _scaStorageConfig.scaStorageConfigDo.TableName()
_scaStorageConfig.ALL = field.NewAsterisk(tableName)
_scaStorageConfig.ID = field.NewInt64(tableName, "id")
_scaStorageConfig.UserID = field.NewString(tableName, "user_id")
_scaStorageConfig.Type = field.NewString(tableName, "type")
_scaStorageConfig.Endpoint = field.NewString(tableName, "endpoint")
_scaStorageConfig.AccessKey = field.NewString(tableName, "access_key")
_scaStorageConfig.SecretKey = field.NewString(tableName, "secret_key")
_scaStorageConfig.Bucket = field.NewString(tableName, "bucket")
_scaStorageConfig.Region = field.NewString(tableName, "region")
_scaStorageConfig.CreatedAt = field.NewTime(tableName, "created_at")
_scaStorageConfig.UpdatedAt = field.NewTime(tableName, "updated_at")
_scaStorageConfig.DeletedAt = field.NewField(tableName, "deleted_at")
_scaStorageConfig.fillFieldMap()
return _scaStorageConfig
}
type scaStorageConfig struct {
scaStorageConfigDo
ALL field.Asterisk
ID field.Int64 // primary key
UserID field.String // user ID
Type field.String // storage type
Endpoint field.String // endpoint address
AccessKey field.String // access key
SecretKey field.String // secret key
Bucket field.String // bucket
Region field.String // region
CreatedAt field.Time // creation time
UpdatedAt field.Time // update time
DeletedAt field.Field // deletion time
fieldMap map[string]field.Expr
}
func (s scaStorageConfig) Table(newTableName string) *scaStorageConfig {
s.scaStorageConfigDo.UseTable(newTableName)
return s.updateTableName(newTableName)
}
func (s scaStorageConfig) As(alias string) *scaStorageConfig {
s.scaStorageConfigDo.DO = *(s.scaStorageConfigDo.As(alias).(*gen.DO))
return s.updateTableName(alias)
}
func (s *scaStorageConfig) updateTableName(table string) *scaStorageConfig {
s.ALL = field.NewAsterisk(table)
s.ID = field.NewInt64(table, "id")
s.UserID = field.NewString(table, "user_id")
s.Type = field.NewString(table, "type")
s.Endpoint = field.NewString(table, "endpoint")
s.AccessKey = field.NewString(table, "access_key")
s.SecretKey = field.NewString(table, "secret_key")
s.Bucket = field.NewString(table, "bucket")
s.Region = field.NewString(table, "region")
s.CreatedAt = field.NewTime(table, "created_at")
s.UpdatedAt = field.NewTime(table, "updated_at")
s.DeletedAt = field.NewField(table, "deleted_at")
s.fillFieldMap()
return s
}
func (s *scaStorageConfig) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := s.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (s *scaStorageConfig) fillFieldMap() {
s.fieldMap = make(map[string]field.Expr, 11)
s.fieldMap["id"] = s.ID
s.fieldMap["user_id"] = s.UserID
s.fieldMap["type"] = s.Type
s.fieldMap["endpoint"] = s.Endpoint
s.fieldMap["access_key"] = s.AccessKey
s.fieldMap["secret_key"] = s.SecretKey
s.fieldMap["bucket"] = s.Bucket
s.fieldMap["region"] = s.Region
s.fieldMap["created_at"] = s.CreatedAt
s.fieldMap["updated_at"] = s.UpdatedAt
s.fieldMap["deleted_at"] = s.DeletedAt
}
func (s scaStorageConfig) clone(db *gorm.DB) scaStorageConfig {
s.scaStorageConfigDo.ReplaceConnPool(db.Statement.ConnPool)
return s
}
func (s scaStorageConfig) replaceDB(db *gorm.DB) scaStorageConfig {
s.scaStorageConfigDo.ReplaceDB(db)
return s
}
type scaStorageConfigDo struct{ gen.DO }
type IScaStorageConfigDo interface {
gen.SubQuery
Debug() IScaStorageConfigDo
WithContext(ctx context.Context) IScaStorageConfigDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IScaStorageConfigDo
WriteDB() IScaStorageConfigDo
As(alias string) gen.Dao
Session(config *gorm.Session) IScaStorageConfigDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IScaStorageConfigDo
Not(conds ...gen.Condition) IScaStorageConfigDo
Or(conds ...gen.Condition) IScaStorageConfigDo
Select(conds ...field.Expr) IScaStorageConfigDo
Where(conds ...gen.Condition) IScaStorageConfigDo
Order(conds ...field.Expr) IScaStorageConfigDo
Distinct(cols ...field.Expr) IScaStorageConfigDo
Omit(cols ...field.Expr) IScaStorageConfigDo
Join(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo
LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo
RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo
Group(cols ...field.Expr) IScaStorageConfigDo
Having(conds ...gen.Condition) IScaStorageConfigDo
Limit(limit int) IScaStorageConfigDo
Offset(offset int) IScaStorageConfigDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageConfigDo
Unscoped() IScaStorageConfigDo
Create(values ...*model.ScaStorageConfig) error
CreateInBatches(values []*model.ScaStorageConfig, batchSize int) error
Save(values ...*model.ScaStorageConfig) error
First() (*model.ScaStorageConfig, error)
Take() (*model.ScaStorageConfig, error)
Last() (*model.ScaStorageConfig, error)
Find() ([]*model.ScaStorageConfig, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageConfig, err error)
FindInBatches(result *[]*model.ScaStorageConfig, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.ScaStorageConfig) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IScaStorageConfigDo
Assign(attrs ...field.AssignExpr) IScaStorageConfigDo
Joins(fields ...field.RelationField) IScaStorageConfigDo
Preload(fields ...field.RelationField) IScaStorageConfigDo
FirstOrInit() (*model.ScaStorageConfig, error)
FirstOrCreate() (*model.ScaStorageConfig, error)
FindByPage(offset int, limit int) (result []*model.ScaStorageConfig, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IScaStorageConfigDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (s scaStorageConfigDo) Debug() IScaStorageConfigDo {
return s.withDO(s.DO.Debug())
}
func (s scaStorageConfigDo) WithContext(ctx context.Context) IScaStorageConfigDo {
return s.withDO(s.DO.WithContext(ctx))
}
func (s scaStorageConfigDo) ReadDB() IScaStorageConfigDo {
return s.Clauses(dbresolver.Read)
}
func (s scaStorageConfigDo) WriteDB() IScaStorageConfigDo {
return s.Clauses(dbresolver.Write)
}
func (s scaStorageConfigDo) Session(config *gorm.Session) IScaStorageConfigDo {
return s.withDO(s.DO.Session(config))
}
func (s scaStorageConfigDo) Clauses(conds ...clause.Expression) IScaStorageConfigDo {
return s.withDO(s.DO.Clauses(conds...))
}
func (s scaStorageConfigDo) Returning(value interface{}, columns ...string) IScaStorageConfigDo {
return s.withDO(s.DO.Returning(value, columns...))
}
func (s scaStorageConfigDo) Not(conds ...gen.Condition) IScaStorageConfigDo {
return s.withDO(s.DO.Not(conds...))
}
func (s scaStorageConfigDo) Or(conds ...gen.Condition) IScaStorageConfigDo {
return s.withDO(s.DO.Or(conds...))
}
func (s scaStorageConfigDo) Select(conds ...field.Expr) IScaStorageConfigDo {
return s.withDO(s.DO.Select(conds...))
}
func (s scaStorageConfigDo) Where(conds ...gen.Condition) IScaStorageConfigDo {
return s.withDO(s.DO.Where(conds...))
}
func (s scaStorageConfigDo) Order(conds ...field.Expr) IScaStorageConfigDo {
return s.withDO(s.DO.Order(conds...))
}
func (s scaStorageConfigDo) Distinct(cols ...field.Expr) IScaStorageConfigDo {
return s.withDO(s.DO.Distinct(cols...))
}
func (s scaStorageConfigDo) Omit(cols ...field.Expr) IScaStorageConfigDo {
return s.withDO(s.DO.Omit(cols...))
}
func (s scaStorageConfigDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo {
return s.withDO(s.DO.Join(table, on...))
}
func (s scaStorageConfigDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo {
return s.withDO(s.DO.LeftJoin(table, on...))
}
func (s scaStorageConfigDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo {
return s.withDO(s.DO.RightJoin(table, on...))
}
func (s scaStorageConfigDo) Group(cols ...field.Expr) IScaStorageConfigDo {
return s.withDO(s.DO.Group(cols...))
}
func (s scaStorageConfigDo) Having(conds ...gen.Condition) IScaStorageConfigDo {
return s.withDO(s.DO.Having(conds...))
}
func (s scaStorageConfigDo) Limit(limit int) IScaStorageConfigDo {
return s.withDO(s.DO.Limit(limit))
}
func (s scaStorageConfigDo) Offset(offset int) IScaStorageConfigDo {
return s.withDO(s.DO.Offset(offset))
}
func (s scaStorageConfigDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageConfigDo {
return s.withDO(s.DO.Scopes(funcs...))
}
func (s scaStorageConfigDo) Unscoped() IScaStorageConfigDo {
return s.withDO(s.DO.Unscoped())
}
func (s scaStorageConfigDo) Create(values ...*model.ScaStorageConfig) error {
if len(values) == 0 {
return nil
}
return s.DO.Create(values)
}
func (s scaStorageConfigDo) CreateInBatches(values []*model.ScaStorageConfig, batchSize int) error {
return s.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (s scaStorageConfigDo) Save(values ...*model.ScaStorageConfig) error {
if len(values) == 0 {
return nil
}
return s.DO.Save(values)
}
func (s scaStorageConfigDo) First() (*model.ScaStorageConfig, error) {
if result, err := s.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageConfig), nil
}
}
func (s scaStorageConfigDo) Take() (*model.ScaStorageConfig, error) {
if result, err := s.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageConfig), nil
}
}
func (s scaStorageConfigDo) Last() (*model.ScaStorageConfig, error) {
if result, err := s.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageConfig), nil
}
}
func (s scaStorageConfigDo) Find() ([]*model.ScaStorageConfig, error) {
result, err := s.DO.Find()
return result.([]*model.ScaStorageConfig), err
}
func (s scaStorageConfigDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageConfig, err error) {
buf := make([]*model.ScaStorageConfig, 0, batchSize)
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (s scaStorageConfigDo) FindInBatches(result *[]*model.ScaStorageConfig, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return s.DO.FindInBatches(result, batchSize, fc)
}
func (s scaStorageConfigDo) Attrs(attrs ...field.AssignExpr) IScaStorageConfigDo {
return s.withDO(s.DO.Attrs(attrs...))
}
func (s scaStorageConfigDo) Assign(attrs ...field.AssignExpr) IScaStorageConfigDo {
return s.withDO(s.DO.Assign(attrs...))
}
func (s scaStorageConfigDo) Joins(fields ...field.RelationField) IScaStorageConfigDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Joins(_f))
}
return &s
}
func (s scaStorageConfigDo) Preload(fields ...field.RelationField) IScaStorageConfigDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Preload(_f))
}
return &s
}
func (s scaStorageConfigDo) FirstOrInit() (*model.ScaStorageConfig, error) {
if result, err := s.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageConfig), nil
}
}
func (s scaStorageConfigDo) FirstOrCreate() (*model.ScaStorageConfig, error) {
if result, err := s.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageConfig), nil
}
}
func (s scaStorageConfigDo) FindByPage(offset int, limit int) (result []*model.ScaStorageConfig, count int64, err error) {
result, err = s.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = s.Offset(-1).Limit(-1).Count()
return
}
func (s scaStorageConfigDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = s.Count()
if err != nil {
return
}
err = s.Offset(offset).Limit(limit).Scan(result)
return
}
func (s scaStorageConfigDo) Scan(result interface{}) (err error) {
return s.DO.Scan(result)
}
func (s scaStorageConfigDo) Delete(models ...*model.ScaStorageConfig) (result gen.ResultInfo, err error) {
return s.DO.Delete(models)
}
func (s *scaStorageConfigDo) withDO(do gen.Dao) *scaStorageConfigDo {
s.DO = *do.(*gen.DO)
return s
}
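
The generated ScaStorageConfig DAO above is normally driven through the gorm.io/gen query entry point rather than instantiated directly. Below is a minimal usage sketch, not part of this commit: the query package path, the query.Use entry point, the DSN, and the model field names are assumptions based on the repository layout and standard gen output.

package main

import (
    "context"

    "gorm.io/driver/mysql"
    "gorm.io/gorm"

    "schisandra-album-cloud-microservices/app/auth/model/mysql/model"
    "schisandra-album-cloud-microservices/app/auth/model/mysql/query" // assumed package path
)

func main() {
    // Open a connection; the DSN here is a placeholder.
    db, err := gorm.Open(mysql.Open("user:pass@tcp(127.0.0.1:3306)/sca?parseTime=true"), &gorm.Config{})
    if err != nil {
        panic(err)
    }

    // query.Use is the standard gen entry point (assumed to be generated alongside these DAOs).
    q := query.Use(db)
    dao := q.ScaStorageConfig.WithContext(context.Background())

    // Save performs an upsert (ON CONFLICT ... UPDATE ALL), as noted in the generated comment.
    if err := dao.Save(&model.ScaStorageConfig{UserID: "u_123", Type: "minio", Bucket: "albums"}); err != nil {
        panic(err)
    }

    // Fetch the configuration for a single user.
    cfg, err := dao.Where(q.ScaStorageConfig.UserID.Eq("u_123")).First()
    if err != nil {
        panic(err)
    }
    _ = cfg
}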

View File

@@ -0,0 +1,448 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaStorageInfo(db *gorm.DB, opts ...gen.DOOption) scaStorageInfo {
_scaStorageInfo := scaStorageInfo{}
_scaStorageInfo.scaStorageInfoDo.UseDB(db, opts...)
_scaStorageInfo.scaStorageInfoDo.UseModel(&model.ScaStorageInfo{})
tableName := _scaStorageInfo.scaStorageInfoDo.TableName()
_scaStorageInfo.ALL = field.NewAsterisk(tableName)
_scaStorageInfo.ID = field.NewInt64(tableName, "id")
_scaStorageInfo.UserID = field.NewString(tableName, "user_id")
_scaStorageInfo.Storage = field.NewString(tableName, "storage")
_scaStorageInfo.Bucket = field.NewString(tableName, "bucket")
_scaStorageInfo.Type = field.NewString(tableName, "type")
_scaStorageInfo.Path = field.NewString(tableName, "path")
_scaStorageInfo.FileName = field.NewString(tableName, "file_name")
_scaStorageInfo.Category = field.NewString(tableName, "category")
_scaStorageInfo.Loaction = field.NewString(tableName, "loaction")
_scaStorageInfo.Hash = field.NewString(tableName, "hash")
_scaStorageInfo.Anime = field.NewString(tableName, "anime")
_scaStorageInfo.HasFace = field.NewString(tableName, "has_face")
_scaStorageInfo.FaceID = field.NewInt64(tableName, "face_id")
_scaStorageInfo.Landscape = field.NewString(tableName, "landscape")
_scaStorageInfo.Objects = field.NewString(tableName, "objects")
_scaStorageInfo.CreatedAt = field.NewTime(tableName, "created_at")
_scaStorageInfo.UpdatedAt = field.NewTime(tableName, "updated_at")
_scaStorageInfo.DeletedAt = field.NewField(tableName, "deleted_at")
_scaStorageInfo.fillFieldMap()
return _scaStorageInfo
}
type scaStorageInfo struct {
scaStorageInfoDo
ALL field.Asterisk
ID field.Int64 // primary key
UserID field.String // user ID
Storage field.String // storage provider
Bucket field.String // bucket
Type field.String // file type
Path field.String // path
FileName field.String // file name
Category field.String // category
Loaction field.String // location
Hash field.String // hash value
Anime field.String // whether the image is anime-style
HasFace field.String // whether the image contains a face
FaceID field.Int64 // face ID
Landscape field.String // landscape type
Objects field.String // detected objects
CreatedAt field.Time // creation time
UpdatedAt field.Time // update time
DeletedAt field.Field // deletion time
fieldMap map[string]field.Expr
}
func (s scaStorageInfo) Table(newTableName string) *scaStorageInfo {
s.scaStorageInfoDo.UseTable(newTableName)
return s.updateTableName(newTableName)
}
func (s scaStorageInfo) As(alias string) *scaStorageInfo {
s.scaStorageInfoDo.DO = *(s.scaStorageInfoDo.As(alias).(*gen.DO))
return s.updateTableName(alias)
}
func (s *scaStorageInfo) updateTableName(table string) *scaStorageInfo {
s.ALL = field.NewAsterisk(table)
s.ID = field.NewInt64(table, "id")
s.UserID = field.NewString(table, "user_id")
s.Storage = field.NewString(table, "storage")
s.Bucket = field.NewString(table, "bucket")
s.Type = field.NewString(table, "type")
s.Path = field.NewString(table, "path")
s.FileName = field.NewString(table, "file_name")
s.Category = field.NewString(table, "category")
s.Loaction = field.NewString(table, "loaction")
s.Hash = field.NewString(table, "hash")
s.Anime = field.NewString(table, "anime")
s.HasFace = field.NewString(table, "has_face")
s.FaceID = field.NewInt64(table, "face_id")
s.Landscape = field.NewString(table, "landscape")
s.Objects = field.NewString(table, "objects")
s.CreatedAt = field.NewTime(table, "created_at")
s.UpdatedAt = field.NewTime(table, "updated_at")
s.DeletedAt = field.NewField(table, "deleted_at")
s.fillFieldMap()
return s
}
func (s *scaStorageInfo) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := s.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (s *scaStorageInfo) fillFieldMap() {
s.fieldMap = make(map[string]field.Expr, 18)
s.fieldMap["id"] = s.ID
s.fieldMap["user_id"] = s.UserID
s.fieldMap["storage"] = s.Storage
s.fieldMap["bucket"] = s.Bucket
s.fieldMap["type"] = s.Type
s.fieldMap["path"] = s.Path
s.fieldMap["file_name"] = s.FileName
s.fieldMap["category"] = s.Category
s.fieldMap["loaction"] = s.Loaction
s.fieldMap["hash"] = s.Hash
s.fieldMap["anime"] = s.Anime
s.fieldMap["has_face"] = s.HasFace
s.fieldMap["face_id"] = s.FaceID
s.fieldMap["landscape"] = s.Landscape
s.fieldMap["objects"] = s.Objects
s.fieldMap["created_at"] = s.CreatedAt
s.fieldMap["updated_at"] = s.UpdatedAt
s.fieldMap["deleted_at"] = s.DeletedAt
}
func (s scaStorageInfo) clone(db *gorm.DB) scaStorageInfo {
s.scaStorageInfoDo.ReplaceConnPool(db.Statement.ConnPool)
return s
}
func (s scaStorageInfo) replaceDB(db *gorm.DB) scaStorageInfo {
s.scaStorageInfoDo.ReplaceDB(db)
return s
}
type scaStorageInfoDo struct{ gen.DO }
type IScaStorageInfoDo interface {
gen.SubQuery
Debug() IScaStorageInfoDo
WithContext(ctx context.Context) IScaStorageInfoDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IScaStorageInfoDo
WriteDB() IScaStorageInfoDo
As(alias string) gen.Dao
Session(config *gorm.Session) IScaStorageInfoDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IScaStorageInfoDo
Not(conds ...gen.Condition) IScaStorageInfoDo
Or(conds ...gen.Condition) IScaStorageInfoDo
Select(conds ...field.Expr) IScaStorageInfoDo
Where(conds ...gen.Condition) IScaStorageInfoDo
Order(conds ...field.Expr) IScaStorageInfoDo
Distinct(cols ...field.Expr) IScaStorageInfoDo
Omit(cols ...field.Expr) IScaStorageInfoDo
Join(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo
LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo
RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo
Group(cols ...field.Expr) IScaStorageInfoDo
Having(conds ...gen.Condition) IScaStorageInfoDo
Limit(limit int) IScaStorageInfoDo
Offset(offset int) IScaStorageInfoDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageInfoDo
Unscoped() IScaStorageInfoDo
Create(values ...*model.ScaStorageInfo) error
CreateInBatches(values []*model.ScaStorageInfo, batchSize int) error
Save(values ...*model.ScaStorageInfo) error
First() (*model.ScaStorageInfo, error)
Take() (*model.ScaStorageInfo, error)
Last() (*model.ScaStorageInfo, error)
Find() ([]*model.ScaStorageInfo, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageInfo, err error)
FindInBatches(result *[]*model.ScaStorageInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.ScaStorageInfo) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IScaStorageInfoDo
Assign(attrs ...field.AssignExpr) IScaStorageInfoDo
Joins(fields ...field.RelationField) IScaStorageInfoDo
Preload(fields ...field.RelationField) IScaStorageInfoDo
FirstOrInit() (*model.ScaStorageInfo, error)
FirstOrCreate() (*model.ScaStorageInfo, error)
FindByPage(offset int, limit int) (result []*model.ScaStorageInfo, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IScaStorageInfoDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (s scaStorageInfoDo) Debug() IScaStorageInfoDo {
return s.withDO(s.DO.Debug())
}
func (s scaStorageInfoDo) WithContext(ctx context.Context) IScaStorageInfoDo {
return s.withDO(s.DO.WithContext(ctx))
}
func (s scaStorageInfoDo) ReadDB() IScaStorageInfoDo {
return s.Clauses(dbresolver.Read)
}
func (s scaStorageInfoDo) WriteDB() IScaStorageInfoDo {
return s.Clauses(dbresolver.Write)
}
func (s scaStorageInfoDo) Session(config *gorm.Session) IScaStorageInfoDo {
return s.withDO(s.DO.Session(config))
}
func (s scaStorageInfoDo) Clauses(conds ...clause.Expression) IScaStorageInfoDo {
return s.withDO(s.DO.Clauses(conds...))
}
func (s scaStorageInfoDo) Returning(value interface{}, columns ...string) IScaStorageInfoDo {
return s.withDO(s.DO.Returning(value, columns...))
}
func (s scaStorageInfoDo) Not(conds ...gen.Condition) IScaStorageInfoDo {
return s.withDO(s.DO.Not(conds...))
}
func (s scaStorageInfoDo) Or(conds ...gen.Condition) IScaStorageInfoDo {
return s.withDO(s.DO.Or(conds...))
}
func (s scaStorageInfoDo) Select(conds ...field.Expr) IScaStorageInfoDo {
return s.withDO(s.DO.Select(conds...))
}
func (s scaStorageInfoDo) Where(conds ...gen.Condition) IScaStorageInfoDo {
return s.withDO(s.DO.Where(conds...))
}
func (s scaStorageInfoDo) Order(conds ...field.Expr) IScaStorageInfoDo {
return s.withDO(s.DO.Order(conds...))
}
func (s scaStorageInfoDo) Distinct(cols ...field.Expr) IScaStorageInfoDo {
return s.withDO(s.DO.Distinct(cols...))
}
func (s scaStorageInfoDo) Omit(cols ...field.Expr) IScaStorageInfoDo {
return s.withDO(s.DO.Omit(cols...))
}
func (s scaStorageInfoDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo {
return s.withDO(s.DO.Join(table, on...))
}
func (s scaStorageInfoDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo {
return s.withDO(s.DO.LeftJoin(table, on...))
}
func (s scaStorageInfoDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo {
return s.withDO(s.DO.RightJoin(table, on...))
}
func (s scaStorageInfoDo) Group(cols ...field.Expr) IScaStorageInfoDo {
return s.withDO(s.DO.Group(cols...))
}
func (s scaStorageInfoDo) Having(conds ...gen.Condition) IScaStorageInfoDo {
return s.withDO(s.DO.Having(conds...))
}
func (s scaStorageInfoDo) Limit(limit int) IScaStorageInfoDo {
return s.withDO(s.DO.Limit(limit))
}
func (s scaStorageInfoDo) Offset(offset int) IScaStorageInfoDo {
return s.withDO(s.DO.Offset(offset))
}
func (s scaStorageInfoDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageInfoDo {
return s.withDO(s.DO.Scopes(funcs...))
}
func (s scaStorageInfoDo) Unscoped() IScaStorageInfoDo {
return s.withDO(s.DO.Unscoped())
}
func (s scaStorageInfoDo) Create(values ...*model.ScaStorageInfo) error {
if len(values) == 0 {
return nil
}
return s.DO.Create(values)
}
func (s scaStorageInfoDo) CreateInBatches(values []*model.ScaStorageInfo, batchSize int) error {
return s.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (s scaStorageInfoDo) Save(values ...*model.ScaStorageInfo) error {
if len(values) == 0 {
return nil
}
return s.DO.Save(values)
}
func (s scaStorageInfoDo) First() (*model.ScaStorageInfo, error) {
if result, err := s.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageInfo), nil
}
}
func (s scaStorageInfoDo) Take() (*model.ScaStorageInfo, error) {
if result, err := s.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageInfo), nil
}
}
func (s scaStorageInfoDo) Last() (*model.ScaStorageInfo, error) {
if result, err := s.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageInfo), nil
}
}
func (s scaStorageInfoDo) Find() ([]*model.ScaStorageInfo, error) {
result, err := s.DO.Find()
return result.([]*model.ScaStorageInfo), err
}
func (s scaStorageInfoDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageInfo, err error) {
buf := make([]*model.ScaStorageInfo, 0, batchSize)
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (s scaStorageInfoDo) FindInBatches(result *[]*model.ScaStorageInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return s.DO.FindInBatches(result, batchSize, fc)
}
func (s scaStorageInfoDo) Attrs(attrs ...field.AssignExpr) IScaStorageInfoDo {
return s.withDO(s.DO.Attrs(attrs...))
}
func (s scaStorageInfoDo) Assign(attrs ...field.AssignExpr) IScaStorageInfoDo {
return s.withDO(s.DO.Assign(attrs...))
}
func (s scaStorageInfoDo) Joins(fields ...field.RelationField) IScaStorageInfoDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Joins(_f))
}
return &s
}
func (s scaStorageInfoDo) Preload(fields ...field.RelationField) IScaStorageInfoDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Preload(_f))
}
return &s
}
func (s scaStorageInfoDo) FirstOrInit() (*model.ScaStorageInfo, error) {
if result, err := s.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageInfo), nil
}
}
func (s scaStorageInfoDo) FirstOrCreate() (*model.ScaStorageInfo, error) {
if result, err := s.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageInfo), nil
}
}
func (s scaStorageInfoDo) FindByPage(offset int, limit int) (result []*model.ScaStorageInfo, count int64, err error) {
result, err = s.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = s.Offset(-1).Limit(-1).Count()
return
}
func (s scaStorageInfoDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = s.Count()
if err != nil {
return
}
err = s.Offset(offset).Limit(limit).Scan(result)
return
}
func (s scaStorageInfoDo) Scan(result interface{}) (err error) {
return s.DO.Scan(result)
}
func (s scaStorageInfoDo) Delete(models ...*model.ScaStorageInfo) (result gen.ResultInfo, err error) {
return s.DO.Delete(models)
}
func (s *scaStorageInfoDo) withDO(do gen.Dao) *scaStorageInfoDo {
s.DO = *do.(*gen.DO)
return s
}

View File

@@ -0,0 +1,400 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaStorageTag(db *gorm.DB, opts ...gen.DOOption) scaStorageTag {
_scaStorageTag := scaStorageTag{}
_scaStorageTag.scaStorageTagDo.UseDB(db, opts...)
_scaStorageTag.scaStorageTagDo.UseModel(&model.ScaStorageTag{})
tableName := _scaStorageTag.scaStorageTagDo.TableName()
_scaStorageTag.ALL = field.NewAsterisk(tableName)
_scaStorageTag.ID = field.NewInt64(tableName, "id")
_scaStorageTag.FileID = field.NewInt64(tableName, "file_id")
_scaStorageTag.TagID = field.NewInt64(tableName, "tag_id")
_scaStorageTag.CreatedAt = field.NewTime(tableName, "created_at")
_scaStorageTag.UpdatedAt = field.NewTime(tableName, "updated_at")
_scaStorageTag.DeletedAt = field.NewField(tableName, "deleted_at")
_scaStorageTag.fillFieldMap()
return _scaStorageTag
}
type scaStorageTag struct {
scaStorageTagDo
ALL field.Asterisk
ID field.Int64 // primary key
FileID field.Int64 // file ID
TagID field.Int64 // tag ID
CreatedAt field.Time // creation time
UpdatedAt field.Time // update time
DeletedAt field.Field // deletion time
fieldMap map[string]field.Expr
}
func (s scaStorageTag) Table(newTableName string) *scaStorageTag {
s.scaStorageTagDo.UseTable(newTableName)
return s.updateTableName(newTableName)
}
func (s scaStorageTag) As(alias string) *scaStorageTag {
s.scaStorageTagDo.DO = *(s.scaStorageTagDo.As(alias).(*gen.DO))
return s.updateTableName(alias)
}
func (s *scaStorageTag) updateTableName(table string) *scaStorageTag {
s.ALL = field.NewAsterisk(table)
s.ID = field.NewInt64(table, "id")
s.FileID = field.NewInt64(table, "file_id")
s.TagID = field.NewInt64(table, "tag_id")
s.CreatedAt = field.NewTime(table, "created_at")
s.UpdatedAt = field.NewTime(table, "updated_at")
s.DeletedAt = field.NewField(table, "deleted_at")
s.fillFieldMap()
return s
}
func (s *scaStorageTag) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := s.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (s *scaStorageTag) fillFieldMap() {
s.fieldMap = make(map[string]field.Expr, 6)
s.fieldMap["id"] = s.ID
s.fieldMap["file_id"] = s.FileID
s.fieldMap["tag_id"] = s.TagID
s.fieldMap["created_at"] = s.CreatedAt
s.fieldMap["updated_at"] = s.UpdatedAt
s.fieldMap["deleted_at"] = s.DeletedAt
}
func (s scaStorageTag) clone(db *gorm.DB) scaStorageTag {
s.scaStorageTagDo.ReplaceConnPool(db.Statement.ConnPool)
return s
}
func (s scaStorageTag) replaceDB(db *gorm.DB) scaStorageTag {
s.scaStorageTagDo.ReplaceDB(db)
return s
}
type scaStorageTagDo struct{ gen.DO }
type IScaStorageTagDo interface {
gen.SubQuery
Debug() IScaStorageTagDo
WithContext(ctx context.Context) IScaStorageTagDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IScaStorageTagDo
WriteDB() IScaStorageTagDo
As(alias string) gen.Dao
Session(config *gorm.Session) IScaStorageTagDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IScaStorageTagDo
Not(conds ...gen.Condition) IScaStorageTagDo
Or(conds ...gen.Condition) IScaStorageTagDo
Select(conds ...field.Expr) IScaStorageTagDo
Where(conds ...gen.Condition) IScaStorageTagDo
Order(conds ...field.Expr) IScaStorageTagDo
Distinct(cols ...field.Expr) IScaStorageTagDo
Omit(cols ...field.Expr) IScaStorageTagDo
Join(table schema.Tabler, on ...field.Expr) IScaStorageTagDo
LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo
RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo
Group(cols ...field.Expr) IScaStorageTagDo
Having(conds ...gen.Condition) IScaStorageTagDo
Limit(limit int) IScaStorageTagDo
Offset(offset int) IScaStorageTagDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagDo
Unscoped() IScaStorageTagDo
Create(values ...*model.ScaStorageTag) error
CreateInBatches(values []*model.ScaStorageTag, batchSize int) error
Save(values ...*model.ScaStorageTag) error
First() (*model.ScaStorageTag, error)
Take() (*model.ScaStorageTag, error)
Last() (*model.ScaStorageTag, error)
Find() ([]*model.ScaStorageTag, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTag, err error)
FindInBatches(result *[]*model.ScaStorageTag, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.ScaStorageTag) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IScaStorageTagDo
Assign(attrs ...field.AssignExpr) IScaStorageTagDo
Joins(fields ...field.RelationField) IScaStorageTagDo
Preload(fields ...field.RelationField) IScaStorageTagDo
FirstOrInit() (*model.ScaStorageTag, error)
FirstOrCreate() (*model.ScaStorageTag, error)
FindByPage(offset int, limit int) (result []*model.ScaStorageTag, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IScaStorageTagDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (s scaStorageTagDo) Debug() IScaStorageTagDo {
return s.withDO(s.DO.Debug())
}
func (s scaStorageTagDo) WithContext(ctx context.Context) IScaStorageTagDo {
return s.withDO(s.DO.WithContext(ctx))
}
func (s scaStorageTagDo) ReadDB() IScaStorageTagDo {
return s.Clauses(dbresolver.Read)
}
func (s scaStorageTagDo) WriteDB() IScaStorageTagDo {
return s.Clauses(dbresolver.Write)
}
func (s scaStorageTagDo) Session(config *gorm.Session) IScaStorageTagDo {
return s.withDO(s.DO.Session(config))
}
func (s scaStorageTagDo) Clauses(conds ...clause.Expression) IScaStorageTagDo {
return s.withDO(s.DO.Clauses(conds...))
}
func (s scaStorageTagDo) Returning(value interface{}, columns ...string) IScaStorageTagDo {
return s.withDO(s.DO.Returning(value, columns...))
}
func (s scaStorageTagDo) Not(conds ...gen.Condition) IScaStorageTagDo {
return s.withDO(s.DO.Not(conds...))
}
func (s scaStorageTagDo) Or(conds ...gen.Condition) IScaStorageTagDo {
return s.withDO(s.DO.Or(conds...))
}
func (s scaStorageTagDo) Select(conds ...field.Expr) IScaStorageTagDo {
return s.withDO(s.DO.Select(conds...))
}
func (s scaStorageTagDo) Where(conds ...gen.Condition) IScaStorageTagDo {
return s.withDO(s.DO.Where(conds...))
}
func (s scaStorageTagDo) Order(conds ...field.Expr) IScaStorageTagDo {
return s.withDO(s.DO.Order(conds...))
}
func (s scaStorageTagDo) Distinct(cols ...field.Expr) IScaStorageTagDo {
return s.withDO(s.DO.Distinct(cols...))
}
func (s scaStorageTagDo) Omit(cols ...field.Expr) IScaStorageTagDo {
return s.withDO(s.DO.Omit(cols...))
}
func (s scaStorageTagDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageTagDo {
return s.withDO(s.DO.Join(table, on...))
}
func (s scaStorageTagDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo {
return s.withDO(s.DO.LeftJoin(table, on...))
}
func (s scaStorageTagDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo {
return s.withDO(s.DO.RightJoin(table, on...))
}
func (s scaStorageTagDo) Group(cols ...field.Expr) IScaStorageTagDo {
return s.withDO(s.DO.Group(cols...))
}
func (s scaStorageTagDo) Having(conds ...gen.Condition) IScaStorageTagDo {
return s.withDO(s.DO.Having(conds...))
}
func (s scaStorageTagDo) Limit(limit int) IScaStorageTagDo {
return s.withDO(s.DO.Limit(limit))
}
func (s scaStorageTagDo) Offset(offset int) IScaStorageTagDo {
return s.withDO(s.DO.Offset(offset))
}
func (s scaStorageTagDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagDo {
return s.withDO(s.DO.Scopes(funcs...))
}
func (s scaStorageTagDo) Unscoped() IScaStorageTagDo {
return s.withDO(s.DO.Unscoped())
}
func (s scaStorageTagDo) Create(values ...*model.ScaStorageTag) error {
if len(values) == 0 {
return nil
}
return s.DO.Create(values)
}
func (s scaStorageTagDo) CreateInBatches(values []*model.ScaStorageTag, batchSize int) error {
return s.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (s scaStorageTagDo) Save(values ...*model.ScaStorageTag) error {
if len(values) == 0 {
return nil
}
return s.DO.Save(values)
}
func (s scaStorageTagDo) First() (*model.ScaStorageTag, error) {
if result, err := s.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageTag), nil
}
}
func (s scaStorageTagDo) Take() (*model.ScaStorageTag, error) {
if result, err := s.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageTag), nil
}
}
func (s scaStorageTagDo) Last() (*model.ScaStorageTag, error) {
if result, err := s.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageTag), nil
}
}
func (s scaStorageTagDo) Find() ([]*model.ScaStorageTag, error) {
result, err := s.DO.Find()
return result.([]*model.ScaStorageTag), err
}
func (s scaStorageTagDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTag, err error) {
buf := make([]*model.ScaStorageTag, 0, batchSize)
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (s scaStorageTagDo) FindInBatches(result *[]*model.ScaStorageTag, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return s.DO.FindInBatches(result, batchSize, fc)
}
func (s scaStorageTagDo) Attrs(attrs ...field.AssignExpr) IScaStorageTagDo {
return s.withDO(s.DO.Attrs(attrs...))
}
func (s scaStorageTagDo) Assign(attrs ...field.AssignExpr) IScaStorageTagDo {
return s.withDO(s.DO.Assign(attrs...))
}
func (s scaStorageTagDo) Joins(fields ...field.RelationField) IScaStorageTagDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Joins(_f))
}
return &s
}
func (s scaStorageTagDo) Preload(fields ...field.RelationField) IScaStorageTagDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Preload(_f))
}
return &s
}
func (s scaStorageTagDo) FirstOrInit() (*model.ScaStorageTag, error) {
if result, err := s.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageTag), nil
}
}
func (s scaStorageTagDo) FirstOrCreate() (*model.ScaStorageTag, error) {
if result, err := s.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageTag), nil
}
}
func (s scaStorageTagDo) FindByPage(offset int, limit int) (result []*model.ScaStorageTag, count int64, err error) {
result, err = s.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = s.Offset(-1).Limit(-1).Count()
return
}
func (s scaStorageTagDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = s.Count()
if err != nil {
return
}
err = s.Offset(offset).Limit(limit).Scan(result)
return
}
func (s scaStorageTagDo) Scan(result interface{}) (err error) {
return s.DO.Scan(result)
}
func (s scaStorageTagDo) Delete(models ...*model.ScaStorageTag) (result gen.ResultInfo, err error) {
return s.DO.Delete(models)
}
func (s *scaStorageTagDo) withDO(do gen.Dao) *scaStorageTagDo {
s.DO = *do.(*gen.DO)
return s
}

View File

@@ -0,0 +1,400 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaStorageTagInfo(db *gorm.DB, opts ...gen.DOOption) scaStorageTagInfo {
_scaStorageTagInfo := scaStorageTagInfo{}
_scaStorageTagInfo.scaStorageTagInfoDo.UseDB(db, opts...)
_scaStorageTagInfo.scaStorageTagInfoDo.UseModel(&model.ScaStorageTagInfo{})
tableName := _scaStorageTagInfo.scaStorageTagInfoDo.TableName()
_scaStorageTagInfo.ALL = field.NewAsterisk(tableName)
_scaStorageTagInfo.ID = field.NewInt64(tableName, "id")
_scaStorageTagInfo.TagName = field.NewString(tableName, "tag_name")
_scaStorageTagInfo.TagKey = field.NewString(tableName, "tag_key")
_scaStorageTagInfo.CreatedAt = field.NewTime(tableName, "created_at")
_scaStorageTagInfo.UpdatedAt = field.NewTime(tableName, "updated_at")
_scaStorageTagInfo.DeletedAt = field.NewField(tableName, "deleted_at")
_scaStorageTagInfo.fillFieldMap()
return _scaStorageTagInfo
}
type scaStorageTagInfo struct {
scaStorageTagInfoDo
ALL field.Asterisk
ID field.Int64 // primary key
TagName field.String // tag name
TagKey field.String // tag keyword
CreatedAt field.Time // creation time
UpdatedAt field.Time // update time
DeletedAt field.Field // deletion time
fieldMap map[string]field.Expr
}
func (s scaStorageTagInfo) Table(newTableName string) *scaStorageTagInfo {
s.scaStorageTagInfoDo.UseTable(newTableName)
return s.updateTableName(newTableName)
}
func (s scaStorageTagInfo) As(alias string) *scaStorageTagInfo {
s.scaStorageTagInfoDo.DO = *(s.scaStorageTagInfoDo.As(alias).(*gen.DO))
return s.updateTableName(alias)
}
func (s *scaStorageTagInfo) updateTableName(table string) *scaStorageTagInfo {
s.ALL = field.NewAsterisk(table)
s.ID = field.NewInt64(table, "id")
s.TagName = field.NewString(table, "tag_name")
s.TagKey = field.NewString(table, "tag_key")
s.CreatedAt = field.NewTime(table, "created_at")
s.UpdatedAt = field.NewTime(table, "updated_at")
s.DeletedAt = field.NewField(table, "deleted_at")
s.fillFieldMap()
return s
}
func (s *scaStorageTagInfo) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := s.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (s *scaStorageTagInfo) fillFieldMap() {
s.fieldMap = make(map[string]field.Expr, 6)
s.fieldMap["id"] = s.ID
s.fieldMap["tag_name"] = s.TagName
s.fieldMap["tag_key"] = s.TagKey
s.fieldMap["created_at"] = s.CreatedAt
s.fieldMap["updated_at"] = s.UpdatedAt
s.fieldMap["deleted_at"] = s.DeletedAt
}
func (s scaStorageTagInfo) clone(db *gorm.DB) scaStorageTagInfo {
s.scaStorageTagInfoDo.ReplaceConnPool(db.Statement.ConnPool)
return s
}
func (s scaStorageTagInfo) replaceDB(db *gorm.DB) scaStorageTagInfo {
s.scaStorageTagInfoDo.ReplaceDB(db)
return s
}
type scaStorageTagInfoDo struct{ gen.DO }
type IScaStorageTagInfoDo interface {
gen.SubQuery
Debug() IScaStorageTagInfoDo
WithContext(ctx context.Context) IScaStorageTagInfoDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IScaStorageTagInfoDo
WriteDB() IScaStorageTagInfoDo
As(alias string) gen.Dao
Session(config *gorm.Session) IScaStorageTagInfoDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IScaStorageTagInfoDo
Not(conds ...gen.Condition) IScaStorageTagInfoDo
Or(conds ...gen.Condition) IScaStorageTagInfoDo
Select(conds ...field.Expr) IScaStorageTagInfoDo
Where(conds ...gen.Condition) IScaStorageTagInfoDo
Order(conds ...field.Expr) IScaStorageTagInfoDo
Distinct(cols ...field.Expr) IScaStorageTagInfoDo
Omit(cols ...field.Expr) IScaStorageTagInfoDo
Join(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo
LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo
RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo
Group(cols ...field.Expr) IScaStorageTagInfoDo
Having(conds ...gen.Condition) IScaStorageTagInfoDo
Limit(limit int) IScaStorageTagInfoDo
Offset(offset int) IScaStorageTagInfoDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagInfoDo
Unscoped() IScaStorageTagInfoDo
Create(values ...*model.ScaStorageTagInfo) error
CreateInBatches(values []*model.ScaStorageTagInfo, batchSize int) error
Save(values ...*model.ScaStorageTagInfo) error
First() (*model.ScaStorageTagInfo, error)
Take() (*model.ScaStorageTagInfo, error)
Last() (*model.ScaStorageTagInfo, error)
Find() ([]*model.ScaStorageTagInfo, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTagInfo, err error)
FindInBatches(result *[]*model.ScaStorageTagInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.ScaStorageTagInfo) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IScaStorageTagInfoDo
Assign(attrs ...field.AssignExpr) IScaStorageTagInfoDo
Joins(fields ...field.RelationField) IScaStorageTagInfoDo
Preload(fields ...field.RelationField) IScaStorageTagInfoDo
FirstOrInit() (*model.ScaStorageTagInfo, error)
FirstOrCreate() (*model.ScaStorageTagInfo, error)
FindByPage(offset int, limit int) (result []*model.ScaStorageTagInfo, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IScaStorageTagInfoDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (s scaStorageTagInfoDo) Debug() IScaStorageTagInfoDo {
return s.withDO(s.DO.Debug())
}
func (s scaStorageTagInfoDo) WithContext(ctx context.Context) IScaStorageTagInfoDo {
return s.withDO(s.DO.WithContext(ctx))
}
func (s scaStorageTagInfoDo) ReadDB() IScaStorageTagInfoDo {
return s.Clauses(dbresolver.Read)
}
func (s scaStorageTagInfoDo) WriteDB() IScaStorageTagInfoDo {
return s.Clauses(dbresolver.Write)
}
func (s scaStorageTagInfoDo) Session(config *gorm.Session) IScaStorageTagInfoDo {
return s.withDO(s.DO.Session(config))
}
func (s scaStorageTagInfoDo) Clauses(conds ...clause.Expression) IScaStorageTagInfoDo {
return s.withDO(s.DO.Clauses(conds...))
}
func (s scaStorageTagInfoDo) Returning(value interface{}, columns ...string) IScaStorageTagInfoDo {
return s.withDO(s.DO.Returning(value, columns...))
}
func (s scaStorageTagInfoDo) Not(conds ...gen.Condition) IScaStorageTagInfoDo {
return s.withDO(s.DO.Not(conds...))
}
func (s scaStorageTagInfoDo) Or(conds ...gen.Condition) IScaStorageTagInfoDo {
return s.withDO(s.DO.Or(conds...))
}
func (s scaStorageTagInfoDo) Select(conds ...field.Expr) IScaStorageTagInfoDo {
return s.withDO(s.DO.Select(conds...))
}
func (s scaStorageTagInfoDo) Where(conds ...gen.Condition) IScaStorageTagInfoDo {
return s.withDO(s.DO.Where(conds...))
}
func (s scaStorageTagInfoDo) Order(conds ...field.Expr) IScaStorageTagInfoDo {
return s.withDO(s.DO.Order(conds...))
}
func (s scaStorageTagInfoDo) Distinct(cols ...field.Expr) IScaStorageTagInfoDo {
return s.withDO(s.DO.Distinct(cols...))
}
func (s scaStorageTagInfoDo) Omit(cols ...field.Expr) IScaStorageTagInfoDo {
return s.withDO(s.DO.Omit(cols...))
}
func (s scaStorageTagInfoDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo {
return s.withDO(s.DO.Join(table, on...))
}
func (s scaStorageTagInfoDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo {
return s.withDO(s.DO.LeftJoin(table, on...))
}
func (s scaStorageTagInfoDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo {
return s.withDO(s.DO.RightJoin(table, on...))
}
func (s scaStorageTagInfoDo) Group(cols ...field.Expr) IScaStorageTagInfoDo {
return s.withDO(s.DO.Group(cols...))
}
func (s scaStorageTagInfoDo) Having(conds ...gen.Condition) IScaStorageTagInfoDo {
return s.withDO(s.DO.Having(conds...))
}
func (s scaStorageTagInfoDo) Limit(limit int) IScaStorageTagInfoDo {
return s.withDO(s.DO.Limit(limit))
}
func (s scaStorageTagInfoDo) Offset(offset int) IScaStorageTagInfoDo {
return s.withDO(s.DO.Offset(offset))
}
func (s scaStorageTagInfoDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagInfoDo {
return s.withDO(s.DO.Scopes(funcs...))
}
func (s scaStorageTagInfoDo) Unscoped() IScaStorageTagInfoDo {
return s.withDO(s.DO.Unscoped())
}
func (s scaStorageTagInfoDo) Create(values ...*model.ScaStorageTagInfo) error {
if len(values) == 0 {
return nil
}
return s.DO.Create(values)
}
func (s scaStorageTagInfoDo) CreateInBatches(values []*model.ScaStorageTagInfo, batchSize int) error {
return s.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (s scaStorageTagInfoDo) Save(values ...*model.ScaStorageTagInfo) error {
if len(values) == 0 {
return nil
}
return s.DO.Save(values)
}
func (s scaStorageTagInfoDo) First() (*model.ScaStorageTagInfo, error) {
if result, err := s.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageTagInfo), nil
}
}
func (s scaStorageTagInfoDo) Take() (*model.ScaStorageTagInfo, error) {
if result, err := s.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageTagInfo), nil
}
}
func (s scaStorageTagInfoDo) Last() (*model.ScaStorageTagInfo, error) {
if result, err := s.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageTagInfo), nil
}
}
func (s scaStorageTagInfoDo) Find() ([]*model.ScaStorageTagInfo, error) {
result, err := s.DO.Find()
return result.([]*model.ScaStorageTagInfo), err
}
func (s scaStorageTagInfoDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTagInfo, err error) {
buf := make([]*model.ScaStorageTagInfo, 0, batchSize)
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (s scaStorageTagInfoDo) FindInBatches(result *[]*model.ScaStorageTagInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return s.DO.FindInBatches(result, batchSize, fc)
}
func (s scaStorageTagInfoDo) Attrs(attrs ...field.AssignExpr) IScaStorageTagInfoDo {
return s.withDO(s.DO.Attrs(attrs...))
}
func (s scaStorageTagInfoDo) Assign(attrs ...field.AssignExpr) IScaStorageTagInfoDo {
return s.withDO(s.DO.Assign(attrs...))
}
func (s scaStorageTagInfoDo) Joins(fields ...field.RelationField) IScaStorageTagInfoDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Joins(_f))
}
return &s
}
func (s scaStorageTagInfoDo) Preload(fields ...field.RelationField) IScaStorageTagInfoDo {
for _, _f := range fields {
s = *s.withDO(s.DO.Preload(_f))
}
return &s
}
func (s scaStorageTagInfoDo) FirstOrInit() (*model.ScaStorageTagInfo, error) {
if result, err := s.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageTagInfo), nil
}
}
func (s scaStorageTagInfoDo) FirstOrCreate() (*model.ScaStorageTagInfo, error) {
if result, err := s.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.ScaStorageTagInfo), nil
}
}
func (s scaStorageTagInfoDo) FindByPage(offset int, limit int) (result []*model.ScaStorageTagInfo, count int64, err error) {
result, err = s.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = s.Offset(-1).Limit(-1).Count()
return
}
func (s scaStorageTagInfoDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = s.Count()
if err != nil {
return
}
err = s.Offset(offset).Limit(limit).Scan(result)
return
}
func (s scaStorageTagInfoDo) Scan(result interface{}) (err error) {
return s.DO.Scan(result)
}
func (s scaStorageTagInfoDo) Delete(models ...*model.ScaStorageTagInfo) (result gen.ResultInfo, err error) {
return s.DO.Delete(models)
}
func (s *scaStorageTagInfoDo) withDO(do gen.Dao) *scaStorageTagInfoDo {
s.DO = *do.(*gen.DO)
return s
}

View File

@@ -6,15 +6,17 @@ package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaUserFollow(db *gorm.DB, opts ...gen.DOOption) scaUserFollow {

View File

@@ -6,15 +6,17 @@ package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaUserLevel(db *gorm.DB, opts ...gen.DOOption) scaUserLevel {

View File

@@ -6,15 +6,17 @@ package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)
func newScaUserMessage(db *gorm.DB, opts ...gen.DOOption) scaUserMessage {

View File

@@ -1,3 +0,0 @@
package rpc
//go:generate goctl rpc protoc file.proto --go_out=. --go-grpc_out=. --zrpc_out=. --client=true -m --style=go_zero

View File

@@ -1,99 +1,82 @@
package encrypt
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"errors"
"io"
)
// AES/CBC/PKCS7Padding encryption and decryption
// Encrypt encrypts using AES-GCM mode
func Encrypt(plainText string, key string) (string, error) {
// Convert the key to a byte slice
keyBytes := []byte(key)
// Encrypt encrypts a string
//
// plainText: the string to encrypt
// key: encryption key
// iv: initialization vector (fixed at 16 bytes for AES)
func Encrypt(plainText string, key string, iv string) (string, error) {
data, err := aesCBCEncrypt([]byte(plainText), []byte(key), []byte(iv))
// Create the AES cipher block
block, err := aes.NewCipher(keyBytes)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(data), nil
// Create the GCM instance
aesGCM, err := cipher.NewGCM(block)
if err != nil {
return "", err
}
// Generate a random nonce
nonce := make([]byte, aesGCM.NonceSize())
if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
return "", err
}
// Encrypt the plaintext
cipherText := aesGCM.Seal(nil, nonce, []byte(plainText), nil)
// Concatenate the nonce and ciphertext, then Base64-encode the result
result := append(nonce, cipherText...)
return base64.StdEncoding.EncodeToString(result), nil
}
// Decrypt decrypts a string
//
// cipherText: the string to decrypt
// key: encryption key
// iv: initialization vector (fixed at 16 bytes for AES)
func Decrypt(cipherText string, key string, iv string) (string, error) {
// Decrypt decrypts using AES-GCM mode
func Decrypt(cipherText string, key string) (string, error) {
// Convert the key to a byte slice
keyBytes := []byte(key)
// Base64-decode the ciphertext
data, err := base64.StdEncoding.DecodeString(cipherText)
if err != nil {
return "", err
}
dnData, err := aesCBCDecrypt(data, []byte(key), []byte(iv))
// Create the AES cipher block
block, err := aes.NewCipher(keyBytes)
if err != nil {
return "", err
}
return string(dnData), nil
}
// aesCBCEncrypt encrypts with AES/CBC/PKCS7Padding
func aesCBCEncrypt(plaintext []byte, key []byte, iv []byte) ([]byte, error) {
// AES
block, err := aes.NewCipher(key)
// Create the GCM instance
aesGCM, err := cipher.NewGCM(block)
if err != nil {
panic(err)
return "", err
}
// PKCS7 padding
plaintext = paddingPKCS7(plaintext, aes.BlockSize)
// Check that the data is long enough
nonceSize := aesGCM.NonceSize()
if len(data) < nonceSize {
return "", errors.New("cipherText too short")
}
// CBC encryption
mode := cipher.NewCBCEncrypter(block, iv)
mode.CryptBlocks(plaintext, plaintext)
// Split the nonce and the ciphertext
nonce, cipherTextBytes := data[:nonceSize], data[nonceSize:]
return plaintext, nil
}
// aesCBCDecrypt decrypts with AES/CBC/PKCS7Padding
func aesCBCDecrypt(ciphertext []byte, key []byte, iv []byte) ([]byte, error) {
// AES
block, err := aes.NewCipher(key)
// Decrypt the ciphertext
plainText, err := aesGCM.Open(nil, nonce, cipherTextBytes, nil)
if err != nil {
panic(err)
return "", err
}
if len(ciphertext)%aes.BlockSize != 0 {
panic("ciphertext is not a multiple of the block size")
}
// CBC decryption
mode := cipher.NewCBCDecrypter(block, iv)
mode.CryptBlocks(ciphertext, ciphertext)
// Remove the PKCS7 padding
result := unPaddingPKCS7(ciphertext)
return result, nil
}
// paddingPKCS7 applies PKCS7 padding
func paddingPKCS7(plaintext []byte, blockSize int) []byte {
paddingSize := blockSize - len(plaintext)%blockSize
paddingText := bytes.Repeat([]byte{byte(paddingSize)}, paddingSize)
return append(plaintext, paddingText...)
}
// unPaddingPKCS7 removes PKCS7 padding
func unPaddingPKCS7(s []byte) []byte {
length := len(s)
if length == 0 {
return s
}
unPadding := int(s[length-1])
return s[:(length - unPadding)]
return string(plainText), nil
}
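
With Go's standard library defaults, the Base64-decoded output of the new GCM-based Encrypt is laid out as nonce || ciphertext || tag: a 12-byte nonce followed by the sealed data, which carries a 16-byte authentication tag. A small self-contained sketch of that layout (illustrative only, independent of this package):

package main

import (
    "crypto/aes"
    "crypto/cipher"
    "crypto/rand"
    "fmt"
    "io"
)

func main() {
    key := []byte("thisisasecretkey") // 16 bytes selects AES-128
    block, err := aes.NewCipher(key)
    if err != nil {
        panic(err)
    }
    gcm, err := cipher.NewGCM(block)
    if err != nil {
        panic(err)
    }

    nonce := make([]byte, gcm.NonceSize()) // 12 bytes by default
    if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
        panic(err)
    }

    plain := []byte("Hello, AES-GCM encryption!")
    sealed := gcm.Seal(nil, nonce, plain, nil) // ciphertext plus the GCM tag

    fmt.Println("nonce size:", gcm.NonceSize())           // 12
    fmt.Println("tag overhead:", len(sealed)-len(plain))  // 16
    fmt.Println("bytes to Base64-encode:", len(nonce)+len(sealed))
}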

View File

@@ -0,0 +1,26 @@
package encrypt
import (
"testing"
)
func TestAES(t *testing.T) {
key := "thisisasecretkey" // 16-byte key (AES-128)
plainText := "Hello, AES-GCM encryption!"
// Encrypt
encrypted, err := Encrypt(plainText, key)
if err != nil {
t.Fatalf("encryption failed: %v", err)
}
t.Logf("encrypted: %s", encrypted)
// Decrypt
decrypted, err := Decrypt(encrypted, key)
if err != nil {
t.Fatalf("decryption failed: %v", err)
}
t.Logf("decrypted: %s", decrypted)
if decrypted != plainText {
t.Fatalf("round trip mismatch: got %q, want %q", decrypted, plainText)
}
}

View File

@@ -0,0 +1,36 @@
package config
import (
"errors"
)
// StorageConfig is the per-user storage configuration
type StorageConfig struct {
// Required fields
Provider string `json:"provider"` // storage service provider
AccessKey string `json:"access_key"` // access key
SecretKey string `json:"secret_key"` // secret key
Region string `json:"region"` // region
BucketName string `json:"bucket_name"` // bucket name
// Optional fields
Endpoint string `json:"endpoint,omitempty"` // custom API endpoint
ExtraConfig map[string]string `json:"extra_config,omitempty"` // extra provider-specific settings
}
// Validate checks that the storage configuration is valid
func (sc *StorageConfig) Validate() error {
if sc.Provider == "" {
return errors.New("provider is required")
}
if sc.AccessKey == "" || sc.SecretKey == "" {
return errors.New("access_key and secret_key are required")
}
if sc.Region == "" {
return errors.New("region is required")
}
if sc.BucketName == "" {
return errors.New("bucket_name is required")
}
return nil
}
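
As a quick illustration (not part of this commit), a caller could build and validate a config along these lines before handing it to the storage manager; all credential and endpoint values below are placeholders:

package main

import (
    "fmt"

    "schisandra-album-cloud-microservices/common/storage/config"
)

func main() {
    cfg := &config.StorageConfig{
        Provider:   "ali",             // matches constants.ProviderAliOSS
        AccessKey:  "your-access-key", // placeholder
        SecretKey:  "your-secret-key", // placeholder
        Region:     "cn-hangzhou",
        BucketName: "my-album-bucket",
        Endpoint:   "oss-cn-hangzhou.aliyuncs.com", // optional
    }
    if err := cfg.Validate(); err != nil {
        fmt.Println("invalid storage config:", err)
        return
    }
    fmt.Println("storage config ok")
}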

View File

@@ -0,0 +1,9 @@
package constants
const (
ProviderAliOSS = "ali" // 阿里云 OSS
ProviderTencentCOS = "tencent" // 腾讯云 COS
ProviderQiniu = "qiniu" // 七牛云
ProviderMinio = "minio" // Minio
)

View File

@@ -0,0 +1,166 @@
package events
import (
"fmt"
"reflect"
"sync"
)
// Event 定义事件类型
type Event struct {
Name string // 事件名称
Data interface{} // 事件数据
}
// EventHandler 定义事件处理器函数类型
type EventHandler func(event Event)
// Dispatcher 接口定义事件分发器
type Dispatcher interface {
Register(eventName string, handler EventHandler) // 注册事件处理器
RegisterOnce(eventName string, handler EventHandler) // 注册一次性事件处理器
Dispatch(event Event) // 分发事件
RemoveHandler(eventName string, handler EventHandler) error // 移除特定处理器
ClearHandlers(eventName string) // 清除某事件的所有处理器
}
// defaultDispatcher 默认事件分发器实现
type defaultDispatcher struct {
handlers map[string][]EventHandler
once map[string]map[uintptr]struct{} // 以处理器函数指针(reflect Pointer)作为键,标记一次性处理器
mu sync.RWMutex
}
// NewDispatcher 创建新的事件分发器
func NewDispatcher() Dispatcher {
return &defaultDispatcher{
handlers: make(map[string][]EventHandler),
once: make(map[string]map[uintptr]struct{}),
}
}
// Register 注册事件处理器
func (d *defaultDispatcher) Register(eventName string, handler EventHandler) {
if eventName == "" || handler == nil {
return
}
d.mu.Lock()
defer d.mu.Unlock()
d.handlers[eventName] = append(d.handlers[eventName], handler)
}
// RegisterOnce 注册一次性事件处理器
func (d *defaultDispatcher) RegisterOnce(eventName string, handler EventHandler) {
if eventName == "" || handler == nil {
return
}
d.mu.Lock()
defer d.mu.Unlock()
// 如果还未初始化一次性处理器记录表,则初始化
if _, exists := d.once[eventName]; !exists {
d.once[eventName] = make(map[uintptr]struct{})
}
// 记录处理器的函数指针,供分发后移除
d.once[eventName][reflect.ValueOf(handler).Pointer()] = struct{}{}
// 追加处理器
d.handlers[eventName] = append(d.handlers[eventName], handler)
}
// Dispatch 分发事件
func (d *defaultDispatcher) Dispatch(event Event) {
if event.Name == "" {
return
}
d.mu.RLock()
handlers := d.handlers[event.Name]
onceHandlers := d.once[event.Name]
d.mu.RUnlock()
if len(handlers) == 0 {
fmt.Printf("No handlers registered for event: %s\n", event.Name)
return
}
var wg sync.WaitGroup
for _, handler := range handlers {
wg.Add(1)
go func(h EventHandler) {
defer wg.Done()
h(event)
}(handler)
}
wg.Wait() // 等待所有处理器执行完毕
// 移除已执行的一次性处理器
if len(onceHandlers) > 0 {
d.mu.Lock()
defer d.mu.Unlock()
remainingHandlers := make([]EventHandler, 0, len(handlers))
for _, handler := range handlers {
ptr := reflect.ValueOf(handler).Pointer()
if _, exists := onceHandlers[ptr]; !exists {
remainingHandlers = append(remainingHandlers, handler)
} else {
delete(d.once[event.Name], ptr)
}
}
d.handlers[event.Name] = remainingHandlers
}
}
// contains 检查事件处理器是否被注册为一次性处理器
func contains(onceHandlers map[uintptr]struct{}, handler EventHandler) bool {
_, exists := onceHandlers[reflect.ValueOf(handler).Pointer()]
return exists
}
// RemoveHandler 移除特定处理器
func (d *defaultDispatcher) RemoveHandler(eventName string, handler EventHandler) error {
if eventName == "" || handler == nil {
return fmt.Errorf("invalid event name or handler")
}
d.mu.Lock()
defer d.mu.Unlock()
handlers, exists := d.handlers[eventName]
if !exists {
return fmt.Errorf("event %s not found", eventName)
}
// 过滤掉需要移除的处理器(通过函数指针比较)
target := reflect.ValueOf(handler).Pointer()
updatedHandlers := make([]EventHandler, 0, len(handlers))
for _, h := range handlers {
if reflect.ValueOf(h).Pointer() != target {
updatedHandlers = append(updatedHandlers, h)
}
}
d.handlers[eventName] = updatedHandlers
return nil
}
// ClearHandlers 清除某事件的所有处理器
func (d *defaultDispatcher) ClearHandlers(eventName string) {
if eventName == "" {
return
}
d.mu.Lock()
defer d.mu.Unlock()
delete(d.handlers, eventName)
delete(d.once, eventName)
}
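
A minimal usage sketch (not part of the commit) of how the dispatcher is expected to be driven; the event name "file.uploaded" is assumed for illustration only:

package main

import (
    "fmt"

    "schisandra-album-cloud-microservices/common/storage/events"
)

func main() {
    d := events.NewDispatcher()
    // Runs on every dispatch.
    d.Register("file.uploaded", func(e events.Event) {
        fmt.Printf("[LOG] %s: %+v\n", e.Name, e.Data)
    })
    // Runs only on the first dispatch, then is removed.
    d.RegisterOnce("file.uploaded", func(e events.Event) {
        fmt.Println("[NOTIFY] first upload only")
    })
    d.Dispatch(events.Event{Name: "file.uploaded", Data: map[string]string{"key": "a.jpg"}})
    d.Dispatch(events.Event{Name: "file.uploaded", Data: map[string]string{"key": "b.jpg"}})
}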

View File

@@ -0,0 +1,13 @@
package events
import (
"fmt"
)
func LogHandler(event Event) {
fmt.Printf("[LOG] Event: %s, Data: %+v\n", event.Name, event.Data)
}
func NotifyHandler(event Event) {
fmt.Printf("[NOTIFY] User notified about event: %s\n", event.Name)
}

View File

@@ -0,0 +1,85 @@
package manager
import (
"errors"
"schisandra-album-cloud-microservices/common/storage/config"
"schisandra-album-cloud-microservices/common/storage/events"
"schisandra-album-cloud-microservices/common/storage/storage"
"sync"
"time"
)
// Factory 定义存储服务工厂函数类型
type Factory func(config *config.StorageConfig, dispatcher events.Dispatcher) (storage.Service, error)
// Manager 管理存储服务的注册、实例化和缓存
type Manager struct {
mu sync.RWMutex
registry map[string]Factory
dispatcher events.Dispatcher
cache *UserStorageCache
}
// NewStorageManager 创建新的存储管理器
func NewStorageManager(dispatcher events.Dispatcher) *Manager {
return &Manager{
registry: make(map[string]Factory),
dispatcher: dispatcher,
cache: NewUserStorageCache(),
}
}
// RegisterStorage 注册存储服务提供商
func (sm *Manager) RegisterStorage(provider string, factory Factory) error {
sm.mu.Lock()
defer sm.mu.Unlock()
if provider == "" || factory == nil {
return errors.New("invalid provider or factory")
}
if _, exists := sm.registry[provider]; exists {
return errors.New("provider already registered")
}
sm.registry[provider] = factory
return nil
}
// GetStorage 获取或创建存储服务实例
func (sm *Manager) GetStorage(key string, config *config.StorageConfig) (storage.Service, error) {
if key == "" || config.Provider == "" {
return nil, errors.New("invalid user ID or provider")
}
// 尝试从缓存获取实例
return sm.cache.GetOrCreate(key, config.Provider, func() (storage.Service, error) {
// 从注册表中查找工厂函数
sm.mu.RLock()
factory, exists := sm.registry[config.Provider]
sm.mu.RUnlock()
if !exists {
return nil, errors.New("unsupported provider: " + config.Provider)
}
// 创建新实例并返回
return factory(config, sm.dispatcher)
})
}
// ClearUnused 清理长时间未使用的缓存实例
func (sm *Manager) ClearUnused(timeout time.Duration) {
sm.cache.ClearUnused(timeout)
}
// ListProviders 列出所有注册的存储服务提供商
func (sm *Manager) ListProviders() []string {
sm.mu.RLock()
defer sm.mu.RUnlock()
providers := make([]string, 0, len(sm.registry))
for provider := range sm.registry {
providers = append(providers, provider)
}
return providers
}
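
A hedged sketch (not part of the commit) of registering a provider factory by hand and resolving a per-user instance; it mirrors what the plugins package further below does, with placeholder credentials:

package main

import (
    "fmt"

    "schisandra-album-cloud-microservices/common/storage/config"
    "schisandra-album-cloud-microservices/common/storage/constants"
    "schisandra-album-cloud-microservices/common/storage/events"
    "schisandra-album-cloud-microservices/common/storage/manager"
    "schisandra-album-cloud-microservices/common/storage/storage"
)

func main() {
    m := manager.NewStorageManager(events.NewDispatcher())
    // Register the Ali OSS factory, same shape as the plugins package.
    if err := m.RegisterStorage(constants.ProviderAliOSS, func(c *config.StorageConfig, d events.Dispatcher) (storage.Service, error) {
        return storage.NewAliOSS(c, d)
    }); err != nil {
        fmt.Println("register provider failed:", err)
        return
    }
    cfg := &config.StorageConfig{
        Provider:   constants.ProviderAliOSS,
        AccessKey:  "your-access-key", // placeholder
        SecretKey:  "your-secret-key", // placeholder
        Region:     "cn-hangzhou",
        BucketName: "my-album-bucket",
        Endpoint:   "oss-cn-hangzhou.aliyuncs.com",
    }
    // The same user/provider pair reuses one cached instance.
    svc, err := m.GetStorage("user-123", cfg)
    if err != nil {
        fmt.Println("get storage failed:", err)
        return
    }
    fmt.Printf("registered providers: %v, service type: %T\n", m.ListProviders(), svc)
}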

View File

@@ -0,0 +1,68 @@
package manager
import (
"schisandra-album-cloud-microservices/common/storage/storage"
"sync"
"time"
)
// CacheEntry 缓存项定义
type CacheEntry struct {
Instance storage.Service
mu sync.Mutex // 确保 LastUsed 的线程安全
LastUsed time.Time
}
// UserStorageCache 管理每个用户的存储实例缓存
type UserStorageCache struct {
cache sync.Map // map[userID::providerName]*CacheEntry
}
// NewUserStorageCache 创建新的用户存储缓存
func NewUserStorageCache() *UserStorageCache {
return &UserStorageCache{}
}
// GetOrCreate 获取或创建缓存实例
func (usc *UserStorageCache) GetOrCreate(key, providerName string, factory func() (storage.Service, error)) (storage.Service, error) {
cacheKey := key + "::" + providerName
if entry, exists := usc.cache.Load(cacheKey); exists {
usc.updateLastUsed(entry.(*CacheEntry))
return entry.(*CacheEntry).Instance, nil
}
instance, err := factory()
if err != nil {
return nil, err
}
cacheEntry := &CacheEntry{
Instance: instance,
LastUsed: time.Now(),
}
usc.cache.Store(cacheKey, cacheEntry)
return instance, nil
}
// ClearUnused 清理长时间未使用的实例
func (usc *UserStorageCache) ClearUnused(timeout time.Duration) {
now := time.Now()
usc.cache.Range(func(key, value interface{}) bool {
entry := value.(*CacheEntry)
entry.mu.Lock()
defer entry.mu.Unlock()
if now.Sub(entry.LastUsed) > timeout {
usc.cache.Delete(key)
}
return true
})
}
// updateLastUsed 更新最后使用时间
func (usc *UserStorageCache) updateLastUsed(entry *CacheEntry) {
entry.mu.Lock()
defer entry.mu.Unlock()
entry.LastUsed = time.Now()
}
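
ClearUnused is not called anywhere in this diff; a background janitor along these lines (an assumption, not part of the commit) would keep the per-user cache bounded:

package main

import (
    "time"

    "schisandra-album-cloud-microservices/common/storage/events"
    "schisandra-album-cloud-microservices/common/storage/manager"
)

// startCacheJanitor periodically evicts storage instances that have been idle too long.
func startCacheJanitor(m *manager.Manager, interval, idle time.Duration) {
    go func() {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()
        for range ticker.C {
            m.ClearUnused(idle)
        }
    }()
}

func main() {
    m := manager.NewStorageManager(events.NewDispatcher())
    startCacheJanitor(m, 10*time.Minute, 30*time.Minute)
    time.Sleep(time.Hour) // keep the process alive for the sketch
}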

View File

@@ -0,0 +1,26 @@
package plugins
import (
"schisandra-album-cloud-microservices/common/storage/config"
"schisandra-album-cloud-microservices/common/storage/constants"
"schisandra-album-cloud-microservices/common/storage/events"
"schisandra-album-cloud-microservices/common/storage/manager"
"schisandra-album-cloud-microservices/common/storage/storage"
)
// pluginFactories 存储所有插件的工厂函数
var pluginFactories = map[string]manager.Factory{
constants.ProviderAliOSS: func(config *config.StorageConfig, dispatcher events.Dispatcher) (storage.Service, error) {
return storage.NewAliOSS(config, dispatcher)
},
}
// RegisterPlugins 注册所有插件
func RegisterPlugins(manager *manager.Manager) error {
for provider, factory := range pluginFactories {
if err := manager.RegisterStorage(provider, factory); err != nil {
return err
}
}
return nil
}

23
common/storage/storage.go Normal file
View File

@@ -0,0 +1,23 @@
package storage
import (
"schisandra-album-cloud-microservices/common/storage/events"
"schisandra-album-cloud-microservices/common/storage/manager"
"schisandra-album-cloud-microservices/common/storage/plugins"
)
// InitStorageManager 初始化存储管理器
func InitStorageManager() *manager.Manager {
// 初始化事件分发器
dispatcher := events.NewDispatcher()
// 初始化存储管理器
m := manager.NewStorageManager(dispatcher)
// 注册插件
if err := plugins.RegisterPlugins(m); err != nil {
panic(err)
}
return m
}
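
Putting the pieces together, a caller could go from InitStorageManager to a simple upload roughly like this (placeholder credentials; a sketch, not part of the commit):

package main

import (
    "context"
    "fmt"
    "strings"

    "schisandra-album-cloud-microservices/common/storage"
    "schisandra-album-cloud-microservices/common/storage/config"
    "schisandra-album-cloud-microservices/common/storage/constants"
)

func main() {
    m := storage.InitStorageManager() // plugins are registered here
    cfg := &config.StorageConfig{
        Provider:   constants.ProviderAliOSS,
        AccessKey:  "your-access-key", // placeholder
        SecretKey:  "your-secret-key", // placeholder
        Region:     "cn-hangzhou",
        BucketName: "my-album-bucket",
        Endpoint:   "oss-cn-hangzhou.aliyuncs.com",
    }
    svc, err := m.GetStorage("user-123", cfg)
    if err != nil {
        fmt.Println("get storage failed:", err)
        return
    }
    result, err := svc.UploadFileSimple(context.Background(), cfg.BucketName, "hello.txt",
        strings.NewReader("hello oss"), map[string]string{"uploader": "user-123"})
    if err != nil {
        fmt.Println("upload failed:", err)
        return
    }
    if result.ETag != nil {
        fmt.Println("uploaded, etag:", *result.ETag)
    }
}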

View File

@@ -0,0 +1,404 @@
package storage
import (
"bufio"
"bytes"
"context"
"fmt"
"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
"io"
"log"
"os"
"schisandra-album-cloud-microservices/common/storage/config"
"schisandra-album-cloud-microservices/common/storage/events"
"sync"
)
type AliOSS struct {
client *oss.Client
bucket string
dispatcher events.Dispatcher
}
// NewAliOSS 创建阿里云 OSS 实例
func NewAliOSS(config *config.StorageConfig, dispatcher events.Dispatcher) (*AliOSS, error) {
credentialsProvider := credentials.NewStaticCredentialsProvider(config.AccessKey, config.SecretKey)
cfg := oss.NewConfig().WithCredentialsProvider(credentialsProvider).
WithEndpoint(config.Endpoint).
WithRegion(config.Region).WithInsecureSkipVerify(false)
client := oss.NewClient(cfg)
return &AliOSS{client: client, bucket: config.BucketName, dispatcher: dispatcher}, nil
}
// CreateBucket 创建存储桶
func (a *AliOSS) CreateBucket(ctx context.Context, bucketName string) (string, error) {
request := &oss.PutBucketRequest{
Bucket: oss.Ptr(bucketName),
}
result, err := a.client.PutBucket(ctx, request)
if err != nil {
return "", fmt.Errorf("failed to put bucket, error: %v", err)
}
return result.Status, nil
}
// ListBucketsPage 列出所有存储桶
func (a *AliOSS) ListBucketsPage(ctx context.Context) ([]BucketProperties, error) {
request := &oss.ListBucketsRequest{}
// 定义一个函数来处理 PaginatorOptions
modifyOptions := func(opts *oss.PaginatorOptions) {
// 在这里可以修改 opts 的值,比如设置每页返回的存储空间数量上限
// 示例:opts.Limit = 5,即每页返回 5 个存储空间
opts.Limit = 5
}
p := a.client.NewListBucketsPaginator(request, modifyOptions)
var buckets []BucketProperties
for p.HasNext() {
page, err := p.NextPage(ctx)
if err != nil {
return nil, fmt.Errorf("failed to list buckets, error: %v", err)
}
for _, b := range page.Buckets {
buckets = append(buckets, BucketProperties{
Name: b.Name,
CreationDate: b.CreationDate,
Location: b.Location,
Region: b.Region,
StorageClass: b.StorageClass,
ExtranetEndpoint: b.ExtranetEndpoint,
IntranetEndpoint: b.IntranetEndpoint,
ResourceGroupId: b.ResourceGroupId,
})
}
}
return buckets, nil
}
// ListBuckets 列出所有存储桶
func (a *AliOSS) ListBuckets(ctx context.Context, prefix string, maxKeys int32, marker string) ([]BucketProperties, error) {
request := &oss.ListBucketsRequest{
Prefix: oss.Ptr(prefix),
MaxKeys: maxKeys,
Marker: oss.Ptr(marker),
}
var buckets []BucketProperties
for {
lsRes, err := a.client.ListBuckets(ctx, request)
if err != nil {
return nil, fmt.Errorf("failed to list buckets, error: %v", err)
}
for _, bucket := range lsRes.Buckets {
buckets = append(buckets, BucketProperties{
Name: bucket.Name,
CreationDate: bucket.CreationDate,
Location: bucket.Location,
Region: bucket.Region,
StorageClass: bucket.StorageClass,
ExtranetEndpoint: bucket.ExtranetEndpoint,
IntranetEndpoint: bucket.IntranetEndpoint,
ResourceGroupId: bucket.ResourceGroupId,
})
}
if !lsRes.IsTruncated {
break
}
marker = *lsRes.NextMarker
}
return buckets, nil
}
// IsBucketExist 检查存储桶是否存在
func (a *AliOSS) IsBucketExist(ctx context.Context, bucketName string) (bool, error) {
exist, err := a.client.IsBucketExist(ctx, bucketName)
if err != nil {
return false, fmt.Errorf("failed to check bucket exist, error: %v", err)
}
return exist, nil
}
// GetBucketStat 获取存储桶容量
func (a *AliOSS) GetBucketStat(ctx context.Context, bucketName string) (*BucketStat, error) {
request := &oss.GetBucketStatRequest{
Bucket: oss.Ptr(bucketName),
}
result, err := a.client.GetBucketStat(ctx, request)
if err != nil {
return nil, fmt.Errorf("failed to get bucket stat, error: %v", err)
}
return &BucketStat{
Storage: result.Storage,
ObjectCount: result.ObjectCount,
LastModified: result.LastModifiedTime,
}, nil
}
// GetBucketInfo 获取存储桶信息
func (a *AliOSS) GetBucketInfo(ctx context.Context, bucketName string) (*BucketInfo, error) {
request := &oss.GetBucketInfoRequest{
Bucket: oss.Ptr(bucketName),
}
result, err := a.client.GetBucketInfo(ctx, request)
if err != nil {
return nil, fmt.Errorf("failed to get bucket info, error: %v", err)
}
return &BucketInfo{
Name: *result.BucketInfo.Name,
Location: *result.BucketInfo.Location,
CreationDate: result.BucketInfo.CreationDate,
}, nil
}
// DeleteBucket 删除存储桶
func (a *AliOSS) DeleteBucket(ctx context.Context, bucketName string) int {
request := &oss.DeleteBucketRequest{
Bucket: oss.Ptr(bucketName),
}
result, err := a.client.DeleteBucket(ctx, request)
if err != nil {
log.Fatalf("failed to delete bucket %v", err)
}
return result.StatusCode
}
// UploadFileSimple 上传文件
func (a *AliOSS) UploadFileSimple(ctx context.Context, bucketName, objectName string, fileData io.Reader, metadata map[string]string) (*PutObjectResult, error) {
putRequest := &oss.PutObjectRequest{
Bucket: oss.Ptr(bucketName), // 存储空间名称
Key: oss.Ptr(objectName), // 对象名称
StorageClass: oss.StorageClassStandard, // 指定对象的存储类型为标准存储
Acl: oss.ObjectACLPrivate, // 指定对象的访问权限为私有访问
Metadata: metadata, // 指定对象的元数据
Body: fileData, // 使用文件流
ServerSideEncryption: oss.Ptr("AES256"),
}
result, err := a.client.PutObject(ctx, putRequest)
if err != nil {
return nil, fmt.Errorf("failed to upload file, error: %v", err)
}
return &PutObjectResult{
ContentMD5: result.ContentMD5,
ETag: result.ETag,
HashCRC64: result.HashCRC64,
VersionId: result.VersionId,
CallbackResult: result.CallbackResult,
}, nil
}
// MultipartUpload 分片上传文件
func (a *AliOSS) MultipartUpload(ctx context.Context, bucketName, objectName string, filePath string) (*CompleteMultipartUploadResult, error) {
initRequest := &oss.InitiateMultipartUploadRequest{
Bucket: oss.Ptr(bucketName),
Key: oss.Ptr(objectName),
}
initResult, err := a.client.InitiateMultipartUpload(ctx, initRequest)
if err != nil {
return nil, fmt.Errorf("failed to initiate multipart upload, error: %v", err)
}
uploadId := *initResult.UploadId
var wg sync.WaitGroup
var parts []oss.UploadPart
count := 3
var mu sync.Mutex
file, err := os.Open(filePath)
if err != nil {
log.Fatalf("failed to open local file %v", err)
}
defer file.Close()
bufReader := bufio.NewReader(file)
content, err := io.ReadAll(bufReader)
if err != nil {
log.Fatalf("failed to read local file %v", err)
}
// 计算每个分片的大小
chunkSize := len(content) / count
if chunkSize == 0 {
chunkSize = 1
}
// 启动多个goroutine进行分片上传
for i := 0; i < count; i++ {
start := i * chunkSize
end := start + chunkSize
if i == count-1 {
end = len(content)
}
wg.Add(1)
go func(partNumber int, start, end int) {
defer wg.Done()
// 创建分片上传请求
partRequest := &oss.UploadPartRequest{
Bucket: oss.Ptr(bucketName), // 目标存储空间名称
Key: oss.Ptr(objectName), // 目标对象名称
PartNumber: int32(partNumber), // 分片编号
UploadId: oss.Ptr(uploadId), // 上传ID
Body: bytes.NewReader(content[start:end]), // 分片内容
}
// 发送分片上传请求
partResult, err := a.client.UploadPart(ctx, partRequest)
if err != nil {
log.Fatalf("failed to upload part %d: %v", partNumber, err)
}
// 记录分片上传结果
part := oss.UploadPart{
PartNumber: partRequest.PartNumber,
ETag: partResult.ETag,
}
// 使用互斥锁保护共享数据
mu.Lock()
parts = append(parts, part)
mu.Unlock()
}(i+1, start, end)
}
// 等待所有goroutine完成
wg.Wait()
// 完成分片上传请求
request := &oss.CompleteMultipartUploadRequest{
Bucket: oss.Ptr(bucketName),
Key: oss.Ptr(objectName),
UploadId: oss.Ptr(uploadId),
CompleteMultipartUpload: &oss.CompleteMultipartUpload{
Parts: parts,
},
}
result, err := a.client.CompleteMultipartUpload(ctx, request)
if err != nil {
return nil, fmt.Errorf("failed to complete multipart upload, error: %v", err)
}
return &CompleteMultipartUploadResult{
VersionId: result.VersionId, // 版本号
ETag: result.ETag, // 对象的ETag
HashCRC64: result.HashCRC64, // 对象的Hash值
EncodingType: result.EncodingType, // 对象的编码格式
Location: result.Location, // 对象的存储位置
Bucket: result.Bucket, // 对象的存储空间名称
Key: result.Key, // 对象的名称
CallbackResult: result.CallbackResult, // 回调结果
}, nil
}
// DownloadFile 下载文件
func (a *AliOSS) DownloadFile(ctx context.Context, bucketName, objectName string) ([]byte, error) {
request := &oss.GetObjectRequest{
Bucket: oss.Ptr(bucketName), // 存储空间名称
Key: oss.Ptr(objectName), // 对象名称
}
result, err := a.client.GetObject(ctx, request)
if err != nil {
return nil, fmt.Errorf("failed to download file, error: %v", err)
}
defer result.Body.Close()
data, err := io.ReadAll(result.Body)
if err != nil {
return nil, fmt.Errorf("failed to read file content, error: %v", err)
}
return data, nil
}
// IsObjectExist 检查对象是否存在
func (a *AliOSS) IsObjectExist(ctx context.Context, bucket string, objectName string) (bool, error) {
result, err := a.client.IsObjectExist(ctx, bucket, objectName)
if err != nil {
return false, fmt.Errorf("failed to check object exist, error: %v", err)
}
return result, nil
}
// ListObjects 列出存储桶中的对象
func (a *AliOSS) ListObjects(ctx context.Context, bucketName string, maxKeys int32) ([]ObjectProperties, error) {
var continueToken = ""
request := &oss.ListObjectsV2Request{
Bucket: oss.Ptr(bucketName),
ContinuationToken: &continueToken,
MaxKeys: maxKeys,
}
var objects []ObjectProperties
for {
// 执行列举所有文件的操作
lsRes, err := a.client.ListObjectsV2(ctx, request)
if err != nil {
return nil, fmt.Errorf("failed to list objects, error: %v", err)
}
// 打印列举结果
for _, object := range lsRes.Contents {
objects = append(objects, ObjectProperties{
Key: object.Key,
Type: object.Type,
Size: object.Size,
LastModified: object.LastModified,
ETag: object.ETag,
StorageClass: object.StorageClass,
RestoreInfo: object.RestoreInfo,
TransitionTime: object.TransitionTime,
})
}
// 如果还有更多对象需要列举则更新continueToken标记并继续循环
if lsRes.IsTruncated {
continueToken = *lsRes.NextContinuationToken
} else {
break // 如果没有更多对象,退出循环
}
}
return objects, nil
}
// DeleteObject 删除对象
func (a *AliOSS) DeleteObject(ctx context.Context, bucketName, objectName string) (int, error) {
request := &oss.DeleteObjectRequest{
Bucket: oss.Ptr(bucketName), // 存储空间名称
Key: oss.Ptr(objectName), // 对象名称
}
result, err := a.client.DeleteObject(ctx, request)
if err != nil {
return -1, fmt.Errorf("failed to delete object, error: %v", err)
}
return result.StatusCode, nil
}
// RenameObject 重命名对象
func (a *AliOSS) RenameObject(ctx context.Context, destBucketName, destObjectName, srcObjectName, srcBucketName string) (int, error) {
// 创建文件拷贝器
c := a.client.NewCopier() // 构建拷贝对象的请求
copyRequest := &oss.CopyObjectRequest{
Bucket: oss.Ptr(destBucketName), // 目标存储空间名称
Key: oss.Ptr(destObjectName), // 目标对象名称
SourceKey: oss.Ptr(srcObjectName), // 源对象名称
SourceBucket: oss.Ptr(srcBucketName), // 源存储空间名称
StorageClass: oss.StorageClassStandard, // 指定存储类型为标准存储
}
// 执行拷贝对象的操作
_, err := c.Copy(ctx, copyRequest)
if err != nil {
return -1, fmt.Errorf("failed to copy object, error: %v", err)
}
// 构建删除对象的请求
deleteRequest := &oss.DeleteObjectRequest{
Bucket: oss.Ptr(srcBucketName), // 存储空间名称
Key: oss.Ptr(srcObjectName), // 要删除的对象名称
}
// 执行删除对象的操作
deleteResult, err := a.client.DeleteObject(ctx, deleteRequest)
if err != nil {
return -1, fmt.Errorf("failed to delete object, error: %v", err)
}
return deleteResult.StatusCode, nil
}
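
For large local files, the multipart path can be driven directly; a rough sketch with placeholder credentials and an assumed local file path (not part of the commit):

package main

import (
    "context"
    "fmt"

    "schisandra-album-cloud-microservices/common/storage/config"
    "schisandra-album-cloud-microservices/common/storage/events"
    "schisandra-album-cloud-microservices/common/storage/storage"
)

func main() {
    cfg := &config.StorageConfig{
        Provider:   "ali",
        AccessKey:  "your-access-key", // placeholder
        SecretKey:  "your-secret-key", // placeholder
        Region:     "cn-hangzhou",
        BucketName: "my-album-bucket",
        Endpoint:   "oss-cn-hangzhou.aliyuncs.com",
    }
    ali, err := storage.NewAliOSS(cfg, events.NewDispatcher())
    if err != nil {
        fmt.Println("init ali oss failed:", err)
        return
    }
    // Uploads /tmp/demo.mp4 (assumed path) in three concurrent parts.
    result, err := ali.MultipartUpload(context.Background(), cfg.BucketName, "videos/demo.mp4", "/tmp/demo.mp4")
    if err != nil {
        fmt.Println("multipart upload failed:", err)
        return
    }
    if result.ETag != nil {
        fmt.Println("multipart upload done, etag:", *result.ETag)
    }
}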

View File

@@ -0,0 +1,77 @@
package storage
import (
"context"
"io"
"time"
)
type BucketProperties struct {
Name *string
Location *string
CreationDate *time.Time
StorageClass *string
ExtranetEndpoint *string
IntranetEndpoint *string
Region *string
ResourceGroupId *string
}
// BucketStat 通用存储桶统计信息
type BucketStat struct {
Storage int64
ObjectCount int64
LastModified int64
}
// BucketInfo 通用存储桶信息
type BucketInfo struct {
Name string
Location string
CreationDate *time.Time
}
type PutObjectResult struct {
ContentMD5 *string
ETag *string
HashCRC64 *string
VersionId *string
CallbackResult map[string]any
}
type CompleteMultipartUploadResult struct {
VersionId *string
HashCRC64 *string
EncodingType *string
Location *string
Bucket *string
Key *string
ETag *string
CallbackResult map[string]any
}
type ObjectProperties struct {
Key *string
Type *string
Size int64
ETag *string
LastModified *time.Time
StorageClass *string
RestoreInfo *string
TransitionTime *time.Time
}
// Service 定义存储服务接口
type Service interface {
CreateBucket(ctx context.Context, name string) (string, error)
ListBuckets(ctx context.Context, prefix string, maxKeys int32, marker string) ([]BucketProperties, error)
ListBucketsPage(ctx context.Context) ([]BucketProperties, error)
IsBucketExist(ctx context.Context, name string) (bool, error)
GetBucketStat(ctx context.Context, name string) (*BucketStat, error)
GetBucketInfo(ctx context.Context, name string) (*BucketInfo, error)
DeleteBucket(ctx context.Context, name string) int
UploadFileSimple(ctx context.Context, bucketName string, objectName string, fileData io.Reader, metadata map[string]string) (*PutObjectResult, error)
MultipartUpload(ctx context.Context, bucketName, objectName string, filePath string) (*CompleteMultipartUploadResult, error)
IsObjectExist(ctx context.Context, bucket string, objectName string) (bool, error)
ListObjects(ctx context.Context, bucketName string, maxKeys int32) ([]ObjectProperties, error)
DeleteObject(ctx context.Context, bucketName, objectName string) (int, error)
RenameObject(ctx context.Context, destBucketName, destObjectName, srcObjectName, srcBucketName string) (int, error)
}

View File

@@ -0,0 +1,19 @@
package storage
import (
"github.com/tencentyun/cos-go-sdk-v5"
"schisandra-album-cloud-microservices/common/storage/events"
"schisandra-album-cloud-microservices/common/storage/config"
)
type TencentCOS struct {
client *cos.Client
bucket string
dispatcher events.Dispatcher
}
// NewTencentCOS 创建腾讯云 COS 实例
func NewTencentCOS(config *config.StorageConfig, dispatcher events.Dispatcher) (*TencentCOS, error) {
return nil, nil
}

View File

@@ -0,0 +1,65 @@
package utils
import (
"crypto"
"encoding/hex"
"fmt"
"hash"
"io"
"os"
)
// SupportedHashFuncs 定义支持的哈希函数类型
var SupportedHashFuncs = map[string]func() hash.Hash{
"md5": crypto.MD5.New,
"sha1": crypto.SHA1.New,
"sha256": crypto.SHA256.New,
"sha512": crypto.SHA512.New,
}
// CalculateFileHash 根据指定的哈希算法计算文件的哈希值
func CalculateFileHash(filePath string, algorithm string) (string, error) {
// 获取对应的哈希函数
hashFunc, exists := SupportedHashFuncs[algorithm]
if !exists {
return "", fmt.Errorf("unsupported hash algorithm: %s", algorithm)
}
// 打开文件
file, err := os.Open(filePath)
if err != nil {
return "", fmt.Errorf("failed to open file: %w", err)
}
defer file.Close()
// 创建哈希对象
h := hashFunc()
// 计算哈希值
if _, err := io.Copy(h, file); err != nil {
return "", fmt.Errorf("failed to calculate hash: %w", err)
}
// 返回哈希值的十六进制字符串
return hex.EncodeToString(h.Sum(nil)), nil
}
// CalculateStreamHash 计算输入流的哈希值
func CalculateStreamHash(reader io.Reader, algorithm string) (string, error) {
// 获取对应的哈希函数
hashFunc, exists := SupportedHashFuncs[algorithm]
if !exists {
return "", fmt.Errorf("unsupported hash algorithm: %s", algorithm)
}
// 创建哈希对象
h := hashFunc()
// 从输入流计算哈希值
if _, err := io.Copy(h, reader); err != nil {
return "", fmt.Errorf("failed to calculate hash: %w", err)
}
// 返回哈希值的十六进制字符串
return hex.EncodeToString(h.Sum(nil)), nil
}
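
A small sketch of the hash helpers (the import path is assumed from the package layout; not part of the commit), e.g. for integrity checks before or after an upload:

package main

import (
    "fmt"
    "strings"

    "schisandra-album-cloud-microservices/common/storage/utils"
)

func main() {
    // SHA-256 of a local file, e.g. to compare against the stored object.
    fileHash, err := utils.CalculateFileHash("/tmp/demo.mp4", "sha256")
    if err != nil {
        fmt.Println("hash file failed:", err)
        return
    }
    fmt.Println("file sha256:", fileHash)

    // MD5 of an arbitrary reader, useful when the payload never touches disk.
    streamHash, err := utils.CalculateStreamHash(strings.NewReader("hello oss"), "md5")
    if err != nil {
        fmt.Println("hash stream failed:", err)
        return
    }
    fmt.Println("stream md5:", streamHash)
}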

63
go.mod
View File

@@ -3,16 +3,17 @@ module schisandra-album-cloud-microservices
go 1.23.4
require (
github.com/ArtisanCloud/PowerLibs/v3 v3.2.6
github.com/ArtisanCloud/PowerWeChat/v3 v3.2.58
github.com/ArtisanCloud/PowerLibs/v3 v3.3.1
github.com/ArtisanCloud/PowerWeChat/v3 v3.3.3
github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.2.0
github.com/asjdf/gorm-cache v1.2.3
github.com/casbin/casbin/v2 v2.102.0
github.com/casbin/casbin/v2 v2.103.0
github.com/casbin/gorm-adapter/v3 v3.32.0
github.com/ccpwcn/kgo v1.2.3
github.com/ccpwcn/kgo v1.2.8
github.com/chenmingyong0423/go-mongox/v2 v2.0.0
github.com/golang-jwt/jwt/v5 v5.2.1
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0
github.com/lionsoul2014/ip2region/binding/golang v0.0.0-20240510055607-89e20ab7b6c6
github.com/lionsoul2014/ip2region/binding/golang v0.0.0-20241220152942-06eb5c6e8230
github.com/lxzan/gws v1.8.8
github.com/microcosm-cc/bluemonday v1.0.27
github.com/mssola/useragent v1.0.0
@@ -20,15 +21,16 @@ require (
github.com/pelletier/go-toml/v2 v2.2.3
github.com/pkg6/go-sms v0.1.2
github.com/redis/go-redis/v9 v9.7.0
github.com/tencentyun/cos-go-sdk-v5 v0.7.60
github.com/wenlng/go-captcha-assets v1.0.1
github.com/wenlng/go-captcha/v2 v2.0.2
github.com/yitter/idgenerator-go v1.3.3
github.com/zeromicro/go-zero v1.7.4
github.com/zeromicro/go-zero v1.7.6
github.com/zmexing/go-sensitive-word v1.3.0
go.mongodb.org/mongo-driver/v2 v2.0.0
golang.org/x/crypto v0.31.0
golang.org/x/crypto v0.32.0
golang.org/x/text v0.21.0
google.golang.org/grpc v1.69.2
google.golang.org/grpc v1.69.4
gorm.io/driver/mysql v1.5.7
gorm.io/gen v0.3.26
gorm.io/gorm v1.25.12
@@ -43,40 +45,42 @@ require (
github.com/aymerick/douceur v0.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bluele/gcache v0.0.2 // indirect
github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect
github.com/casbin/govaluate v1.2.0 // indirect
github.com/bmatcuk/doublestar/v4 v4.8.0 // indirect
github.com/casbin/govaluate v1.3.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/clbanning/mxj v1.8.4 // indirect
github.com/clbanning/mxj/v2 v2.7.0 // indirect
github.com/cloudflare/circl v1.5.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dolthub/maphash v0.1.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.7 // indirect
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
github.com/glebarez/go-sqlite v1.22.0 // indirect
github.com/glebarez/sqlite v1.11.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.23.0 // indirect
github.com/go-playground/validator/v10 v10.24.0 // indirect
github.com/go-sql-driver/mysql v1.8.1 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect
github.com/golang-sql/sqlexp v0.1.0 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/css v1.0.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/imroc/req/v3 v3.49.0 // indirect
github.com/imroc/req/v3 v3.49.1 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/pgx/v5 v5.7.1 // indirect
github.com/jackc/pgx/v5 v5.7.2 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
@@ -85,14 +89,16 @@ require (
github.com/klauspost/compress v1.17.11 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/lib/pq v1.10.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/microsoft/go-mssqldb v1.8.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mozillazg/go-httpheader v0.4.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/onsi/ginkgo/v2 v2.22.1 // indirect
github.com/onsi/ginkgo/v2 v2.22.2 // indirect
github.com/openzipkin/zipkin-go v0.4.3 // indirect
github.com/orcaman/concurrent-map/v2 v2.0.1 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
@@ -122,29 +128,30 @@ require (
go.opentelemetry.io/otel/metric v1.33.0 // indirect
go.opentelemetry.io/otel/sdk v1.33.0 // indirect
go.opentelemetry.io/otel/trace v1.33.0 // indirect
go.opentelemetry.io/proto/otlp v1.4.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
go.uber.org/mock v0.5.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 // indirect
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect
golang.org/x/image v0.23.0 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/net v0.34.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/tools v0.28.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb // indirect
google.golang.org/protobuf v1.36.0 // indirect
golang.org/x/sys v0.29.0 // indirect
golang.org/x/time v0.8.0 // indirect
golang.org/x/tools v0.29.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422 // indirect
google.golang.org/protobuf v1.36.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
gorm.io/datatypes v1.2.5 // indirect
gorm.io/driver/postgres v1.5.11 // indirect
gorm.io/driver/sqlserver v1.5.4 // indirect
gorm.io/hints v1.1.2 // indirect
modernc.org/libc v1.61.4 // indirect
modernc.org/mathutil v1.6.0 // indirect
modernc.org/memory v1.8.0 // indirect
modernc.org/sqlite v1.34.3 // indirect
modernc.org/libc v1.61.7 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.8.1 // indirect
modernc.org/sqlite v1.34.4 // indirect
)

184
go.sum
View File

@@ -1,13 +1,11 @@
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/ArtisanCloud/PowerLibs/v3 v3.2.6 h1:xNDXBJ1VNYAEgs4UG/lSygzU66/XG3mTA7mm/qE//NY=
github.com/ArtisanCloud/PowerLibs/v3 v3.2.6/go.mod h1:xFGsskCnzAu+6rFEJbGVAlwhrwZPXAny6m7j71S/B5k=
github.com/ArtisanCloud/PowerLibs/v3 v3.3.1 h1:SsxBygxATQpFS92pKuVtGrgdawwsscj9Y0M0jks9rTo=
github.com/ArtisanCloud/PowerLibs/v3 v3.3.1/go.mod h1:xFGsskCnzAu+6rFEJbGVAlwhrwZPXAny6m7j71S/B5k=
github.com/ArtisanCloud/PowerSocialite/v3 v3.0.7 h1:P+erNlErr+X2v7Et+yTWaTfIRhw+HfpAPdvNIEwk9Gw=
github.com/ArtisanCloud/PowerSocialite/v3 v3.0.7/go.mod h1:VZQNCvcK/rldF3QaExiSl1gJEAkyc5/I8RLOd3WFZq4=
github.com/ArtisanCloud/PowerWeChat/v3 v3.2.57 h1:6IEFuaPJdB2HVGePyInVN9KuSiXk83Kfls4rYZd0mXQ=
github.com/ArtisanCloud/PowerWeChat/v3 v3.2.57/go.mod h1:D2cB1wtwC1YgzYT1Ni8NWS5wJCm5n1T18TybXkFlwvo=
github.com/ArtisanCloud/PowerWeChat/v3 v3.2.58 h1:EkU5bWuy4irw23EooJliTjA/ucSnLaZIB9Ab3DyVweo=
github.com/ArtisanCloud/PowerWeChat/v3 v3.2.58/go.mod h1:D2cB1wtwC1YgzYT1Ni8NWS5wJCm5n1T18TybXkFlwvo=
github.com/ArtisanCloud/PowerWeChat/v3 v3.3.3 h1:DoXg2PcUJtVhC9Ly28C1Xhmug2Si023VbTBRoY49xPY=
github.com/ArtisanCloud/PowerWeChat/v3 v3.3.3/go.mod h1:J9Soww8NJcB5DVAJSrpjEpAj0y7bdwBD1wdpR8xPeqk=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA=
@@ -31,6 +29,9 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mx
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM=
github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.2.0 h1:gUWBCekWIFWYK7jXhRvPHIa7mRhJih2BGPjzvzodO18=
github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.2.0/go.mod h1:FTzydeQVmR24FI0D6XWUOMKckjXehM/jgMn1xC+DA9M=
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/asjdf/gorm-cache v1.2.3 h1:h7GAMITzk6DdpOlAGlF0dUt25N8fK4R6zeQyO0pMqlA=
@@ -42,26 +43,28 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q=
github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bmatcuk/doublestar/v4 v4.8.0 h1:DSXtrypQddoug1459viM9X9D3dp1Z7993fw36I2kNcQ=
github.com/bmatcuk/doublestar/v4 v4.8.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/casbin/casbin/v2 v2.102.0 h1:weq9iSThUSL21SH3VrwoKa2DgRsaYMfjRNX/yOU3Foo=
github.com/casbin/casbin/v2 v2.102.0/go.mod h1:LO7YPez4dX3LgoTCqSQAleQDo0S0BeZBDxYnPUl95Ng=
github.com/casbin/casbin/v2 v2.103.0 h1:dHElatNXNrr8XcseUov0ZSiWjauwmZZE6YMV3eU1yic=
github.com/casbin/casbin/v2 v2.103.0/go.mod h1:Ee33aqGrmES+GNL17L0h9X28wXuo829wnNUnS0edAco=
github.com/casbin/gorm-adapter/v3 v3.32.0 h1:Au+IOILBIE9clox5BJhI2nA3p9t7Ep1ePlupdGbGfus=
github.com/casbin/gorm-adapter/v3 v3.32.0/go.mod h1:Zre/H8p17mpv5U3EaWgPoxLILLdXO3gHW5aoQQpUDZI=
github.com/casbin/govaluate v1.2.0 h1:wXCXFmqyY+1RwiKfYo3jMKyrtZmOL3kHwaqDyCPOYak=
github.com/casbin/govaluate v1.2.0/go.mod h1:G/UnbIjZk/0uMNaLwZZmFQrR72tYRZWQkO70si/iR7A=
github.com/ccpwcn/kgo v1.2.3 h1:lcCO26EH/FcAhjuVgEkkJ2YcEOQd5TiFYAak9qwYknI=
github.com/ccpwcn/kgo v1.2.3/go.mod h1:y6G244zGfW95c6aCcw00TdZR6JUfBCmQ4acJcFdaktA=
github.com/casbin/govaluate v1.3.0 h1:VA0eSY0M2lA86dYd5kPPuNZMUD9QkWnOCnavGrw9myc=
github.com/casbin/govaluate v1.3.0/go.mod h1:G/UnbIjZk/0uMNaLwZZmFQrR72tYRZWQkO70si/iR7A=
github.com/ccpwcn/kgo v1.2.8 h1:BP7mHtv5qsNAMJ0vANlbPgN0ou8RAvcnC6g7jqZy5uQ=
github.com/ccpwcn/kgo v1.2.8/go.mod h1:y6G244zGfW95c6aCcw00TdZR6JUfBCmQ4acJcFdaktA=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenmingyong0423/go-mongox/v2 v2.0.0 h1:76n276epp1cfKSbCwQRDGcpzkMTlzIXWM2dZEbbfyG8=
github.com/chenmingyong0423/go-mongox/v2 v2.0.0/go.mod h1:bGxuW2xChkIq6tX3tphYYjHCZOkaoCOowJKIO2Q26nQ=
github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I=
github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
github.com/clbanning/mxj/v2 v2.7.0 h1:WA/La7UGCanFe5NpHF0Q3DNtnCsVoxbPKuyBNHWRyME=
github.com/clbanning/mxj/v2 v2.7.0/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s=
github.com/cloudflare/circl v1.5.0 h1:hxIWksrX6XN5a1L2TI/h53AGPhNHoUBo+TD1ms9+pys=
@@ -79,8 +82,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA=
github.com/gabriel-vasile/mimetype v1.4.7/go.mod h1:GDlAgAyIRT27BhFl53XNAFtfjzOkLaF35JdEG0P7LtU=
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=
@@ -97,12 +100,11 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o=
github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-playground/validator/v10 v10.24.0 h1:KHQckvo8G6hlWnrPX4NJJ+aBfWNAE/HH+qdL2cBpCmg=
github.com/go-playground/validator/v10 v10.24.0/go.mod h1:GGzBIJMuE98Ic/kJsBXbz1x/7cByt++cQ+YOuDM5wus=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
@@ -124,11 +126,16 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -137,8 +144,6 @@ github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
@@ -150,14 +155,14 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/imroc/req/v3 v3.49.0 h1:5Rac2qvz7Dq0E3PeBo/c2szV3hagPQIGLoHtfBmYhu4=
github.com/imroc/req/v3 v3.49.0/go.mod h1:XZf4t94DNJzcA0UOBlA68hmSrWsAyvN407ADdH4mzCA=
github.com/imroc/req/v3 v3.49.1 h1:Nvwo02riiPEzh74ozFHeEJrtjakFxnoWNR3YZYuQm9U=
github.com/imroc/req/v3 v3.49.1/go.mod h1:tsOk8K7zI6cU4xu/VWCZVtq9Djw9IWm4MslKzme5woU=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs=
github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA=
github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI=
github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
@@ -186,13 +191,12 @@ github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lionsoul2014/ip2region/binding/golang v0.0.0-20240510055607-89e20ab7b6c6 h1:YeIGErDiB/fhmNsJy0cfjoT8XnRNT9hb19xZ4MvWQDU=
github.com/lionsoul2014/ip2region/binding/golang v0.0.0-20240510055607-89e20ab7b6c6/go.mod h1:C5LA5UO2ZXJrLaPLYtE1wUJMiyd/nwWaCO5cw/2pSHs=
github.com/lionsoul2014/ip2region/binding/golang v0.0.0-20241220152942-06eb5c6e8230 h1:B0oaMTAQKDZd8cwYT0qsAI7+c3KbFeBNA8GhgoBMXWw=
github.com/lionsoul2014/ip2region/binding/golang v0.0.0-20241220152942-06eb5c6e8230/go.mod h1:C5LA5UO2ZXJrLaPLYtE1wUJMiyd/nwWaCO5cw/2pSHs=
github.com/lxzan/gws v1.8.8 h1:st193ZG8qN8sSw8/g/UituFhs7etmKzS7jUqhijg5wM=
github.com/lxzan/gws v1.8.8/go.mod h1:FcGeRMB7HwGuTvMLR24ku0Zx0p6RXqeKASeMc4VYgi4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
@@ -203,6 +207,9 @@ github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB
github.com/microsoft/go-mssqldb v1.7.2/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA=
github.com/microsoft/go-mssqldb v1.8.0 h1:7cyZ/AT7ycDsEoWPIXibd+aVKFtteUNhDGf3aobP+tw=
github.com/microsoft/go-mssqldb v1.8.0/go.mod h1:6znkekS3T2vp0waiMhen4GPU1BiAsrP+iXHcE7a7rFo=
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -210,6 +217,9 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60=
github.com/mozillazg/go-httpheader v0.4.0 h1:aBn6aRXtFzyDLZ4VIRLsZbbJloagQfMnCiYgOq6hK4w=
github.com/mozillazg/go-httpheader v0.4.0/go.mod h1:PuT8h0pw6efvp8ZeUec1Rs7dwjK08bt6gKSReGMqtdA=
github.com/mssola/useragent v1.0.0 h1:WRlDpXyxHDNfvZaPEut5Biveq86Ze4o4EMffyMxmH5o=
github.com/mssola/useragent v1.0.0/go.mod h1:hz9Cqz4RXusgg1EdI4Al0INR62kP7aPSRNHnpU+b85Y=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
@@ -218,13 +228,10 @@ github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdh
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/nicksnyder/go-i18n/v2 v2.4.1 h1:zwzjtX4uYyiaU02K5Ia3zSkpJZrByARkRB4V3YPrr0g=
github.com/nicksnyder/go-i18n/v2 v2.4.1/go.mod h1:++Pl70FR6Cki7hdzZRnEEqdc2dJt+SAGotyFg/SvZMk=
github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM=
github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM=
github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8=
github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc=
github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg=
github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c=
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
@@ -282,6 +289,10 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563/go.mod h1:uom4Nvi9W+Qkom0exYiJ9VWJjXwyxtPYTkKkaLMlfE0=
github.com/tencentyun/cos-go-sdk-v5 v0.7.60 h1:/e/tmvRmfKexr/QQIBzWhOkZWsmY3EK72NrI6G/Tv0o=
github.com/tencentyun/cos-go-sdk-v5 v0.7.60/go.mod h1:8+hG+mQMuRP/OIS9d83syAvXvrMj9HhkND6Q1fLghw0=
github.com/wenlng/go-captcha-assets v1.0.1 h1:AdjRFMKmadPRWRTv0XEYfjDvcaayZ2yExITDvlK/7bk=
github.com/wenlng/go-captcha-assets v1.0.1/go.mod h1:yQqc7rRbxgLCg+tWtVp+7Y317D1wIZDan/yIwt8wSac=
github.com/wenlng/go-captcha/v2 v2.0.2 h1:8twz6pI6xZwPvEGFezoFX395oFso1MuOlJt/tLiv7pk=
@@ -299,11 +310,10 @@ github.com/yitter/idgenerator-go v1.3.3/go.mod h1:VVjbqFjGUsIkaXVkXEdmx1LiXUL3K1
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zeromicro/go-zero v1.7.4 h1:lyIUsqbpVRzM4NmXu5pRM3XrdRdUuWOkQmHiNmJF0VU=
github.com/zeromicro/go-zero v1.7.4/go.mod h1:jmv4hTdUBkDn6kxgI+WrKQw0q6LKxDElGPMfCLOeeEY=
github.com/zeromicro/go-zero v1.7.6 h1:SArK4xecdrpVY3ZFJcbc0IZCx+NuWyHNjCv9f1+Gwrc=
github.com/zeromicro/go-zero v1.7.6/go.mod h1:SmGykRm5e0Z4CGNj+GaSKDffaHzQV56fel0FkymTLlE=
github.com/zmexing/go-sensitive-word v1.3.0 h1:dB9S9kNklksOODGLLAov0RaVCwC2w9Kwxz6NZMdM6rk=
github.com/zmexing/go-sensitive-word v1.3.0/go.mod h1:wkNIpkq1iPOe3l7l83zvnnV5mm20jfj2x8V8kjOTsUM=
go.mongodb.org/mongo-driver v1.17.1 h1:Wic5cJIwJgSpBhe3lx3+/RybR5PiYRMpVFgO7cOHyIM=
go.mongodb.org/mongo-driver/v2 v2.0.0 h1:Jfd7XpdZa9yk3eY774bO7SWVb30noLSirL9nKTpavhI=
go.mongodb.org/mongo-driver/v2 v2.0.0/go.mod h1:nSjmNq4JUstE8IRZKTktLgMHM4F1fccL6HGX1yh+8RA=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
@@ -330,8 +340,8 @@ go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4Jjx
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -351,12 +361,10 @@ golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4=
golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo=
golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
golang.org/x/image v0.16.0/go.mod h1:ugSZItdV4nOxyqp56HmXwH0Ry0nBCpjnZdpDaIHdoPs=
golang.org/x/image v0.23.0 h1:HseQ7c2OpPKTPVzNjG5fwJsOTCiiwS4QdsYi5XU6H68=
golang.org/x/image v0.23.0/go.mod h1:wJJBTdLfCCf3tiHa1fNxpZmUI4mmoZvwMCPP0ddoNKY=
@@ -378,10 +386,8 @@ golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -393,7 +399,6 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -403,8 +408,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -436,23 +441,18 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 h1:ChAdCYNQFDk5fYvFZMywKLIijG7TC2m1C2CMEu11G3o=
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484/go.mod h1:KRUmxRI4JmbpAm8gcZM4Jsffi859fo5LQjILwuqj9z8=
google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb h1:B7GIB7sr443wZ/EAEl7VZjmh1V6qzkt5V+RYcUYtS1U=
google.golang.org/genproto/googleapis/api v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:E5//3O5ZIG2l71Xnt+P/CYUY8Bxs8E7WMoZ9tlcMbAY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb h1:3oy2tynMOP1QbTC0MsNNAV+Se8M2Bd0A5+x1QHyw+pI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241219192143-6b3ec007d9bb/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI=
google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24=
google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422 h1:3UsHvIr4Wc2aW4brOaSCmcxh9ksica6fHEr8P1XhkYw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -492,29 +492,27 @@ gorm.io/plugin/optimisticlock v1.1.3 h1:uFK8zz+Ln6ju3vGkTd1LY3xR2VBmMxjdU12KBb58
gorm.io/plugin/optimisticlock v1.1.3/go.mod h1:S+MH7qnHGQHxDBc9phjgN+DpNPn/qESd1q69fA3dtkg=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
modernc.org/cc/v4 v4.23.1 h1:WqJoPL3x4cUufQVHkXpXX7ThFJ1C4ik80i2eXEXbhD8=
modernc.org/cc/v4 v4.23.1/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
modernc.org/ccgo/v4 v4.23.1 h1:N49a7JiWGWV7lkPE4yYcvjkBGZQi93/JabRYjdWmJXc=
modernc.org/ccgo/v4 v4.23.1/go.mod h1:JoIUegEIfutvoWV/BBfDFpPpfR2nc3U0jKucGcbmwDU=
modernc.org/cc/v4 v4.24.4 h1:TFkx1s6dCkQpd6dKurBNmpo+G8Zl4Sq/ztJ+2+DEsh0=
modernc.org/cc/v4 v4.24.4/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.23.10 h1:DnDZT/H6TtoJvQmVf7d8W+lVqEZpIJY/+0ENFh1LIHE=
modernc.org/ccgo/v4 v4.23.10/go.mod h1:vdN4h2WR5aEoNondUx26K7G8X+nuBscYnAEWSRmN2/0=
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
modernc.org/gc/v2 v2.5.0 h1:bJ9ChznK1L1mUtAQtxi0wi5AtAs5jQuw4PrPHO5pb6M=
modernc.org/gc/v2 v2.5.0/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
modernc.org/libc v1.61.4 h1:wVyqEx6tlltte9lPTjq0kDAdtdM9c4JH8rU6M1ZVawA=
modernc.org/libc v1.61.4/go.mod h1:VfXVuM/Shh5XsMNrh3C6OkfL78G3loa4ZC/Ljv9k7xc=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=
modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
modernc.org/sqlite v1.34.2 h1:J9n76TPsfYYkFkZ9Uy1QphILYifiVEwwOT7yP5b++2Y=
modernc.org/sqlite v1.34.2/go.mod h1:dnR723UrTtjKpoHCAMN0Q/gZ9MT4r+iRvIBb9umWFkU=
modernc.org/sqlite v1.34.3 h1:494MIwJKBLd0tErBYkRar2HvEpy04Bl0ykPEm4XLhbo=
modernc.org/sqlite v1.34.3/go.mod h1:dnR723UrTtjKpoHCAMN0Q/gZ9MT4r+iRvIBb9umWFkU=
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/gc/v2 v2.6.1 h1:+Qf6xdG8l7B27TQ8D8lw/iFMUj1RXRBOuMUWziJOsk8=
modernc.org/gc/v2 v2.6.1/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
modernc.org/libc v1.61.7 h1:exz8rasFniviSgh3dH7QBnQHqYh9lolA5hVYfsiwkfo=
modernc.org/libc v1.61.7/go.mod h1:xspSrXRNVSfWfcfqgvZDVe/Hw5kv4FVC6IRfoms5v/0=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
modernc.org/memory v1.8.1 h1:HS1HRg1jEohnuONobEq2WrLEhLyw8+J42yLFTnllm2A=
modernc.org/memory v1.8.1/go.mod h1:ZbjSvMO5NQ1A2i3bWeDiVMxIorXwdClKE/0SZ+BMotU=
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.34.4 h1:sjdARozcL5KJBvYQvLlZEmctRgW9xqIZc2ncN7PU0P8=
modernc.org/sqlite v1.34.4/go.mod h1:3QQFCG2SEMtc2nv+Wq4cQCH7Hjcg+p/RMlS1XK+zwbk=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=