✨ encapsulate object storage service operations
This commit is contained in:
@@ -25,11 +25,11 @@ var (
|
||||
ScaAuthUserSocial *scaAuthUserSocial
|
||||
ScaCommentLike *scaCommentLike
|
||||
ScaCommentReply *scaCommentReply
|
||||
ScaFileFolder *scaFileFolder
|
||||
ScaFileInfo *scaFileInfo
|
||||
ScaFileRecycle *scaFileRecycle
|
||||
ScaFileType *scaFileType
|
||||
ScaMessageReport *scaMessageReport
|
||||
ScaStorageConfig *scaStorageConfig
|
||||
ScaStorageInfo *scaStorageInfo
|
||||
ScaStorageTag *scaStorageTag
|
||||
ScaStorageTagInfo *scaStorageTagInfo
|
||||
ScaUserFollow *scaUserFollow
|
||||
ScaUserLevel *scaUserLevel
|
||||
ScaUserMessage *scaUserMessage
|
||||
@@ -45,11 +45,11 @@ func SetDefault(db *gorm.DB, opts ...gen.DOOption) {
|
||||
ScaAuthUserSocial = &Q.ScaAuthUserSocial
|
||||
ScaCommentLike = &Q.ScaCommentLike
|
||||
ScaCommentReply = &Q.ScaCommentReply
|
||||
ScaFileFolder = &Q.ScaFileFolder
|
||||
ScaFileInfo = &Q.ScaFileInfo
|
||||
ScaFileRecycle = &Q.ScaFileRecycle
|
||||
ScaFileType = &Q.ScaFileType
|
||||
ScaMessageReport = &Q.ScaMessageReport
|
||||
ScaStorageConfig = &Q.ScaStorageConfig
|
||||
ScaStorageInfo = &Q.ScaStorageInfo
|
||||
ScaStorageTag = &Q.ScaStorageTag
|
||||
ScaStorageTagInfo = &Q.ScaStorageTagInfo
|
||||
ScaUserFollow = &Q.ScaUserFollow
|
||||
ScaUserLevel = &Q.ScaUserLevel
|
||||
ScaUserMessage = &Q.ScaUserMessage
|
||||
@@ -66,11 +66,11 @@ func Use(db *gorm.DB, opts ...gen.DOOption) *Query {
|
||||
ScaAuthUserSocial: newScaAuthUserSocial(db, opts...),
|
||||
ScaCommentLike: newScaCommentLike(db, opts...),
|
||||
ScaCommentReply: newScaCommentReply(db, opts...),
|
||||
ScaFileFolder: newScaFileFolder(db, opts...),
|
||||
ScaFileInfo: newScaFileInfo(db, opts...),
|
||||
ScaFileRecycle: newScaFileRecycle(db, opts...),
|
||||
ScaFileType: newScaFileType(db, opts...),
|
||||
ScaMessageReport: newScaMessageReport(db, opts...),
|
||||
ScaStorageConfig: newScaStorageConfig(db, opts...),
|
||||
ScaStorageInfo: newScaStorageInfo(db, opts...),
|
||||
ScaStorageTag: newScaStorageTag(db, opts...),
|
||||
ScaStorageTagInfo: newScaStorageTagInfo(db, opts...),
|
||||
ScaUserFollow: newScaUserFollow(db, opts...),
|
||||
ScaUserLevel: newScaUserLevel(db, opts...),
|
||||
ScaUserMessage: newScaUserMessage(db, opts...),
|
||||
@@ -88,11 +88,11 @@ type Query struct {
|
||||
ScaAuthUserSocial scaAuthUserSocial
|
||||
ScaCommentLike scaCommentLike
|
||||
ScaCommentReply scaCommentReply
|
||||
ScaFileFolder scaFileFolder
|
||||
ScaFileInfo scaFileInfo
|
||||
ScaFileRecycle scaFileRecycle
|
||||
ScaFileType scaFileType
|
||||
ScaMessageReport scaMessageReport
|
||||
ScaStorageConfig scaStorageConfig
|
||||
ScaStorageInfo scaStorageInfo
|
||||
ScaStorageTag scaStorageTag
|
||||
ScaStorageTagInfo scaStorageTagInfo
|
||||
ScaUserFollow scaUserFollow
|
||||
ScaUserLevel scaUserLevel
|
||||
ScaUserMessage scaUserMessage
|
||||
@@ -111,11 +111,11 @@ func (q *Query) clone(db *gorm.DB) *Query {
|
||||
ScaAuthUserSocial: q.ScaAuthUserSocial.clone(db),
|
||||
ScaCommentLike: q.ScaCommentLike.clone(db),
|
||||
ScaCommentReply: q.ScaCommentReply.clone(db),
|
||||
ScaFileFolder: q.ScaFileFolder.clone(db),
|
||||
ScaFileInfo: q.ScaFileInfo.clone(db),
|
||||
ScaFileRecycle: q.ScaFileRecycle.clone(db),
|
||||
ScaFileType: q.ScaFileType.clone(db),
|
||||
ScaMessageReport: q.ScaMessageReport.clone(db),
|
||||
ScaStorageConfig: q.ScaStorageConfig.clone(db),
|
||||
ScaStorageInfo: q.ScaStorageInfo.clone(db),
|
||||
ScaStorageTag: q.ScaStorageTag.clone(db),
|
||||
ScaStorageTagInfo: q.ScaStorageTagInfo.clone(db),
|
||||
ScaUserFollow: q.ScaUserFollow.clone(db),
|
||||
ScaUserLevel: q.ScaUserLevel.clone(db),
|
||||
ScaUserMessage: q.ScaUserMessage.clone(db),
|
||||
@@ -141,11 +141,11 @@ func (q *Query) ReplaceDB(db *gorm.DB) *Query {
|
||||
ScaAuthUserSocial: q.ScaAuthUserSocial.replaceDB(db),
|
||||
ScaCommentLike: q.ScaCommentLike.replaceDB(db),
|
||||
ScaCommentReply: q.ScaCommentReply.replaceDB(db),
|
||||
ScaFileFolder: q.ScaFileFolder.replaceDB(db),
|
||||
ScaFileInfo: q.ScaFileInfo.replaceDB(db),
|
||||
ScaFileRecycle: q.ScaFileRecycle.replaceDB(db),
|
||||
ScaFileType: q.ScaFileType.replaceDB(db),
|
||||
ScaMessageReport: q.ScaMessageReport.replaceDB(db),
|
||||
ScaStorageConfig: q.ScaStorageConfig.replaceDB(db),
|
||||
ScaStorageInfo: q.ScaStorageInfo.replaceDB(db),
|
||||
ScaStorageTag: q.ScaStorageTag.replaceDB(db),
|
||||
ScaStorageTagInfo: q.ScaStorageTagInfo.replaceDB(db),
|
||||
ScaUserFollow: q.ScaUserFollow.replaceDB(db),
|
||||
ScaUserLevel: q.ScaUserLevel.replaceDB(db),
|
||||
ScaUserMessage: q.ScaUserMessage.replaceDB(db),
|
||||
@@ -161,11 +161,11 @@ type queryCtx struct {
|
||||
ScaAuthUserSocial IScaAuthUserSocialDo
|
||||
ScaCommentLike IScaCommentLikeDo
|
||||
ScaCommentReply IScaCommentReplyDo
|
||||
ScaFileFolder IScaFileFolderDo
|
||||
ScaFileInfo IScaFileInfoDo
|
||||
ScaFileRecycle IScaFileRecycleDo
|
||||
ScaFileType IScaFileTypeDo
|
||||
ScaMessageReport IScaMessageReportDo
|
||||
ScaStorageConfig IScaStorageConfigDo
|
||||
ScaStorageInfo IScaStorageInfoDo
|
||||
ScaStorageTag IScaStorageTagDo
|
||||
ScaStorageTagInfo IScaStorageTagInfoDo
|
||||
ScaUserFollow IScaUserFollowDo
|
||||
ScaUserLevel IScaUserLevelDo
|
||||
ScaUserMessage IScaUserMessageDo
|
||||
@@ -181,11 +181,11 @@ func (q *Query) WithContext(ctx context.Context) *queryCtx {
|
||||
ScaAuthUserSocial: q.ScaAuthUserSocial.WithContext(ctx),
|
||||
ScaCommentLike: q.ScaCommentLike.WithContext(ctx),
|
||||
ScaCommentReply: q.ScaCommentReply.WithContext(ctx),
|
||||
ScaFileFolder: q.ScaFileFolder.WithContext(ctx),
|
||||
ScaFileInfo: q.ScaFileInfo.WithContext(ctx),
|
||||
ScaFileRecycle: q.ScaFileRecycle.WithContext(ctx),
|
||||
ScaFileType: q.ScaFileType.WithContext(ctx),
|
||||
ScaMessageReport: q.ScaMessageReport.WithContext(ctx),
|
||||
ScaStorageConfig: q.ScaStorageConfig.WithContext(ctx),
|
||||
ScaStorageInfo: q.ScaStorageInfo.WithContext(ctx),
|
||||
ScaStorageTag: q.ScaStorageTag.WithContext(ctx),
|
||||
ScaStorageTagInfo: q.ScaStorageTagInfo.WithContext(ctx),
|
||||
ScaUserFollow: q.ScaUserFollow.WithContext(ctx),
|
||||
ScaUserLevel: q.ScaUserLevel.WithContext(ctx),
|
||||
ScaUserMessage: q.ScaUserMessage.WithContext(ctx),
|
||||
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaAuthMenu(db *gorm.DB, opts ...gen.DOOption) scaAuthMenu {
|
||||
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaAuthPermissionRule(db *gorm.DB, opts ...gen.DOOption) scaAuthPermissionRule {
|
||||
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaAuthRole(db *gorm.DB, opts ...gen.DOOption) scaAuthRole {
|
||||
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaAuthUser(db *gorm.DB, opts ...gen.DOOption) scaAuthUser {
|
||||
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaAuthUserDevice(db *gorm.DB, opts ...gen.DOOption) scaAuthUserDevice {
|
||||
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaAuthUserSocial(db *gorm.DB, opts ...gen.DOOption) scaAuthUserSocial {
|
||||
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaCommentLike(db *gorm.DB, opts ...gen.DOOption) scaCommentLike {
|
||||
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaCommentReply(db *gorm.DB, opts ...gen.DOOption) scaCommentReply {
|
||||
|
@@ -1,410 +0,0 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
)
|
||||
|
||||
func newScaFileFolder(db *gorm.DB, opts ...gen.DOOption) scaFileFolder {
|
||||
_scaFileFolder := scaFileFolder{}
|
||||
|
||||
_scaFileFolder.scaFileFolderDo.UseDB(db, opts...)
|
||||
_scaFileFolder.scaFileFolderDo.UseModel(&model.ScaFileFolder{})
|
||||
|
||||
tableName := _scaFileFolder.scaFileFolderDo.TableName()
|
||||
_scaFileFolder.ALL = field.NewAsterisk(tableName)
|
||||
_scaFileFolder.ID = field.NewInt64(tableName, "id")
|
||||
_scaFileFolder.FolderName = field.NewString(tableName, "folder_name")
|
||||
_scaFileFolder.ParentFolderID = field.NewInt64(tableName, "parent_folder_id")
|
||||
_scaFileFolder.FolderAddr = field.NewString(tableName, "folder_addr")
|
||||
_scaFileFolder.UserID = field.NewString(tableName, "user_id")
|
||||
_scaFileFolder.FolderSource = field.NewInt64(tableName, "folder_source")
|
||||
_scaFileFolder.CreatedAt = field.NewTime(tableName, "created_at")
|
||||
_scaFileFolder.UpdatedAt = field.NewTime(tableName, "updated_at")
|
||||
_scaFileFolder.DeletedAt = field.NewField(tableName, "deleted_at")
|
||||
|
||||
_scaFileFolder.fillFieldMap()
|
||||
|
||||
return _scaFileFolder
|
||||
}
|
||||
|
||||
type scaFileFolder struct {
|
||||
scaFileFolderDo
|
||||
|
||||
ALL field.Asterisk
|
||||
ID field.Int64 // 主键
|
||||
FolderName field.String // 文件夹名称
|
||||
ParentFolderID field.Int64 // 父文件夹编号
|
||||
FolderAddr field.String // 文件夹名称
|
||||
UserID field.String // 用户编号
|
||||
FolderSource field.Int64 // 文件夹来源 0相册 1 评论
|
||||
CreatedAt field.Time // 创建时间
|
||||
UpdatedAt field.Time // 更新时间
|
||||
DeletedAt field.Field // 删除时间
|
||||
|
||||
fieldMap map[string]field.Expr
|
||||
}
|
||||
|
||||
func (s scaFileFolder) Table(newTableName string) *scaFileFolder {
|
||||
s.scaFileFolderDo.UseTable(newTableName)
|
||||
return s.updateTableName(newTableName)
|
||||
}
|
||||
|
||||
func (s scaFileFolder) As(alias string) *scaFileFolder {
|
||||
s.scaFileFolderDo.DO = *(s.scaFileFolderDo.As(alias).(*gen.DO))
|
||||
return s.updateTableName(alias)
|
||||
}
|
||||
|
||||
func (s *scaFileFolder) updateTableName(table string) *scaFileFolder {
|
||||
s.ALL = field.NewAsterisk(table)
|
||||
s.ID = field.NewInt64(table, "id")
|
||||
s.FolderName = field.NewString(table, "folder_name")
|
||||
s.ParentFolderID = field.NewInt64(table, "parent_folder_id")
|
||||
s.FolderAddr = field.NewString(table, "folder_addr")
|
||||
s.UserID = field.NewString(table, "user_id")
|
||||
s.FolderSource = field.NewInt64(table, "folder_source")
|
||||
s.CreatedAt = field.NewTime(table, "created_at")
|
||||
s.UpdatedAt = field.NewTime(table, "updated_at")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
|
||||
s.fillFieldMap()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scaFileFolder) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
|
||||
_f, ok := s.fieldMap[fieldName]
|
||||
if !ok || _f == nil {
|
||||
return nil, false
|
||||
}
|
||||
_oe, ok := _f.(field.OrderExpr)
|
||||
return _oe, ok
|
||||
}
|
||||
|
||||
func (s *scaFileFolder) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 9)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["folder_name"] = s.FolderName
|
||||
s.fieldMap["parent_folder_id"] = s.ParentFolderID
|
||||
s.fieldMap["folder_addr"] = s.FolderAddr
|
||||
s.fieldMap["user_id"] = s.UserID
|
||||
s.fieldMap["folder_source"] = s.FolderSource
|
||||
s.fieldMap["created_at"] = s.CreatedAt
|
||||
s.fieldMap["updated_at"] = s.UpdatedAt
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
}
|
||||
|
||||
func (s scaFileFolder) clone(db *gorm.DB) scaFileFolder {
|
||||
s.scaFileFolderDo.ReplaceConnPool(db.Statement.ConnPool)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s scaFileFolder) replaceDB(db *gorm.DB) scaFileFolder {
|
||||
s.scaFileFolderDo.ReplaceDB(db)
|
||||
return s
|
||||
}
|
||||
|
||||
type scaFileFolderDo struct{ gen.DO }
|
||||
|
||||
type IScaFileFolderDo interface {
|
||||
gen.SubQuery
|
||||
Debug() IScaFileFolderDo
|
||||
WithContext(ctx context.Context) IScaFileFolderDo
|
||||
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
|
||||
ReplaceDB(db *gorm.DB)
|
||||
ReadDB() IScaFileFolderDo
|
||||
WriteDB() IScaFileFolderDo
|
||||
As(alias string) gen.Dao
|
||||
Session(config *gorm.Session) IScaFileFolderDo
|
||||
Columns(cols ...field.Expr) gen.Columns
|
||||
Clauses(conds ...clause.Expression) IScaFileFolderDo
|
||||
Not(conds ...gen.Condition) IScaFileFolderDo
|
||||
Or(conds ...gen.Condition) IScaFileFolderDo
|
||||
Select(conds ...field.Expr) IScaFileFolderDo
|
||||
Where(conds ...gen.Condition) IScaFileFolderDo
|
||||
Order(conds ...field.Expr) IScaFileFolderDo
|
||||
Distinct(cols ...field.Expr) IScaFileFolderDo
|
||||
Omit(cols ...field.Expr) IScaFileFolderDo
|
||||
Join(table schema.Tabler, on ...field.Expr) IScaFileFolderDo
|
||||
LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileFolderDo
|
||||
RightJoin(table schema.Tabler, on ...field.Expr) IScaFileFolderDo
|
||||
Group(cols ...field.Expr) IScaFileFolderDo
|
||||
Having(conds ...gen.Condition) IScaFileFolderDo
|
||||
Limit(limit int) IScaFileFolderDo
|
||||
Offset(offset int) IScaFileFolderDo
|
||||
Count() (count int64, err error)
|
||||
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileFolderDo
|
||||
Unscoped() IScaFileFolderDo
|
||||
Create(values ...*model.ScaFileFolder) error
|
||||
CreateInBatches(values []*model.ScaFileFolder, batchSize int) error
|
||||
Save(values ...*model.ScaFileFolder) error
|
||||
First() (*model.ScaFileFolder, error)
|
||||
Take() (*model.ScaFileFolder, error)
|
||||
Last() (*model.ScaFileFolder, error)
|
||||
Find() ([]*model.ScaFileFolder, error)
|
||||
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileFolder, err error)
|
||||
FindInBatches(result *[]*model.ScaFileFolder, batchSize int, fc func(tx gen.Dao, batch int) error) error
|
||||
Pluck(column field.Expr, dest interface{}) error
|
||||
Delete(...*model.ScaFileFolder) (info gen.ResultInfo, err error)
|
||||
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
Updates(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateFrom(q gen.SubQuery) gen.Dao
|
||||
Attrs(attrs ...field.AssignExpr) IScaFileFolderDo
|
||||
Assign(attrs ...field.AssignExpr) IScaFileFolderDo
|
||||
Joins(fields ...field.RelationField) IScaFileFolderDo
|
||||
Preload(fields ...field.RelationField) IScaFileFolderDo
|
||||
FirstOrInit() (*model.ScaFileFolder, error)
|
||||
FirstOrCreate() (*model.ScaFileFolder, error)
|
||||
FindByPage(offset int, limit int) (result []*model.ScaFileFolder, count int64, err error)
|
||||
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
|
||||
Scan(result interface{}) (err error)
|
||||
Returning(value interface{}, columns ...string) IScaFileFolderDo
|
||||
UnderlyingDB() *gorm.DB
|
||||
schema.Tabler
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Debug() IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Debug())
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) WithContext(ctx context.Context) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.WithContext(ctx))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) ReadDB() IScaFileFolderDo {
|
||||
return s.Clauses(dbresolver.Read)
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) WriteDB() IScaFileFolderDo {
|
||||
return s.Clauses(dbresolver.Write)
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Session(config *gorm.Session) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Session(config))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Clauses(conds ...clause.Expression) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Clauses(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Returning(value interface{}, columns ...string) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Returning(value, columns...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Not(conds ...gen.Condition) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Not(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Or(conds ...gen.Condition) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Or(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Select(conds ...field.Expr) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Select(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Where(conds ...gen.Condition) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Where(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Order(conds ...field.Expr) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Order(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Distinct(cols ...field.Expr) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Distinct(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Omit(cols ...field.Expr) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Omit(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Join(table schema.Tabler, on ...field.Expr) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Join(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.LeftJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.RightJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Group(cols ...field.Expr) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Group(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Having(conds ...gen.Condition) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Having(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Limit(limit int) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Limit(limit))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Offset(offset int) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Offset(offset))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Scopes(funcs...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Unscoped() IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Unscoped())
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Create(values ...*model.ScaFileFolder) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Create(values)
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) CreateInBatches(values []*model.ScaFileFolder, batchSize int) error {
|
||||
return s.DO.CreateInBatches(values, batchSize)
|
||||
}
|
||||
|
||||
// Save : !!! underlying implementation is different with GORM
|
||||
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
|
||||
func (s scaFileFolderDo) Save(values ...*model.ScaFileFolder) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Save(values)
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) First() (*model.ScaFileFolder, error) {
|
||||
if result, err := s.DO.First(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileFolder), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Take() (*model.ScaFileFolder, error) {
|
||||
if result, err := s.DO.Take(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileFolder), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Last() (*model.ScaFileFolder, error) {
|
||||
if result, err := s.DO.Last(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileFolder), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Find() ([]*model.ScaFileFolder, error) {
|
||||
result, err := s.DO.Find()
|
||||
return result.([]*model.ScaFileFolder), err
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileFolder, err error) {
|
||||
buf := make([]*model.ScaFileFolder, 0, batchSize)
|
||||
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
|
||||
defer func() { results = append(results, buf...) }()
|
||||
return fc(tx, batch)
|
||||
})
|
||||
return results, err
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) FindInBatches(result *[]*model.ScaFileFolder, batchSize int, fc func(tx gen.Dao, batch int) error) error {
|
||||
return s.DO.FindInBatches(result, batchSize, fc)
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Attrs(attrs ...field.AssignExpr) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Attrs(attrs...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Assign(attrs ...field.AssignExpr) IScaFileFolderDo {
|
||||
return s.withDO(s.DO.Assign(attrs...))
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Joins(fields ...field.RelationField) IScaFileFolderDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Joins(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Preload(fields ...field.RelationField) IScaFileFolderDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Preload(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) FirstOrInit() (*model.ScaFileFolder, error) {
|
||||
if result, err := s.DO.FirstOrInit(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileFolder), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) FirstOrCreate() (*model.ScaFileFolder, error) {
|
||||
if result, err := s.DO.FirstOrCreate(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileFolder), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) FindByPage(offset int, limit int) (result []*model.ScaFileFolder, count int64, err error) {
|
||||
result, err = s.Offset(offset).Limit(limit).Find()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if size := len(result); 0 < limit && 0 < size && size < limit {
|
||||
count = int64(size + offset)
|
||||
return
|
||||
}
|
||||
|
||||
count, err = s.Offset(-1).Limit(-1).Count()
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
|
||||
count, err = s.Count()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = s.Offset(offset).Limit(limit).Scan(result)
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Scan(result interface{}) (err error) {
|
||||
return s.DO.Scan(result)
|
||||
}
|
||||
|
||||
func (s scaFileFolderDo) Delete(models ...*model.ScaFileFolder) (result gen.ResultInfo, err error) {
|
||||
return s.DO.Delete(models)
|
||||
}
|
||||
|
||||
func (s *scaFileFolderDo) withDO(do gen.Dao) *scaFileFolderDo {
|
||||
s.DO = *do.(*gen.DO)
|
||||
return s
|
||||
}
|
@@ -1,422 +0,0 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
)
|
||||
|
||||
func newScaFileInfo(db *gorm.DB, opts ...gen.DOOption) scaFileInfo {
|
||||
_scaFileInfo := scaFileInfo{}
|
||||
|
||||
_scaFileInfo.scaFileInfoDo.UseDB(db, opts...)
|
||||
_scaFileInfo.scaFileInfoDo.UseModel(&model.ScaFileInfo{})
|
||||
|
||||
tableName := _scaFileInfo.scaFileInfoDo.TableName()
|
||||
_scaFileInfo.ALL = field.NewAsterisk(tableName)
|
||||
_scaFileInfo.ID = field.NewInt64(tableName, "id")
|
||||
_scaFileInfo.FileName = field.NewString(tableName, "file_name")
|
||||
_scaFileInfo.FileSize = field.NewFloat64(tableName, "file_size")
|
||||
_scaFileInfo.FileTypeID = field.NewInt64(tableName, "file_type_id")
|
||||
_scaFileInfo.UploadTime = field.NewTime(tableName, "upload_time")
|
||||
_scaFileInfo.FolderID = field.NewInt64(tableName, "folder_id")
|
||||
_scaFileInfo.UserID = field.NewString(tableName, "user_id")
|
||||
_scaFileInfo.FileSource = field.NewInt64(tableName, "file_source")
|
||||
_scaFileInfo.Status = field.NewInt64(tableName, "status")
|
||||
_scaFileInfo.CreatedAt = field.NewTime(tableName, "created_at")
|
||||
_scaFileInfo.UpdatedAt = field.NewTime(tableName, "updated_at")
|
||||
_scaFileInfo.DeletedAt = field.NewField(tableName, "deleted_at")
|
||||
|
||||
_scaFileInfo.fillFieldMap()
|
||||
|
||||
return _scaFileInfo
|
||||
}
|
||||
|
||||
type scaFileInfo struct {
|
||||
scaFileInfoDo
|
||||
|
||||
ALL field.Asterisk
|
||||
ID field.Int64 // 主键
|
||||
FileName field.String // 文件名
|
||||
FileSize field.Float64 // 文件大小
|
||||
FileTypeID field.Int64 // 文件类型编号
|
||||
UploadTime field.Time // 上传时间
|
||||
FolderID field.Int64 // 文件夹编号
|
||||
UserID field.String // 用户编号
|
||||
FileSource field.Int64 // 文件来源 0 相册 1 评论
|
||||
Status field.Int64 // 文件状态
|
||||
CreatedAt field.Time // 创建时间
|
||||
UpdatedAt field.Time // 更新时间
|
||||
DeletedAt field.Field // 删除时间
|
||||
|
||||
fieldMap map[string]field.Expr
|
||||
}
|
||||
|
||||
func (s scaFileInfo) Table(newTableName string) *scaFileInfo {
|
||||
s.scaFileInfoDo.UseTable(newTableName)
|
||||
return s.updateTableName(newTableName)
|
||||
}
|
||||
|
||||
func (s scaFileInfo) As(alias string) *scaFileInfo {
|
||||
s.scaFileInfoDo.DO = *(s.scaFileInfoDo.As(alias).(*gen.DO))
|
||||
return s.updateTableName(alias)
|
||||
}
|
||||
|
||||
func (s *scaFileInfo) updateTableName(table string) *scaFileInfo {
|
||||
s.ALL = field.NewAsterisk(table)
|
||||
s.ID = field.NewInt64(table, "id")
|
||||
s.FileName = field.NewString(table, "file_name")
|
||||
s.FileSize = field.NewFloat64(table, "file_size")
|
||||
s.FileTypeID = field.NewInt64(table, "file_type_id")
|
||||
s.UploadTime = field.NewTime(table, "upload_time")
|
||||
s.FolderID = field.NewInt64(table, "folder_id")
|
||||
s.UserID = field.NewString(table, "user_id")
|
||||
s.FileSource = field.NewInt64(table, "file_source")
|
||||
s.Status = field.NewInt64(table, "status")
|
||||
s.CreatedAt = field.NewTime(table, "created_at")
|
||||
s.UpdatedAt = field.NewTime(table, "updated_at")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
|
||||
s.fillFieldMap()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scaFileInfo) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
|
||||
_f, ok := s.fieldMap[fieldName]
|
||||
if !ok || _f == nil {
|
||||
return nil, false
|
||||
}
|
||||
_oe, ok := _f.(field.OrderExpr)
|
||||
return _oe, ok
|
||||
}
|
||||
|
||||
func (s *scaFileInfo) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 12)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["file_name"] = s.FileName
|
||||
s.fieldMap["file_size"] = s.FileSize
|
||||
s.fieldMap["file_type_id"] = s.FileTypeID
|
||||
s.fieldMap["upload_time"] = s.UploadTime
|
||||
s.fieldMap["folder_id"] = s.FolderID
|
||||
s.fieldMap["user_id"] = s.UserID
|
||||
s.fieldMap["file_source"] = s.FileSource
|
||||
s.fieldMap["status"] = s.Status
|
||||
s.fieldMap["created_at"] = s.CreatedAt
|
||||
s.fieldMap["updated_at"] = s.UpdatedAt
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
}
|
||||
|
||||
func (s scaFileInfo) clone(db *gorm.DB) scaFileInfo {
|
||||
s.scaFileInfoDo.ReplaceConnPool(db.Statement.ConnPool)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s scaFileInfo) replaceDB(db *gorm.DB) scaFileInfo {
|
||||
s.scaFileInfoDo.ReplaceDB(db)
|
||||
return s
|
||||
}
|
||||
|
||||
type scaFileInfoDo struct{ gen.DO }
|
||||
|
||||
type IScaFileInfoDo interface {
|
||||
gen.SubQuery
|
||||
Debug() IScaFileInfoDo
|
||||
WithContext(ctx context.Context) IScaFileInfoDo
|
||||
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
|
||||
ReplaceDB(db *gorm.DB)
|
||||
ReadDB() IScaFileInfoDo
|
||||
WriteDB() IScaFileInfoDo
|
||||
As(alias string) gen.Dao
|
||||
Session(config *gorm.Session) IScaFileInfoDo
|
||||
Columns(cols ...field.Expr) gen.Columns
|
||||
Clauses(conds ...clause.Expression) IScaFileInfoDo
|
||||
Not(conds ...gen.Condition) IScaFileInfoDo
|
||||
Or(conds ...gen.Condition) IScaFileInfoDo
|
||||
Select(conds ...field.Expr) IScaFileInfoDo
|
||||
Where(conds ...gen.Condition) IScaFileInfoDo
|
||||
Order(conds ...field.Expr) IScaFileInfoDo
|
||||
Distinct(cols ...field.Expr) IScaFileInfoDo
|
||||
Omit(cols ...field.Expr) IScaFileInfoDo
|
||||
Join(table schema.Tabler, on ...field.Expr) IScaFileInfoDo
|
||||
LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileInfoDo
|
||||
RightJoin(table schema.Tabler, on ...field.Expr) IScaFileInfoDo
|
||||
Group(cols ...field.Expr) IScaFileInfoDo
|
||||
Having(conds ...gen.Condition) IScaFileInfoDo
|
||||
Limit(limit int) IScaFileInfoDo
|
||||
Offset(offset int) IScaFileInfoDo
|
||||
Count() (count int64, err error)
|
||||
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileInfoDo
|
||||
Unscoped() IScaFileInfoDo
|
||||
Create(values ...*model.ScaFileInfo) error
|
||||
CreateInBatches(values []*model.ScaFileInfo, batchSize int) error
|
||||
Save(values ...*model.ScaFileInfo) error
|
||||
First() (*model.ScaFileInfo, error)
|
||||
Take() (*model.ScaFileInfo, error)
|
||||
Last() (*model.ScaFileInfo, error)
|
||||
Find() ([]*model.ScaFileInfo, error)
|
||||
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileInfo, err error)
|
||||
FindInBatches(result *[]*model.ScaFileInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error
|
||||
Pluck(column field.Expr, dest interface{}) error
|
||||
Delete(...*model.ScaFileInfo) (info gen.ResultInfo, err error)
|
||||
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
Updates(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateFrom(q gen.SubQuery) gen.Dao
|
||||
Attrs(attrs ...field.AssignExpr) IScaFileInfoDo
|
||||
Assign(attrs ...field.AssignExpr) IScaFileInfoDo
|
||||
Joins(fields ...field.RelationField) IScaFileInfoDo
|
||||
Preload(fields ...field.RelationField) IScaFileInfoDo
|
||||
FirstOrInit() (*model.ScaFileInfo, error)
|
||||
FirstOrCreate() (*model.ScaFileInfo, error)
|
||||
FindByPage(offset int, limit int) (result []*model.ScaFileInfo, count int64, err error)
|
||||
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
|
||||
Scan(result interface{}) (err error)
|
||||
Returning(value interface{}, columns ...string) IScaFileInfoDo
|
||||
UnderlyingDB() *gorm.DB
|
||||
schema.Tabler
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Debug() IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Debug())
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) WithContext(ctx context.Context) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.WithContext(ctx))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) ReadDB() IScaFileInfoDo {
|
||||
return s.Clauses(dbresolver.Read)
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) WriteDB() IScaFileInfoDo {
|
||||
return s.Clauses(dbresolver.Write)
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Session(config *gorm.Session) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Session(config))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Clauses(conds ...clause.Expression) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Clauses(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Returning(value interface{}, columns ...string) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Returning(value, columns...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Not(conds ...gen.Condition) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Not(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Or(conds ...gen.Condition) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Or(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Select(conds ...field.Expr) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Select(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Where(conds ...gen.Condition) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Where(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Order(conds ...field.Expr) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Order(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Distinct(cols ...field.Expr) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Distinct(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Omit(cols ...field.Expr) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Omit(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Join(table schema.Tabler, on ...field.Expr) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Join(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.LeftJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.RightJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Group(cols ...field.Expr) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Group(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Having(conds ...gen.Condition) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Having(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Limit(limit int) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Limit(limit))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Offset(offset int) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Offset(offset))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Scopes(funcs...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Unscoped() IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Unscoped())
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Create(values ...*model.ScaFileInfo) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Create(values)
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) CreateInBatches(values []*model.ScaFileInfo, batchSize int) error {
|
||||
return s.DO.CreateInBatches(values, batchSize)
|
||||
}
|
||||
|
||||
// Save : !!! underlying implementation is different with GORM
|
||||
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
|
||||
func (s scaFileInfoDo) Save(values ...*model.ScaFileInfo) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Save(values)
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) First() (*model.ScaFileInfo, error) {
|
||||
if result, err := s.DO.First(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Take() (*model.ScaFileInfo, error) {
|
||||
if result, err := s.DO.Take(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Last() (*model.ScaFileInfo, error) {
|
||||
if result, err := s.DO.Last(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Find() ([]*model.ScaFileInfo, error) {
|
||||
result, err := s.DO.Find()
|
||||
return result.([]*model.ScaFileInfo), err
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileInfo, err error) {
|
||||
buf := make([]*model.ScaFileInfo, 0, batchSize)
|
||||
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
|
||||
defer func() { results = append(results, buf...) }()
|
||||
return fc(tx, batch)
|
||||
})
|
||||
return results, err
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) FindInBatches(result *[]*model.ScaFileInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error {
|
||||
return s.DO.FindInBatches(result, batchSize, fc)
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Attrs(attrs ...field.AssignExpr) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Attrs(attrs...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Assign(attrs ...field.AssignExpr) IScaFileInfoDo {
|
||||
return s.withDO(s.DO.Assign(attrs...))
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Joins(fields ...field.RelationField) IScaFileInfoDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Joins(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Preload(fields ...field.RelationField) IScaFileInfoDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Preload(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) FirstOrInit() (*model.ScaFileInfo, error) {
|
||||
if result, err := s.DO.FirstOrInit(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) FirstOrCreate() (*model.ScaFileInfo, error) {
|
||||
if result, err := s.DO.FirstOrCreate(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) FindByPage(offset int, limit int) (result []*model.ScaFileInfo, count int64, err error) {
|
||||
result, err = s.Offset(offset).Limit(limit).Find()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if size := len(result); 0 < limit && 0 < size && size < limit {
|
||||
count = int64(size + offset)
|
||||
return
|
||||
}
|
||||
|
||||
count, err = s.Offset(-1).Limit(-1).Count()
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
|
||||
count, err = s.Count()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = s.Offset(offset).Limit(limit).Scan(result)
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Scan(result interface{}) (err error) {
|
||||
return s.DO.Scan(result)
|
||||
}
|
||||
|
||||
func (s scaFileInfoDo) Delete(models ...*model.ScaFileInfo) (result gen.ResultInfo, err error) {
|
||||
return s.DO.Delete(models)
|
||||
}
|
||||
|
||||
func (s *scaFileInfoDo) withDO(do gen.Dao) *scaFileInfoDo {
|
||||
s.DO = *do.(*gen.DO)
|
||||
return s
|
||||
}
|
@@ -1,406 +0,0 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
)
|
||||
|
||||
func newScaFileRecycle(db *gorm.DB, opts ...gen.DOOption) scaFileRecycle {
|
||||
_scaFileRecycle := scaFileRecycle{}
|
||||
|
||||
_scaFileRecycle.scaFileRecycleDo.UseDB(db, opts...)
|
||||
_scaFileRecycle.scaFileRecycleDo.UseModel(&model.ScaFileRecycle{})
|
||||
|
||||
tableName := _scaFileRecycle.scaFileRecycleDo.TableName()
|
||||
_scaFileRecycle.ALL = field.NewAsterisk(tableName)
|
||||
_scaFileRecycle.ID = field.NewInt64(tableName, "id")
|
||||
_scaFileRecycle.FileID = field.NewInt64(tableName, "file_id")
|
||||
_scaFileRecycle.FolderID = field.NewInt64(tableName, "folder_id")
|
||||
_scaFileRecycle.Type = field.NewInt64(tableName, "type")
|
||||
_scaFileRecycle.UserID = field.NewString(tableName, "user_id")
|
||||
_scaFileRecycle.DeletedAt = field.NewField(tableName, "deleted_at")
|
||||
_scaFileRecycle.OriginalPath = field.NewString(tableName, "original_path")
|
||||
_scaFileRecycle.FileSource = field.NewInt64(tableName, "file_source")
|
||||
|
||||
_scaFileRecycle.fillFieldMap()
|
||||
|
||||
return _scaFileRecycle
|
||||
}
|
||||
|
||||
type scaFileRecycle struct {
|
||||
scaFileRecycleDo
|
||||
|
||||
ALL field.Asterisk
|
||||
ID field.Int64 // 主键
|
||||
FileID field.Int64 // 文件编号
|
||||
FolderID field.Int64 // 文件夹编号
|
||||
Type field.Int64 // 类型 0 文件 1 文件夹
|
||||
UserID field.String // 用户编号
|
||||
DeletedAt field.Field // 删除时间
|
||||
OriginalPath field.String // 原始路径
|
||||
FileSource field.Int64 // 文件来源 0 相册 1 评论
|
||||
|
||||
fieldMap map[string]field.Expr
|
||||
}
|
||||
|
||||
func (s scaFileRecycle) Table(newTableName string) *scaFileRecycle {
|
||||
s.scaFileRecycleDo.UseTable(newTableName)
|
||||
return s.updateTableName(newTableName)
|
||||
}
|
||||
|
||||
func (s scaFileRecycle) As(alias string) *scaFileRecycle {
|
||||
s.scaFileRecycleDo.DO = *(s.scaFileRecycleDo.As(alias).(*gen.DO))
|
||||
return s.updateTableName(alias)
|
||||
}
|
||||
|
||||
func (s *scaFileRecycle) updateTableName(table string) *scaFileRecycle {
|
||||
s.ALL = field.NewAsterisk(table)
|
||||
s.ID = field.NewInt64(table, "id")
|
||||
s.FileID = field.NewInt64(table, "file_id")
|
||||
s.FolderID = field.NewInt64(table, "folder_id")
|
||||
s.Type = field.NewInt64(table, "type")
|
||||
s.UserID = field.NewString(table, "user_id")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
s.OriginalPath = field.NewString(table, "original_path")
|
||||
s.FileSource = field.NewInt64(table, "file_source")
|
||||
|
||||
s.fillFieldMap()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scaFileRecycle) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
|
||||
_f, ok := s.fieldMap[fieldName]
|
||||
if !ok || _f == nil {
|
||||
return nil, false
|
||||
}
|
||||
_oe, ok := _f.(field.OrderExpr)
|
||||
return _oe, ok
|
||||
}
|
||||
|
||||
func (s *scaFileRecycle) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 8)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["file_id"] = s.FileID
|
||||
s.fieldMap["folder_id"] = s.FolderID
|
||||
s.fieldMap["type"] = s.Type
|
||||
s.fieldMap["user_id"] = s.UserID
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
s.fieldMap["original_path"] = s.OriginalPath
|
||||
s.fieldMap["file_source"] = s.FileSource
|
||||
}
|
||||
|
||||
func (s scaFileRecycle) clone(db *gorm.DB) scaFileRecycle {
|
||||
s.scaFileRecycleDo.ReplaceConnPool(db.Statement.ConnPool)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s scaFileRecycle) replaceDB(db *gorm.DB) scaFileRecycle {
|
||||
s.scaFileRecycleDo.ReplaceDB(db)
|
||||
return s
|
||||
}
|
||||
|
||||
type scaFileRecycleDo struct{ gen.DO }
|
||||
|
||||
type IScaFileRecycleDo interface {
|
||||
gen.SubQuery
|
||||
Debug() IScaFileRecycleDo
|
||||
WithContext(ctx context.Context) IScaFileRecycleDo
|
||||
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
|
||||
ReplaceDB(db *gorm.DB)
|
||||
ReadDB() IScaFileRecycleDo
|
||||
WriteDB() IScaFileRecycleDo
|
||||
As(alias string) gen.Dao
|
||||
Session(config *gorm.Session) IScaFileRecycleDo
|
||||
Columns(cols ...field.Expr) gen.Columns
|
||||
Clauses(conds ...clause.Expression) IScaFileRecycleDo
|
||||
Not(conds ...gen.Condition) IScaFileRecycleDo
|
||||
Or(conds ...gen.Condition) IScaFileRecycleDo
|
||||
Select(conds ...field.Expr) IScaFileRecycleDo
|
||||
Where(conds ...gen.Condition) IScaFileRecycleDo
|
||||
Order(conds ...field.Expr) IScaFileRecycleDo
|
||||
Distinct(cols ...field.Expr) IScaFileRecycleDo
|
||||
Omit(cols ...field.Expr) IScaFileRecycleDo
|
||||
Join(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo
|
||||
LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo
|
||||
RightJoin(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo
|
||||
Group(cols ...field.Expr) IScaFileRecycleDo
|
||||
Having(conds ...gen.Condition) IScaFileRecycleDo
|
||||
Limit(limit int) IScaFileRecycleDo
|
||||
Offset(offset int) IScaFileRecycleDo
|
||||
Count() (count int64, err error)
|
||||
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileRecycleDo
|
||||
Unscoped() IScaFileRecycleDo
|
||||
Create(values ...*model.ScaFileRecycle) error
|
||||
CreateInBatches(values []*model.ScaFileRecycle, batchSize int) error
|
||||
Save(values ...*model.ScaFileRecycle) error
|
||||
First() (*model.ScaFileRecycle, error)
|
||||
Take() (*model.ScaFileRecycle, error)
|
||||
Last() (*model.ScaFileRecycle, error)
|
||||
Find() ([]*model.ScaFileRecycle, error)
|
||||
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileRecycle, err error)
|
||||
FindInBatches(result *[]*model.ScaFileRecycle, batchSize int, fc func(tx gen.Dao, batch int) error) error
|
||||
Pluck(column field.Expr, dest interface{}) error
|
||||
Delete(...*model.ScaFileRecycle) (info gen.ResultInfo, err error)
|
||||
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
Updates(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateFrom(q gen.SubQuery) gen.Dao
|
||||
Attrs(attrs ...field.AssignExpr) IScaFileRecycleDo
|
||||
Assign(attrs ...field.AssignExpr) IScaFileRecycleDo
|
||||
Joins(fields ...field.RelationField) IScaFileRecycleDo
|
||||
Preload(fields ...field.RelationField) IScaFileRecycleDo
|
||||
FirstOrInit() (*model.ScaFileRecycle, error)
|
||||
FirstOrCreate() (*model.ScaFileRecycle, error)
|
||||
FindByPage(offset int, limit int) (result []*model.ScaFileRecycle, count int64, err error)
|
||||
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
|
||||
Scan(result interface{}) (err error)
|
||||
Returning(value interface{}, columns ...string) IScaFileRecycleDo
|
||||
UnderlyingDB() *gorm.DB
|
||||
schema.Tabler
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Debug() IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Debug())
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) WithContext(ctx context.Context) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.WithContext(ctx))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) ReadDB() IScaFileRecycleDo {
|
||||
return s.Clauses(dbresolver.Read)
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) WriteDB() IScaFileRecycleDo {
|
||||
return s.Clauses(dbresolver.Write)
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Session(config *gorm.Session) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Session(config))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Clauses(conds ...clause.Expression) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Clauses(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Returning(value interface{}, columns ...string) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Returning(value, columns...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Not(conds ...gen.Condition) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Not(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Or(conds ...gen.Condition) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Or(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Select(conds ...field.Expr) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Select(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Where(conds ...gen.Condition) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Where(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Order(conds ...field.Expr) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Order(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Distinct(cols ...field.Expr) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Distinct(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Omit(cols ...field.Expr) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Omit(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Join(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Join(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.LeftJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.RightJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Group(cols ...field.Expr) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Group(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Having(conds ...gen.Condition) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Having(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Limit(limit int) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Limit(limit))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Offset(offset int) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Offset(offset))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Scopes(funcs...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Unscoped() IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Unscoped())
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Create(values ...*model.ScaFileRecycle) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Create(values)
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) CreateInBatches(values []*model.ScaFileRecycle, batchSize int) error {
|
||||
return s.DO.CreateInBatches(values, batchSize)
|
||||
}
|
||||
|
||||
// Save : !!! underlying implementation is different with GORM
|
||||
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
|
||||
func (s scaFileRecycleDo) Save(values ...*model.ScaFileRecycle) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Save(values)
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) First() (*model.ScaFileRecycle, error) {
|
||||
if result, err := s.DO.First(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileRecycle), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Take() (*model.ScaFileRecycle, error) {
|
||||
if result, err := s.DO.Take(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileRecycle), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Last() (*model.ScaFileRecycle, error) {
|
||||
if result, err := s.DO.Last(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileRecycle), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Find() ([]*model.ScaFileRecycle, error) {
|
||||
result, err := s.DO.Find()
|
||||
return result.([]*model.ScaFileRecycle), err
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileRecycle, err error) {
|
||||
buf := make([]*model.ScaFileRecycle, 0, batchSize)
|
||||
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
|
||||
defer func() { results = append(results, buf...) }()
|
||||
return fc(tx, batch)
|
||||
})
|
||||
return results, err
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) FindInBatches(result *[]*model.ScaFileRecycle, batchSize int, fc func(tx gen.Dao, batch int) error) error {
|
||||
return s.DO.FindInBatches(result, batchSize, fc)
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Attrs(attrs ...field.AssignExpr) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Attrs(attrs...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Assign(attrs ...field.AssignExpr) IScaFileRecycleDo {
|
||||
return s.withDO(s.DO.Assign(attrs...))
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Joins(fields ...field.RelationField) IScaFileRecycleDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Joins(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Preload(fields ...field.RelationField) IScaFileRecycleDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Preload(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) FirstOrInit() (*model.ScaFileRecycle, error) {
|
||||
if result, err := s.DO.FirstOrInit(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileRecycle), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) FirstOrCreate() (*model.ScaFileRecycle, error) {
|
||||
if result, err := s.DO.FirstOrCreate(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileRecycle), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) FindByPage(offset int, limit int) (result []*model.ScaFileRecycle, count int64, err error) {
|
||||
result, err = s.Offset(offset).Limit(limit).Find()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if size := len(result); 0 < limit && 0 < size && size < limit {
|
||||
count = int64(size + offset)
|
||||
return
|
||||
}
|
||||
|
||||
count, err = s.Offset(-1).Limit(-1).Count()
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
|
||||
count, err = s.Count()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = s.Offset(offset).Limit(limit).Scan(result)
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Scan(result interface{}) (err error) {
|
||||
return s.DO.Scan(result)
|
||||
}
|
||||
|
||||
func (s scaFileRecycleDo) Delete(models ...*model.ScaFileRecycle) (result gen.ResultInfo, err error) {
|
||||
return s.DO.Delete(models)
|
||||
}
|
||||
|
||||
func (s *scaFileRecycleDo) withDO(do gen.Dao) *scaFileRecycleDo {
|
||||
s.DO = *do.(*gen.DO)
|
||||
return s
|
||||
}
|
@@ -1,402 +0,0 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
)
|
||||
|
||||
func newScaFileType(db *gorm.DB, opts ...gen.DOOption) scaFileType {
|
||||
_scaFileType := scaFileType{}
|
||||
|
||||
_scaFileType.scaFileTypeDo.UseDB(db, opts...)
|
||||
_scaFileType.scaFileTypeDo.UseModel(&model.ScaFileType{})
|
||||
|
||||
tableName := _scaFileType.scaFileTypeDo.TableName()
|
||||
_scaFileType.ALL = field.NewAsterisk(tableName)
|
||||
_scaFileType.ID = field.NewInt64(tableName, "id")
|
||||
_scaFileType.TypeName = field.NewString(tableName, "type_name")
|
||||
_scaFileType.MimeType = field.NewString(tableName, "mime_type")
|
||||
_scaFileType.Status = field.NewInt64(tableName, "status")
|
||||
_scaFileType.CreatedAt = field.NewTime(tableName, "created_at")
|
||||
_scaFileType.UpdatedAt = field.NewTime(tableName, "updated_at")
|
||||
_scaFileType.DeletedAt = field.NewField(tableName, "deleted_at")
|
||||
|
||||
_scaFileType.fillFieldMap()
|
||||
|
||||
return _scaFileType
|
||||
}
|
||||
|
||||
type scaFileType struct {
|
||||
scaFileTypeDo
|
||||
|
||||
ALL field.Asterisk
|
||||
ID field.Int64 // 主键
|
||||
TypeName field.String // 类型名称
|
||||
MimeType field.String // MIME 类型
|
||||
Status field.Int64 // 类型状态
|
||||
CreatedAt field.Time // 创建时间
|
||||
UpdatedAt field.Time // 更新时间
|
||||
DeletedAt field.Field // 删除时间
|
||||
|
||||
fieldMap map[string]field.Expr
|
||||
}
|
||||
|
||||
func (s scaFileType) Table(newTableName string) *scaFileType {
|
||||
s.scaFileTypeDo.UseTable(newTableName)
|
||||
return s.updateTableName(newTableName)
|
||||
}
|
||||
|
||||
func (s scaFileType) As(alias string) *scaFileType {
|
||||
s.scaFileTypeDo.DO = *(s.scaFileTypeDo.As(alias).(*gen.DO))
|
||||
return s.updateTableName(alias)
|
||||
}
|
||||
|
||||
func (s *scaFileType) updateTableName(table string) *scaFileType {
|
||||
s.ALL = field.NewAsterisk(table)
|
||||
s.ID = field.NewInt64(table, "id")
|
||||
s.TypeName = field.NewString(table, "type_name")
|
||||
s.MimeType = field.NewString(table, "mime_type")
|
||||
s.Status = field.NewInt64(table, "status")
|
||||
s.CreatedAt = field.NewTime(table, "created_at")
|
||||
s.UpdatedAt = field.NewTime(table, "updated_at")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
|
||||
s.fillFieldMap()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scaFileType) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
|
||||
_f, ok := s.fieldMap[fieldName]
|
||||
if !ok || _f == nil {
|
||||
return nil, false
|
||||
}
|
||||
_oe, ok := _f.(field.OrderExpr)
|
||||
return _oe, ok
|
||||
}
|
||||
|
||||
func (s *scaFileType) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 7)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["type_name"] = s.TypeName
|
||||
s.fieldMap["mime_type"] = s.MimeType
|
||||
s.fieldMap["status"] = s.Status
|
||||
s.fieldMap["created_at"] = s.CreatedAt
|
||||
s.fieldMap["updated_at"] = s.UpdatedAt
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
}
|
||||
|
||||
func (s scaFileType) clone(db *gorm.DB) scaFileType {
|
||||
s.scaFileTypeDo.ReplaceConnPool(db.Statement.ConnPool)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s scaFileType) replaceDB(db *gorm.DB) scaFileType {
|
||||
s.scaFileTypeDo.ReplaceDB(db)
|
||||
return s
|
||||
}
|
||||
|
||||
type scaFileTypeDo struct{ gen.DO }
|
||||
|
||||
type IScaFileTypeDo interface {
|
||||
gen.SubQuery
|
||||
Debug() IScaFileTypeDo
|
||||
WithContext(ctx context.Context) IScaFileTypeDo
|
||||
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
|
||||
ReplaceDB(db *gorm.DB)
|
||||
ReadDB() IScaFileTypeDo
|
||||
WriteDB() IScaFileTypeDo
|
||||
As(alias string) gen.Dao
|
||||
Session(config *gorm.Session) IScaFileTypeDo
|
||||
Columns(cols ...field.Expr) gen.Columns
|
||||
Clauses(conds ...clause.Expression) IScaFileTypeDo
|
||||
Not(conds ...gen.Condition) IScaFileTypeDo
|
||||
Or(conds ...gen.Condition) IScaFileTypeDo
|
||||
Select(conds ...field.Expr) IScaFileTypeDo
|
||||
Where(conds ...gen.Condition) IScaFileTypeDo
|
||||
Order(conds ...field.Expr) IScaFileTypeDo
|
||||
Distinct(cols ...field.Expr) IScaFileTypeDo
|
||||
Omit(cols ...field.Expr) IScaFileTypeDo
|
||||
Join(table schema.Tabler, on ...field.Expr) IScaFileTypeDo
|
||||
LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileTypeDo
|
||||
RightJoin(table schema.Tabler, on ...field.Expr) IScaFileTypeDo
|
||||
Group(cols ...field.Expr) IScaFileTypeDo
|
||||
Having(conds ...gen.Condition) IScaFileTypeDo
|
||||
Limit(limit int) IScaFileTypeDo
|
||||
Offset(offset int) IScaFileTypeDo
|
||||
Count() (count int64, err error)
|
||||
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileTypeDo
|
||||
Unscoped() IScaFileTypeDo
|
||||
Create(values ...*model.ScaFileType) error
|
||||
CreateInBatches(values []*model.ScaFileType, batchSize int) error
|
||||
Save(values ...*model.ScaFileType) error
|
||||
First() (*model.ScaFileType, error)
|
||||
Take() (*model.ScaFileType, error)
|
||||
Last() (*model.ScaFileType, error)
|
||||
Find() ([]*model.ScaFileType, error)
|
||||
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileType, err error)
|
||||
FindInBatches(result *[]*model.ScaFileType, batchSize int, fc func(tx gen.Dao, batch int) error) error
|
||||
Pluck(column field.Expr, dest interface{}) error
|
||||
Delete(...*model.ScaFileType) (info gen.ResultInfo, err error)
|
||||
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
Updates(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateFrom(q gen.SubQuery) gen.Dao
|
||||
Attrs(attrs ...field.AssignExpr) IScaFileTypeDo
|
||||
Assign(attrs ...field.AssignExpr) IScaFileTypeDo
|
||||
Joins(fields ...field.RelationField) IScaFileTypeDo
|
||||
Preload(fields ...field.RelationField) IScaFileTypeDo
|
||||
FirstOrInit() (*model.ScaFileType, error)
|
||||
FirstOrCreate() (*model.ScaFileType, error)
|
||||
FindByPage(offset int, limit int) (result []*model.ScaFileType, count int64, err error)
|
||||
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
|
||||
Scan(result interface{}) (err error)
|
||||
Returning(value interface{}, columns ...string) IScaFileTypeDo
|
||||
UnderlyingDB() *gorm.DB
|
||||
schema.Tabler
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Debug() IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Debug())
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) WithContext(ctx context.Context) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.WithContext(ctx))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) ReadDB() IScaFileTypeDo {
|
||||
return s.Clauses(dbresolver.Read)
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) WriteDB() IScaFileTypeDo {
|
||||
return s.Clauses(dbresolver.Write)
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Session(config *gorm.Session) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Session(config))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Clauses(conds ...clause.Expression) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Clauses(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Returning(value interface{}, columns ...string) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Returning(value, columns...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Not(conds ...gen.Condition) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Not(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Or(conds ...gen.Condition) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Or(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Select(conds ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Select(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Where(conds ...gen.Condition) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Where(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Order(conds ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Order(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Distinct(cols ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Distinct(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Omit(cols ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Omit(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Join(table schema.Tabler, on ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Join(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.LeftJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.RightJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Group(cols ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Group(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Having(conds ...gen.Condition) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Having(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Limit(limit int) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Limit(limit))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Offset(offset int) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Offset(offset))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Scopes(funcs...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Unscoped() IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Unscoped())
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Create(values ...*model.ScaFileType) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Create(values)
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) CreateInBatches(values []*model.ScaFileType, batchSize int) error {
|
||||
return s.DO.CreateInBatches(values, batchSize)
|
||||
}
|
||||
|
||||
// Save : !!! underlying implementation is different with GORM
|
||||
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
|
||||
func (s scaFileTypeDo) Save(values ...*model.ScaFileType) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Save(values)
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) First() (*model.ScaFileType, error) {
|
||||
if result, err := s.DO.First(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileType), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Take() (*model.ScaFileType, error) {
|
||||
if result, err := s.DO.Take(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileType), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Last() (*model.ScaFileType, error) {
|
||||
if result, err := s.DO.Last(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileType), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Find() ([]*model.ScaFileType, error) {
|
||||
result, err := s.DO.Find()
|
||||
return result.([]*model.ScaFileType), err
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileType, err error) {
|
||||
buf := make([]*model.ScaFileType, 0, batchSize)
|
||||
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
|
||||
defer func() { results = append(results, buf...) }()
|
||||
return fc(tx, batch)
|
||||
})
|
||||
return results, err
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) FindInBatches(result *[]*model.ScaFileType, batchSize int, fc func(tx gen.Dao, batch int) error) error {
|
||||
return s.DO.FindInBatches(result, batchSize, fc)
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Attrs(attrs ...field.AssignExpr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Attrs(attrs...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Assign(attrs ...field.AssignExpr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Assign(attrs...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Joins(fields ...field.RelationField) IScaFileTypeDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Joins(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Preload(fields ...field.RelationField) IScaFileTypeDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Preload(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) FirstOrInit() (*model.ScaFileType, error) {
|
||||
if result, err := s.DO.FirstOrInit(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileType), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) FirstOrCreate() (*model.ScaFileType, error) {
|
||||
if result, err := s.DO.FirstOrCreate(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileType), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) FindByPage(offset int, limit int) (result []*model.ScaFileType, count int64, err error) {
|
||||
result, err = s.Offset(offset).Limit(limit).Find()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if size := len(result); 0 < limit && 0 < size && size < limit {
|
||||
count = int64(size + offset)
|
||||
return
|
||||
}
|
||||
|
||||
count, err = s.Offset(-1).Limit(-1).Count()
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
|
||||
count, err = s.Count()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = s.Offset(offset).Limit(limit).Scan(result)
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Scan(result interface{}) (err error) {
|
||||
return s.DO.Scan(result)
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Delete(models ...*model.ScaFileType) (result gen.ResultInfo, err error) {
|
||||
return s.DO.Delete(models)
|
||||
}
|
||||
|
||||
func (s *scaFileTypeDo) withDO(do gen.Dao) *scaFileTypeDo {
|
||||
s.DO = *do.(*gen.DO)
|
||||
return s
|
||||
}
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaMessageReport(db *gorm.DB, opts ...gen.DOOption) scaMessageReport {
|
||||
|
420
app/auth/model/mysql/query/sca_storage_config.gen.go
Normal file
@@ -0,0 +1,420 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaStorageConfig(db *gorm.DB, opts ...gen.DOOption) scaStorageConfig {
|
||||
_scaStorageConfig := scaStorageConfig{}
|
||||
|
||||
_scaStorageConfig.scaStorageConfigDo.UseDB(db, opts...)
|
||||
_scaStorageConfig.scaStorageConfigDo.UseModel(&model.ScaStorageConfig{})
|
||||
|
||||
tableName := _scaStorageConfig.scaStorageConfigDo.TableName()
|
||||
_scaStorageConfig.ALL = field.NewAsterisk(tableName)
|
||||
_scaStorageConfig.ID = field.NewInt64(tableName, "id")
|
||||
_scaStorageConfig.UserID = field.NewString(tableName, "user_id")
|
||||
_scaStorageConfig.Type = field.NewString(tableName, "type")
|
||||
_scaStorageConfig.Endpoint = field.NewString(tableName, "endpoint")
|
||||
_scaStorageConfig.AccessKey = field.NewString(tableName, "access_key")
|
||||
_scaStorageConfig.SecretKey = field.NewString(tableName, "secret_key")
|
||||
_scaStorageConfig.Bucket = field.NewString(tableName, "bucket")
|
||||
_scaStorageConfig.Region = field.NewString(tableName, "region")
|
||||
_scaStorageConfig.CreatedAt = field.NewTime(tableName, "created_at")
|
||||
_scaStorageConfig.UpdatedAt = field.NewTime(tableName, "updated_at")
|
||||
_scaStorageConfig.DeletedAt = field.NewField(tableName, "deleted_at")
|
||||
|
||||
_scaStorageConfig.fillFieldMap()
|
||||
|
||||
return _scaStorageConfig
|
||||
}
|
||||
|
||||
type scaStorageConfig struct {
|
||||
scaStorageConfigDo
|
||||
|
||||
ALL field.Asterisk
|
||||
ID field.Int64 // 主键
|
||||
UserID field.String // 用户ID
|
||||
Type field.String // 类型
|
||||
Endpoint field.String // 地址
|
||||
AccessKey field.String // 密钥key
|
||||
SecretKey field.String // 密钥
|
||||
Bucket field.String // 存储桶
|
||||
Region field.String // 地域
|
||||
CreatedAt field.Time // 创建时间
|
||||
UpdatedAt field.Time // 更新时间
|
||||
DeletedAt field.Field // 删除时间
|
||||
|
||||
fieldMap map[string]field.Expr
|
||||
}
|
||||
|
||||
func (s scaStorageConfig) Table(newTableName string) *scaStorageConfig {
|
||||
s.scaStorageConfigDo.UseTable(newTableName)
|
||||
return s.updateTableName(newTableName)
|
||||
}
|
||||
|
||||
func (s scaStorageConfig) As(alias string) *scaStorageConfig {
|
||||
s.scaStorageConfigDo.DO = *(s.scaStorageConfigDo.As(alias).(*gen.DO))
|
||||
return s.updateTableName(alias)
|
||||
}
|
||||
|
||||
func (s *scaStorageConfig) updateTableName(table string) *scaStorageConfig {
|
||||
s.ALL = field.NewAsterisk(table)
|
||||
s.ID = field.NewInt64(table, "id")
|
||||
s.UserID = field.NewString(table, "user_id")
|
||||
s.Type = field.NewString(table, "type")
|
||||
s.Endpoint = field.NewString(table, "endpoint")
|
||||
s.AccessKey = field.NewString(table, "access_key")
|
||||
s.SecretKey = field.NewString(table, "secret_key")
|
||||
s.Bucket = field.NewString(table, "bucket")
|
||||
s.Region = field.NewString(table, "region")
|
||||
s.CreatedAt = field.NewTime(table, "created_at")
|
||||
s.UpdatedAt = field.NewTime(table, "updated_at")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
|
||||
s.fillFieldMap()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scaStorageConfig) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
|
||||
_f, ok := s.fieldMap[fieldName]
|
||||
if !ok || _f == nil {
|
||||
return nil, false
|
||||
}
|
||||
_oe, ok := _f.(field.OrderExpr)
|
||||
return _oe, ok
|
||||
}
|
||||
|
||||
func (s *scaStorageConfig) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 11)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["user_id"] = s.UserID
|
||||
s.fieldMap["type"] = s.Type
|
||||
s.fieldMap["endpoint"] = s.Endpoint
|
||||
s.fieldMap["access_key"] = s.AccessKey
|
||||
s.fieldMap["secret_key"] = s.SecretKey
|
||||
s.fieldMap["bucket"] = s.Bucket
|
||||
s.fieldMap["region"] = s.Region
|
||||
s.fieldMap["created_at"] = s.CreatedAt
|
||||
s.fieldMap["updated_at"] = s.UpdatedAt
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
}
|
||||
|
||||
func (s scaStorageConfig) clone(db *gorm.DB) scaStorageConfig {
|
||||
s.scaStorageConfigDo.ReplaceConnPool(db.Statement.ConnPool)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s scaStorageConfig) replaceDB(db *gorm.DB) scaStorageConfig {
|
||||
s.scaStorageConfigDo.ReplaceDB(db)
|
||||
return s
|
||||
}
|
||||
|
||||
type scaStorageConfigDo struct{ gen.DO }
|
||||
|
||||
type IScaStorageConfigDo interface {
|
||||
gen.SubQuery
|
||||
Debug() IScaStorageConfigDo
|
||||
WithContext(ctx context.Context) IScaStorageConfigDo
|
||||
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
|
||||
ReplaceDB(db *gorm.DB)
|
||||
ReadDB() IScaStorageConfigDo
|
||||
WriteDB() IScaStorageConfigDo
|
||||
As(alias string) gen.Dao
|
||||
Session(config *gorm.Session) IScaStorageConfigDo
|
||||
Columns(cols ...field.Expr) gen.Columns
|
||||
Clauses(conds ...clause.Expression) IScaStorageConfigDo
|
||||
Not(conds ...gen.Condition) IScaStorageConfigDo
|
||||
Or(conds ...gen.Condition) IScaStorageConfigDo
|
||||
Select(conds ...field.Expr) IScaStorageConfigDo
|
||||
Where(conds ...gen.Condition) IScaStorageConfigDo
|
||||
Order(conds ...field.Expr) IScaStorageConfigDo
|
||||
Distinct(cols ...field.Expr) IScaStorageConfigDo
|
||||
Omit(cols ...field.Expr) IScaStorageConfigDo
|
||||
Join(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo
|
||||
LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo
|
||||
RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo
|
||||
Group(cols ...field.Expr) IScaStorageConfigDo
|
||||
Having(conds ...gen.Condition) IScaStorageConfigDo
|
||||
Limit(limit int) IScaStorageConfigDo
|
||||
Offset(offset int) IScaStorageConfigDo
|
||||
Count() (count int64, err error)
|
||||
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageConfigDo
|
||||
Unscoped() IScaStorageConfigDo
|
||||
Create(values ...*model.ScaStorageConfig) error
|
||||
CreateInBatches(values []*model.ScaStorageConfig, batchSize int) error
|
||||
Save(values ...*model.ScaStorageConfig) error
|
||||
First() (*model.ScaStorageConfig, error)
|
||||
Take() (*model.ScaStorageConfig, error)
|
||||
Last() (*model.ScaStorageConfig, error)
|
||||
Find() ([]*model.ScaStorageConfig, error)
|
||||
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageConfig, err error)
|
||||
FindInBatches(result *[]*model.ScaStorageConfig, batchSize int, fc func(tx gen.Dao, batch int) error) error
|
||||
Pluck(column field.Expr, dest interface{}) error
|
||||
Delete(...*model.ScaStorageConfig) (info gen.ResultInfo, err error)
|
||||
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
Updates(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateFrom(q gen.SubQuery) gen.Dao
|
||||
Attrs(attrs ...field.AssignExpr) IScaStorageConfigDo
|
||||
Assign(attrs ...field.AssignExpr) IScaStorageConfigDo
|
||||
Joins(fields ...field.RelationField) IScaStorageConfigDo
|
||||
Preload(fields ...field.RelationField) IScaStorageConfigDo
|
||||
FirstOrInit() (*model.ScaStorageConfig, error)
|
||||
FirstOrCreate() (*model.ScaStorageConfig, error)
|
||||
FindByPage(offset int, limit int) (result []*model.ScaStorageConfig, count int64, err error)
|
||||
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
|
||||
Scan(result interface{}) (err error)
|
||||
Returning(value interface{}, columns ...string) IScaStorageConfigDo
|
||||
UnderlyingDB() *gorm.DB
|
||||
schema.Tabler
|
||||
}
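
For orientation, a minimal caller-side sketch of the generated storage-config DAO follows (it is not part of the generated file). The package name, import paths and the "oss" value are illustrative, and it assumes the package-level query objects were initialised with SetDefault(db).

// example_storage_config_lookup.go — illustrative only, not generated code.
package service

import (
	"context"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/query"
)

// findUserStorageConfig returns one user's object-storage configuration.
func findUserStorageConfig(ctx context.Context, userID string) (*model.ScaStorageConfig, error) {
	cfg := query.ScaStorageConfig // generated query object with typed field expressions
	return cfg.WithContext(ctx).
		Where(cfg.UserID.Eq(userID), cfg.Type.Eq("oss")).
		First()
}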
|
||||
|
||||
func (s scaStorageConfigDo) Debug() IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Debug())
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) WithContext(ctx context.Context) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.WithContext(ctx))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) ReadDB() IScaStorageConfigDo {
|
||||
return s.Clauses(dbresolver.Read)
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) WriteDB() IScaStorageConfigDo {
|
||||
return s.Clauses(dbresolver.Write)
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Session(config *gorm.Session) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Session(config))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Clauses(conds ...clause.Expression) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Clauses(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Returning(value interface{}, columns ...string) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Returning(value, columns...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Not(conds ...gen.Condition) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Not(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Or(conds ...gen.Condition) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Or(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Select(conds ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Select(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Where(conds ...gen.Condition) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Where(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Order(conds ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Order(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Distinct(cols ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Distinct(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Omit(cols ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Omit(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Join(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.LeftJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.RightJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Group(cols ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Group(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Having(conds ...gen.Condition) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Having(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Limit(limit int) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Limit(limit))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Offset(offset int) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Offset(offset))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Scopes(funcs...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Unscoped() IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Unscoped())
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Create(values ...*model.ScaStorageConfig) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Create(values)
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) CreateInBatches(values []*model.ScaStorageConfig, batchSize int) error {
|
||||
return s.DO.CreateInBatches(values, batchSize)
|
||||
}
|
||||
|
||||
// Save : !!! underlying implementation is different with GORM
|
||||
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
|
||||
func (s scaStorageConfigDo) Save(values ...*model.ScaStorageConfig) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Save(values)
|
||||
}
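
A hedged example of the upsert behaviour noted in the comment above: Save issues an ON CONFLICT ... UPDATE ALL create, so a record whose primary key already exists is overwritten rather than rejected. The field values are placeholders, and the model field names are assumed to mirror the column fields defined above.

// Illustrative only: create or refresh a user's storage configuration.
cfg := &model.ScaStorageConfig{
	UserID:   "u-123",          // placeholder user ID
	Type:     "s3",             // placeholder provider type
	Endpoint: "s3.example.com",
	Bucket:   "album",
	Region:   "us-east-1",
}
if err := query.ScaStorageConfig.WithContext(ctx).Save(cfg); err != nil {
	return err
}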
|
||||
|
||||
func (s scaStorageConfigDo) First() (*model.ScaStorageConfig, error) {
|
||||
if result, err := s.DO.First(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageConfig), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Take() (*model.ScaStorageConfig, error) {
|
||||
if result, err := s.DO.Take(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageConfig), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Last() (*model.ScaStorageConfig, error) {
|
||||
if result, err := s.DO.Last(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageConfig), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Find() ([]*model.ScaStorageConfig, error) {
|
||||
result, err := s.DO.Find()
|
||||
return result.([]*model.ScaStorageConfig), err
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageConfig, err error) {
|
||||
buf := make([]*model.ScaStorageConfig, 0, batchSize)
|
||||
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
|
||||
defer func() { results = append(results, buf...) }()
|
||||
return fc(tx, batch)
|
||||
})
|
||||
return results, err
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) FindInBatches(result *[]*model.ScaStorageConfig, batchSize int, fc func(tx gen.Dao, batch int) error) error {
|
||||
return s.DO.FindInBatches(result, batchSize, fc)
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Attrs(attrs ...field.AssignExpr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Attrs(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Assign(attrs ...field.AssignExpr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Assign(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Joins(fields ...field.RelationField) IScaStorageConfigDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Joins(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Preload(fields ...field.RelationField) IScaStorageConfigDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Preload(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) FirstOrInit() (*model.ScaStorageConfig, error) {
|
||||
if result, err := s.DO.FirstOrInit(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageConfig), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) FirstOrCreate() (*model.ScaStorageConfig, error) {
|
||||
if result, err := s.DO.FirstOrCreate(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageConfig), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) FindByPage(offset int, limit int) (result []*model.ScaStorageConfig, count int64, err error) {
|
||||
result, err = s.Offset(offset).Limit(limit).Find()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if size := len(result); 0 < limit && 0 < size && size < limit {
|
||||
count = int64(size + offset)
|
||||
return
|
||||
}
|
||||
|
||||
count, err = s.Offset(-1).Limit(-1).Count()
|
||||
return
|
||||
}
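
Pagination usage, as a sketch: FindByPage runs the limited query first and, per the code above, only issues the separate Count when the page came back full. Names and values below are illustrative.

cfg := query.ScaStorageConfig
// Second page of 20 configs of the assumed "s3" type, newest first;
// total reports how many rows match the filter overall.
configs, total, err := cfg.WithContext(ctx).
	Where(cfg.Type.Eq("s3")).
	Order(cfg.CreatedAt.Desc()).
	FindByPage(20, 20)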
|
||||
|
||||
func (s scaStorageConfigDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
|
||||
count, err = s.Count()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = s.Offset(offset).Limit(limit).Scan(result)
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Scan(result interface{}) (err error) {
|
||||
return s.DO.Scan(result)
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Delete(models ...*model.ScaStorageConfig) (result gen.ResultInfo, err error) {
|
||||
return s.DO.Delete(models)
|
||||
}
|
||||
|
||||
func (s *scaStorageConfigDo) withDO(do gen.Dao) *scaStorageConfigDo {
|
||||
s.DO = *do.(*gen.DO)
|
||||
return s
|
||||
}
|
448
app/auth/model/mysql/query/sca_storage_info.gen.go
Normal file
@@ -0,0 +1,448 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaStorageInfo(db *gorm.DB, opts ...gen.DOOption) scaStorageInfo {
|
||||
_scaStorageInfo := scaStorageInfo{}
|
||||
|
||||
_scaStorageInfo.scaStorageInfoDo.UseDB(db, opts...)
|
||||
_scaStorageInfo.scaStorageInfoDo.UseModel(&model.ScaStorageInfo{})
|
||||
|
||||
tableName := _scaStorageInfo.scaStorageInfoDo.TableName()
|
||||
_scaStorageInfo.ALL = field.NewAsterisk(tableName)
|
||||
_scaStorageInfo.ID = field.NewInt64(tableName, "id")
|
||||
_scaStorageInfo.UserID = field.NewString(tableName, "user_id")
|
||||
_scaStorageInfo.Storage = field.NewString(tableName, "storage")
|
||||
_scaStorageInfo.Bucket = field.NewString(tableName, "bucket")
|
||||
_scaStorageInfo.Type = field.NewString(tableName, "type")
|
||||
_scaStorageInfo.Path = field.NewString(tableName, "path")
|
||||
_scaStorageInfo.FileName = field.NewString(tableName, "file_name")
|
||||
_scaStorageInfo.Category = field.NewString(tableName, "category")
|
||||
_scaStorageInfo.Loaction = field.NewString(tableName, "loaction")
|
||||
_scaStorageInfo.Hash = field.NewString(tableName, "hash")
|
||||
_scaStorageInfo.Anime = field.NewString(tableName, "anime")
|
||||
_scaStorageInfo.HasFace = field.NewString(tableName, "has_face")
|
||||
_scaStorageInfo.FaceID = field.NewInt64(tableName, "face_id")
|
||||
_scaStorageInfo.Landscape = field.NewString(tableName, "landscape")
|
||||
_scaStorageInfo.Objects = field.NewString(tableName, "objects")
|
||||
_scaStorageInfo.CreatedAt = field.NewTime(tableName, "created_at")
|
||||
_scaStorageInfo.UpdatedAt = field.NewTime(tableName, "updated_at")
|
||||
_scaStorageInfo.DeletedAt = field.NewField(tableName, "deleted_at")
|
||||
|
||||
_scaStorageInfo.fillFieldMap()
|
||||
|
||||
return _scaStorageInfo
|
||||
}
|
||||
|
||||
type scaStorageInfo struct {
|
||||
scaStorageInfoDo
|
||||
|
||||
ALL field.Asterisk
|
||||
ID field.Int64 // 主键
|
||||
UserID field.String // 用户ID
|
||||
Storage field.String // 存储空间
|
||||
Bucket field.String // 存储桶
|
||||
Type field.String // 类型
|
||||
Path field.String // 路径
|
||||
FileName field.String // 名称
|
||||
Category field.String // 分类
|
||||
Loaction field.String // 地址
|
||||
Hash field.String // 哈希值
|
||||
Anime field.String // 是否是动漫图片
|
||||
HasFace field.String // 是否人像
|
||||
FaceID field.Int64 // 人像ID
|
||||
Landscape field.String // 风景类型
|
||||
Objects field.String // 对象识别
|
||||
CreatedAt field.Time // 创建时间
|
||||
UpdatedAt field.Time // 更新时间
|
||||
DeletedAt field.Field // 删除时间
|
||||
|
||||
fieldMap map[string]field.Expr
|
||||
}
|
||||
|
||||
func (s scaStorageInfo) Table(newTableName string) *scaStorageInfo {
|
||||
s.scaStorageInfoDo.UseTable(newTableName)
|
||||
return s.updateTableName(newTableName)
|
||||
}
|
||||
|
||||
func (s scaStorageInfo) As(alias string) *scaStorageInfo {
|
||||
s.scaStorageInfoDo.DO = *(s.scaStorageInfoDo.As(alias).(*gen.DO))
|
||||
return s.updateTableName(alias)
|
||||
}
|
||||
|
||||
func (s *scaStorageInfo) updateTableName(table string) *scaStorageInfo {
|
||||
s.ALL = field.NewAsterisk(table)
|
||||
s.ID = field.NewInt64(table, "id")
|
||||
s.UserID = field.NewString(table, "user_id")
|
||||
s.Storage = field.NewString(table, "storage")
|
||||
s.Bucket = field.NewString(table, "bucket")
|
||||
s.Type = field.NewString(table, "type")
|
||||
s.Path = field.NewString(table, "path")
|
||||
s.FileName = field.NewString(table, "file_name")
|
||||
s.Category = field.NewString(table, "category")
|
||||
s.Loaction = field.NewString(table, "loaction")
|
||||
s.Hash = field.NewString(table, "hash")
|
||||
s.Anime = field.NewString(table, "anime")
|
||||
s.HasFace = field.NewString(table, "has_face")
|
||||
s.FaceID = field.NewInt64(table, "face_id")
|
||||
s.Landscape = field.NewString(table, "landscape")
|
||||
s.Objects = field.NewString(table, "objects")
|
||||
s.CreatedAt = field.NewTime(table, "created_at")
|
||||
s.UpdatedAt = field.NewTime(table, "updated_at")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
|
||||
s.fillFieldMap()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scaStorageInfo) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
|
||||
_f, ok := s.fieldMap[fieldName]
|
||||
if !ok || _f == nil {
|
||||
return nil, false
|
||||
}
|
||||
_oe, ok := _f.(field.OrderExpr)
|
||||
return _oe, ok
|
||||
}
|
||||
|
||||
func (s *scaStorageInfo) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 18)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["user_id"] = s.UserID
|
||||
s.fieldMap["storage"] = s.Storage
|
||||
s.fieldMap["bucket"] = s.Bucket
|
||||
s.fieldMap["type"] = s.Type
|
||||
s.fieldMap["path"] = s.Path
|
||||
s.fieldMap["file_name"] = s.FileName
|
||||
s.fieldMap["category"] = s.Category
|
||||
s.fieldMap["loaction"] = s.Loaction
|
||||
s.fieldMap["hash"] = s.Hash
|
||||
s.fieldMap["anime"] = s.Anime
|
||||
s.fieldMap["has_face"] = s.HasFace
|
||||
s.fieldMap["face_id"] = s.FaceID
|
||||
s.fieldMap["landscape"] = s.Landscape
|
||||
s.fieldMap["objects"] = s.Objects
|
||||
s.fieldMap["created_at"] = s.CreatedAt
|
||||
s.fieldMap["updated_at"] = s.UpdatedAt
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
}
|
||||
|
||||
func (s scaStorageInfo) clone(db *gorm.DB) scaStorageInfo {
|
||||
s.scaStorageInfoDo.ReplaceConnPool(db.Statement.ConnPool)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s scaStorageInfo) replaceDB(db *gorm.DB) scaStorageInfo {
|
||||
s.scaStorageInfoDo.ReplaceDB(db)
|
||||
return s
|
||||
}
|
||||
|
||||
type scaStorageInfoDo struct{ gen.DO }
|
||||
|
||||
type IScaStorageInfoDo interface {
|
||||
gen.SubQuery
|
||||
Debug() IScaStorageInfoDo
|
||||
WithContext(ctx context.Context) IScaStorageInfoDo
|
||||
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
|
||||
ReplaceDB(db *gorm.DB)
|
||||
ReadDB() IScaStorageInfoDo
|
||||
WriteDB() IScaStorageInfoDo
|
||||
As(alias string) gen.Dao
|
||||
Session(config *gorm.Session) IScaStorageInfoDo
|
||||
Columns(cols ...field.Expr) gen.Columns
|
||||
Clauses(conds ...clause.Expression) IScaStorageInfoDo
|
||||
Not(conds ...gen.Condition) IScaStorageInfoDo
|
||||
Or(conds ...gen.Condition) IScaStorageInfoDo
|
||||
Select(conds ...field.Expr) IScaStorageInfoDo
|
||||
Where(conds ...gen.Condition) IScaStorageInfoDo
|
||||
Order(conds ...field.Expr) IScaStorageInfoDo
|
||||
Distinct(cols ...field.Expr) IScaStorageInfoDo
|
||||
Omit(cols ...field.Expr) IScaStorageInfoDo
|
||||
Join(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo
|
||||
LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo
|
||||
RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo
|
||||
Group(cols ...field.Expr) IScaStorageInfoDo
|
||||
Having(conds ...gen.Condition) IScaStorageInfoDo
|
||||
Limit(limit int) IScaStorageInfoDo
|
||||
Offset(offset int) IScaStorageInfoDo
|
||||
Count() (count int64, err error)
|
||||
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageInfoDo
|
||||
Unscoped() IScaStorageInfoDo
|
||||
Create(values ...*model.ScaStorageInfo) error
|
||||
CreateInBatches(values []*model.ScaStorageInfo, batchSize int) error
|
||||
Save(values ...*model.ScaStorageInfo) error
|
||||
First() (*model.ScaStorageInfo, error)
|
||||
Take() (*model.ScaStorageInfo, error)
|
||||
Last() (*model.ScaStorageInfo, error)
|
||||
Find() ([]*model.ScaStorageInfo, error)
|
||||
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageInfo, err error)
|
||||
FindInBatches(result *[]*model.ScaStorageInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error
|
||||
Pluck(column field.Expr, dest interface{}) error
|
||||
Delete(...*model.ScaStorageInfo) (info gen.ResultInfo, err error)
|
||||
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
Updates(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateFrom(q gen.SubQuery) gen.Dao
|
||||
Attrs(attrs ...field.AssignExpr) IScaStorageInfoDo
|
||||
Assign(attrs ...field.AssignExpr) IScaStorageInfoDo
|
||||
Joins(fields ...field.RelationField) IScaStorageInfoDo
|
||||
Preload(fields ...field.RelationField) IScaStorageInfoDo
|
||||
FirstOrInit() (*model.ScaStorageInfo, error)
|
||||
FirstOrCreate() (*model.ScaStorageInfo, error)
|
||||
FindByPage(offset int, limit int) (result []*model.ScaStorageInfo, count int64, err error)
|
||||
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
|
||||
Scan(result interface{}) (err error)
|
||||
Returning(value interface{}, columns ...string) IScaStorageInfoDo
|
||||
UnderlyingDB() *gorm.DB
|
||||
schema.Tabler
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Debug() IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Debug())
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) WithContext(ctx context.Context) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.WithContext(ctx))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) ReadDB() IScaStorageInfoDo {
|
||||
return s.Clauses(dbresolver.Read)
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) WriteDB() IScaStorageInfoDo {
|
||||
return s.Clauses(dbresolver.Write)
|
||||
}
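
ReadDB and WriteDB only attach gorm.io/plugin/dbresolver routing clauses; they take effect only if the *gorm.DB handed to the query package has that plugin registered with replicas. A sketch under that assumption, with illustrative names:

// Illustrative only: send this listing query to a read replica.
info := query.ScaStorageInfo
recent, err := info.WithContext(ctx).
	ReadDB().
	Where(info.UserID.Eq(userID)).
	Order(info.CreatedAt.Desc()).
	Limit(50).
	Find()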
|
||||
|
||||
func (s scaStorageInfoDo) Session(config *gorm.Session) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Session(config))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Clauses(conds ...clause.Expression) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Clauses(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Returning(value interface{}, columns ...string) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Returning(value, columns...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Not(conds ...gen.Condition) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Not(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Or(conds ...gen.Condition) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Or(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Select(conds ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Select(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Where(conds ...gen.Condition) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Where(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Order(conds ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Order(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Distinct(cols ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Distinct(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Omit(cols ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Omit(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Join(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.LeftJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.RightJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Group(cols ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Group(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Having(conds ...gen.Condition) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Having(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Limit(limit int) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Limit(limit))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Offset(offset int) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Offset(offset))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Scopes(funcs...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Unscoped() IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Unscoped())
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Create(values ...*model.ScaStorageInfo) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Create(values)
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) CreateInBatches(values []*model.ScaStorageInfo, batchSize int) error {
|
||||
return s.DO.CreateInBatches(values, batchSize)
|
||||
}
|
||||
|
||||
// Save : !!! underlying implementation is different with GORM
|
||||
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
|
||||
func (s scaStorageInfoDo) Save(values ...*model.ScaStorageInfo) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Save(values)
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) First() (*model.ScaStorageInfo, error) {
|
||||
if result, err := s.DO.First(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Take() (*model.ScaStorageInfo, error) {
|
||||
if result, err := s.DO.Take(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Last() (*model.ScaStorageInfo, error) {
|
||||
if result, err := s.DO.Last(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Find() ([]*model.ScaStorageInfo, error) {
|
||||
result, err := s.DO.Find()
|
||||
return result.([]*model.ScaStorageInfo), err
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageInfo, err error) {
|
||||
buf := make([]*model.ScaStorageInfo, 0, batchSize)
|
||||
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
|
||||
defer func() { results = append(results, buf...) }()
|
||||
return fc(tx, batch)
|
||||
})
|
||||
return results, err
|
||||
}
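
FindInBatch walks matching rows batchSize at a time and, via the deferred append above, also returns every visited row in one slice; FindInBatches is the variant to use when only the per-batch callback matters. A sketch with placeholder names and values:

info := query.ScaStorageInfo
// Process records 500 at a time; `visited` ends up holding every row seen.
visited, err := info.WithContext(ctx).
	Where(info.HasFace.Eq("1")). // placeholder filter value
	FindInBatch(500, func(tx gen.Dao, batch int) error {
		// per-batch work here, e.g. queue the batch for face detection
		return nil
	})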
|
||||
|
||||
func (s scaStorageInfoDo) FindInBatches(result *[]*model.ScaStorageInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error {
|
||||
return s.DO.FindInBatches(result, batchSize, fc)
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Attrs(attrs ...field.AssignExpr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Attrs(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Assign(attrs ...field.AssignExpr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Assign(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Joins(fields ...field.RelationField) IScaStorageInfoDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Joins(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Preload(fields ...field.RelationField) IScaStorageInfoDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Preload(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) FirstOrInit() (*model.ScaStorageInfo, error) {
|
||||
if result, err := s.DO.FirstOrInit(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) FirstOrCreate() (*model.ScaStorageInfo, error) {
|
||||
if result, err := s.DO.FirstOrCreate(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) FindByPage(offset int, limit int) (result []*model.ScaStorageInfo, count int64, err error) {
|
||||
result, err = s.Offset(offset).Limit(limit).Find()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if size := len(result); 0 < limit && 0 < size && size < limit {
|
||||
count = int64(size + offset)
|
||||
return
|
||||
}
|
||||
|
||||
count, err = s.Offset(-1).Limit(-1).Count()
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
|
||||
count, err = s.Count()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = s.Offset(offset).Limit(limit).Scan(result)
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Scan(result interface{}) (err error) {
|
||||
return s.DO.Scan(result)
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Delete(models ...*model.ScaStorageInfo) (result gen.ResultInfo, err error) {
|
||||
return s.DO.Delete(models)
|
||||
}
|
||||
|
||||
func (s *scaStorageInfoDo) withDO(do gen.Dao) *scaStorageInfoDo {
|
||||
s.DO = *do.(*gen.DO)
|
||||
return s
|
||||
}
|
400
app/auth/model/mysql/query/sca_storage_tag.gen.go
Normal file
@@ -0,0 +1,400 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaStorageTag(db *gorm.DB, opts ...gen.DOOption) scaStorageTag {
|
||||
_scaStorageTag := scaStorageTag{}
|
||||
|
||||
_scaStorageTag.scaStorageTagDo.UseDB(db, opts...)
|
||||
_scaStorageTag.scaStorageTagDo.UseModel(&model.ScaStorageTag{})
|
||||
|
||||
tableName := _scaStorageTag.scaStorageTagDo.TableName()
|
||||
_scaStorageTag.ALL = field.NewAsterisk(tableName)
|
||||
_scaStorageTag.ID = field.NewInt64(tableName, "id")
|
||||
_scaStorageTag.FileID = field.NewInt64(tableName, "file_id")
|
||||
_scaStorageTag.TagID = field.NewInt64(tableName, "tag_id")
|
||||
_scaStorageTag.CreatedAt = field.NewTime(tableName, "created_at")
|
||||
_scaStorageTag.UpdatedAt = field.NewTime(tableName, "updated_at")
|
||||
_scaStorageTag.DeletedAt = field.NewField(tableName, "deleted_at")
|
||||
|
||||
_scaStorageTag.fillFieldMap()
|
||||
|
||||
return _scaStorageTag
|
||||
}
|
||||
|
||||
type scaStorageTag struct {
|
||||
scaStorageTagDo
|
||||
|
||||
ALL field.Asterisk
|
||||
ID field.Int64 // 主键
|
||||
FileID field.Int64 // 文件ID
|
||||
TagID field.Int64 // 标签ID
|
||||
CreatedAt field.Time // 创建时间
|
||||
UpdatedAt field.Time // 更新时间
|
||||
DeletedAt field.Field // 删除时间
|
||||
|
||||
fieldMap map[string]field.Expr
|
||||
}
|
||||
|
||||
func (s scaStorageTag) Table(newTableName string) *scaStorageTag {
|
||||
s.scaStorageTagDo.UseTable(newTableName)
|
||||
return s.updateTableName(newTableName)
|
||||
}
|
||||
|
||||
func (s scaStorageTag) As(alias string) *scaStorageTag {
|
||||
s.scaStorageTagDo.DO = *(s.scaStorageTagDo.As(alias).(*gen.DO))
|
||||
return s.updateTableName(alias)
|
||||
}
|
||||
|
||||
func (s *scaStorageTag) updateTableName(table string) *scaStorageTag {
|
||||
s.ALL = field.NewAsterisk(table)
|
||||
s.ID = field.NewInt64(table, "id")
|
||||
s.FileID = field.NewInt64(table, "file_id")
|
||||
s.TagID = field.NewInt64(table, "tag_id")
|
||||
s.CreatedAt = field.NewTime(table, "created_at")
|
||||
s.UpdatedAt = field.NewTime(table, "updated_at")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
|
||||
s.fillFieldMap()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scaStorageTag) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
|
||||
_f, ok := s.fieldMap[fieldName]
|
||||
if !ok || _f == nil {
|
||||
return nil, false
|
||||
}
|
||||
_oe, ok := _f.(field.OrderExpr)
|
||||
return _oe, ok
|
||||
}
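
GetFieldByName is what makes caller-supplied sort columns safe: only columns registered in fieldMap come back as order expressions, so an unknown name can simply be ignored or rejected instead of being interpolated into SQL. A sketch with illustrative variables:

// Illustrative only: sortBy is a column name taken from an API request.
tag := query.ScaStorageTag
do := tag.WithContext(ctx).Where(tag.FileID.Eq(fileID))
if col, ok := tag.GetFieldByName(sortBy); ok {
	do = do.Order(col) // e.g. sortBy == "created_at"
}
rows, err := do.Find()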
|
||||
|
||||
func (s *scaStorageTag) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 6)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["file_id"] = s.FileID
|
||||
s.fieldMap["tag_id"] = s.TagID
|
||||
s.fieldMap["created_at"] = s.CreatedAt
|
||||
s.fieldMap["updated_at"] = s.UpdatedAt
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
}
|
||||
|
||||
func (s scaStorageTag) clone(db *gorm.DB) scaStorageTag {
|
||||
s.scaStorageTagDo.ReplaceConnPool(db.Statement.ConnPool)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s scaStorageTag) replaceDB(db *gorm.DB) scaStorageTag {
|
||||
s.scaStorageTagDo.ReplaceDB(db)
|
||||
return s
|
||||
}
|
||||
|
||||
type scaStorageTagDo struct{ gen.DO }
|
||||
|
||||
type IScaStorageTagDo interface {
|
||||
gen.SubQuery
|
||||
Debug() IScaStorageTagDo
|
||||
WithContext(ctx context.Context) IScaStorageTagDo
|
||||
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
|
||||
ReplaceDB(db *gorm.DB)
|
||||
ReadDB() IScaStorageTagDo
|
||||
WriteDB() IScaStorageTagDo
|
||||
As(alias string) gen.Dao
|
||||
Session(config *gorm.Session) IScaStorageTagDo
|
||||
Columns(cols ...field.Expr) gen.Columns
|
||||
Clauses(conds ...clause.Expression) IScaStorageTagDo
|
||||
Not(conds ...gen.Condition) IScaStorageTagDo
|
||||
Or(conds ...gen.Condition) IScaStorageTagDo
|
||||
Select(conds ...field.Expr) IScaStorageTagDo
|
||||
Where(conds ...gen.Condition) IScaStorageTagDo
|
||||
Order(conds ...field.Expr) IScaStorageTagDo
|
||||
Distinct(cols ...field.Expr) IScaStorageTagDo
|
||||
Omit(cols ...field.Expr) IScaStorageTagDo
|
||||
Join(table schema.Tabler, on ...field.Expr) IScaStorageTagDo
|
||||
LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo
|
||||
RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo
|
||||
Group(cols ...field.Expr) IScaStorageTagDo
|
||||
Having(conds ...gen.Condition) IScaStorageTagDo
|
||||
Limit(limit int) IScaStorageTagDo
|
||||
Offset(offset int) IScaStorageTagDo
|
||||
Count() (count int64, err error)
|
||||
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagDo
|
||||
Unscoped() IScaStorageTagDo
|
||||
Create(values ...*model.ScaStorageTag) error
|
||||
CreateInBatches(values []*model.ScaStorageTag, batchSize int) error
|
||||
Save(values ...*model.ScaStorageTag) error
|
||||
First() (*model.ScaStorageTag, error)
|
||||
Take() (*model.ScaStorageTag, error)
|
||||
Last() (*model.ScaStorageTag, error)
|
||||
Find() ([]*model.ScaStorageTag, error)
|
||||
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTag, err error)
|
||||
FindInBatches(result *[]*model.ScaStorageTag, batchSize int, fc func(tx gen.Dao, batch int) error) error
|
||||
Pluck(column field.Expr, dest interface{}) error
|
||||
Delete(...*model.ScaStorageTag) (info gen.ResultInfo, err error)
|
||||
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
Updates(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateFrom(q gen.SubQuery) gen.Dao
|
||||
Attrs(attrs ...field.AssignExpr) IScaStorageTagDo
|
||||
Assign(attrs ...field.AssignExpr) IScaStorageTagDo
|
||||
Joins(fields ...field.RelationField) IScaStorageTagDo
|
||||
Preload(fields ...field.RelationField) IScaStorageTagDo
|
||||
FirstOrInit() (*model.ScaStorageTag, error)
|
||||
FirstOrCreate() (*model.ScaStorageTag, error)
|
||||
FindByPage(offset int, limit int) (result []*model.ScaStorageTag, count int64, err error)
|
||||
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
|
||||
Scan(result interface{}) (err error)
|
||||
Returning(value interface{}, columns ...string) IScaStorageTagDo
|
||||
UnderlyingDB() *gorm.DB
|
||||
schema.Tabler
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Debug() IScaStorageTagDo {
	return s.withDO(s.DO.Debug())
}

func (s scaStorageTagDo) WithContext(ctx context.Context) IScaStorageTagDo {
	return s.withDO(s.DO.WithContext(ctx))
}

func (s scaStorageTagDo) ReadDB() IScaStorageTagDo {
	return s.Clauses(dbresolver.Read)
}

func (s scaStorageTagDo) WriteDB() IScaStorageTagDo {
	return s.Clauses(dbresolver.Write)
}

func (s scaStorageTagDo) Session(config *gorm.Session) IScaStorageTagDo {
	return s.withDO(s.DO.Session(config))
}

func (s scaStorageTagDo) Clauses(conds ...clause.Expression) IScaStorageTagDo {
	return s.withDO(s.DO.Clauses(conds...))
}

func (s scaStorageTagDo) Returning(value interface{}, columns ...string) IScaStorageTagDo {
	return s.withDO(s.DO.Returning(value, columns...))
}

func (s scaStorageTagDo) Not(conds ...gen.Condition) IScaStorageTagDo {
	return s.withDO(s.DO.Not(conds...))
}

func (s scaStorageTagDo) Or(conds ...gen.Condition) IScaStorageTagDo {
	return s.withDO(s.DO.Or(conds...))
}

func (s scaStorageTagDo) Select(conds ...field.Expr) IScaStorageTagDo {
	return s.withDO(s.DO.Select(conds...))
}

func (s scaStorageTagDo) Where(conds ...gen.Condition) IScaStorageTagDo {
	return s.withDO(s.DO.Where(conds...))
}

func (s scaStorageTagDo) Order(conds ...field.Expr) IScaStorageTagDo {
	return s.withDO(s.DO.Order(conds...))
}

func (s scaStorageTagDo) Distinct(cols ...field.Expr) IScaStorageTagDo {
	return s.withDO(s.DO.Distinct(cols...))
}

func (s scaStorageTagDo) Omit(cols ...field.Expr) IScaStorageTagDo {
	return s.withDO(s.DO.Omit(cols...))
}

func (s scaStorageTagDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageTagDo {
	return s.withDO(s.DO.Join(table, on...))
}

func (s scaStorageTagDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo {
	return s.withDO(s.DO.LeftJoin(table, on...))
}

func (s scaStorageTagDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo {
	return s.withDO(s.DO.RightJoin(table, on...))
}

func (s scaStorageTagDo) Group(cols ...field.Expr) IScaStorageTagDo {
	return s.withDO(s.DO.Group(cols...))
}

func (s scaStorageTagDo) Having(conds ...gen.Condition) IScaStorageTagDo {
	return s.withDO(s.DO.Having(conds...))
}

func (s scaStorageTagDo) Limit(limit int) IScaStorageTagDo {
	return s.withDO(s.DO.Limit(limit))
}

func (s scaStorageTagDo) Offset(offset int) IScaStorageTagDo {
	return s.withDO(s.DO.Offset(offset))
}

func (s scaStorageTagDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagDo {
	return s.withDO(s.DO.Scopes(funcs...))
}

func (s scaStorageTagDo) Unscoped() IScaStorageTagDo {
	return s.withDO(s.DO.Unscoped())
}

func (s scaStorageTagDo) Create(values ...*model.ScaStorageTag) error {
	if len(values) == 0 {
		return nil
	}
	return s.DO.Create(values)
}

func (s scaStorageTagDo) CreateInBatches(values []*model.ScaStorageTag, batchSize int) error {
	return s.DO.CreateInBatches(values, batchSize)
}

// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (s scaStorageTagDo) Save(values ...*model.ScaStorageTag) error {
	if len(values) == 0 {
		return nil
	}
	return s.DO.Save(values)
}

func (s scaStorageTagDo) First() (*model.ScaStorageTag, error) {
	if result, err := s.DO.First(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTag), nil
	}
}

func (s scaStorageTagDo) Take() (*model.ScaStorageTag, error) {
	if result, err := s.DO.Take(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTag), nil
	}
}

func (s scaStorageTagDo) Last() (*model.ScaStorageTag, error) {
	if result, err := s.DO.Last(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTag), nil
	}
}

func (s scaStorageTagDo) Find() ([]*model.ScaStorageTag, error) {
	result, err := s.DO.Find()
	return result.([]*model.ScaStorageTag), err
}

func (s scaStorageTagDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTag, err error) {
	buf := make([]*model.ScaStorageTag, 0, batchSize)
	err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
		defer func() { results = append(results, buf...) }()
		return fc(tx, batch)
	})
	return results, err
}

func (s scaStorageTagDo) FindInBatches(result *[]*model.ScaStorageTag, batchSize int, fc func(tx gen.Dao, batch int) error) error {
	return s.DO.FindInBatches(result, batchSize, fc)
}

func (s scaStorageTagDo) Attrs(attrs ...field.AssignExpr) IScaStorageTagDo {
	return s.withDO(s.DO.Attrs(attrs...))
}

func (s scaStorageTagDo) Assign(attrs ...field.AssignExpr) IScaStorageTagDo {
	return s.withDO(s.DO.Assign(attrs...))
}

func (s scaStorageTagDo) Joins(fields ...field.RelationField) IScaStorageTagDo {
	for _, _f := range fields {
		s = *s.withDO(s.DO.Joins(_f))
	}
	return &s
}

func (s scaStorageTagDo) Preload(fields ...field.RelationField) IScaStorageTagDo {
	for _, _f := range fields {
		s = *s.withDO(s.DO.Preload(_f))
	}
	return &s
}

func (s scaStorageTagDo) FirstOrInit() (*model.ScaStorageTag, error) {
	if result, err := s.DO.FirstOrInit(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTag), nil
	}
}

func (s scaStorageTagDo) FirstOrCreate() (*model.ScaStorageTag, error) {
	if result, err := s.DO.FirstOrCreate(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTag), nil
	}
}

func (s scaStorageTagDo) FindByPage(offset int, limit int) (result []*model.ScaStorageTag, count int64, err error) {
	result, err = s.Offset(offset).Limit(limit).Find()
	if err != nil {
		return
	}

	if size := len(result); 0 < limit && 0 < size && size < limit {
		count = int64(size + offset)
		return
	}

	count, err = s.Offset(-1).Limit(-1).Count()
	return
}

func (s scaStorageTagDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
	count, err = s.Count()
	if err != nil {
		return
	}

	err = s.Offset(offset).Limit(limit).Scan(result)
	return
}

func (s scaStorageTagDo) Scan(result interface{}) (err error) {
	return s.DO.Scan(result)
}

func (s scaStorageTagDo) Delete(models ...*model.ScaStorageTag) (result gen.ResultInfo, err error) {
	return s.DO.Delete(models)
}

func (s *scaStorageTagDo) withDO(do gen.Dao) *scaStorageTagDo {
	s.DO = *do.(*gen.DO)
	return s
}
400
app/auth/model/mysql/query/sca_storage_tag_info.gen.go
Normal file
@@ -0,0 +1,400 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.

package query

import (
	"context"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)

func newScaStorageTagInfo(db *gorm.DB, opts ...gen.DOOption) scaStorageTagInfo {
	_scaStorageTagInfo := scaStorageTagInfo{}

	_scaStorageTagInfo.scaStorageTagInfoDo.UseDB(db, opts...)
	_scaStorageTagInfo.scaStorageTagInfoDo.UseModel(&model.ScaStorageTagInfo{})

	tableName := _scaStorageTagInfo.scaStorageTagInfoDo.TableName()
	_scaStorageTagInfo.ALL = field.NewAsterisk(tableName)
	_scaStorageTagInfo.ID = field.NewInt64(tableName, "id")
	_scaStorageTagInfo.TagName = field.NewString(tableName, "tag_name")
	_scaStorageTagInfo.TagKey = field.NewString(tableName, "tag_key")
	_scaStorageTagInfo.CreatedAt = field.NewTime(tableName, "created_at")
	_scaStorageTagInfo.UpdatedAt = field.NewTime(tableName, "updated_at")
	_scaStorageTagInfo.DeletedAt = field.NewField(tableName, "deleted_at")

	_scaStorageTagInfo.fillFieldMap()

	return _scaStorageTagInfo
}

type scaStorageTagInfo struct {
	scaStorageTagInfoDo

	ALL       field.Asterisk
	ID        field.Int64  // primary key
	TagName   field.String // tag name
	TagKey    field.String // tag key
	CreatedAt field.Time   // creation time
	UpdatedAt field.Time   // update time
	DeletedAt field.Field  // deletion time

	fieldMap map[string]field.Expr
}

func (s scaStorageTagInfo) Table(newTableName string) *scaStorageTagInfo {
	s.scaStorageTagInfoDo.UseTable(newTableName)
	return s.updateTableName(newTableName)
}

func (s scaStorageTagInfo) As(alias string) *scaStorageTagInfo {
	s.scaStorageTagInfoDo.DO = *(s.scaStorageTagInfoDo.As(alias).(*gen.DO))
	return s.updateTableName(alias)
}

func (s *scaStorageTagInfo) updateTableName(table string) *scaStorageTagInfo {
	s.ALL = field.NewAsterisk(table)
	s.ID = field.NewInt64(table, "id")
	s.TagName = field.NewString(table, "tag_name")
	s.TagKey = field.NewString(table, "tag_key")
	s.CreatedAt = field.NewTime(table, "created_at")
	s.UpdatedAt = field.NewTime(table, "updated_at")
	s.DeletedAt = field.NewField(table, "deleted_at")

	s.fillFieldMap()

	return s
}

func (s *scaStorageTagInfo) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
	_f, ok := s.fieldMap[fieldName]
	if !ok || _f == nil {
		return nil, false
	}
	_oe, ok := _f.(field.OrderExpr)
	return _oe, ok
}

func (s *scaStorageTagInfo) fillFieldMap() {
	s.fieldMap = make(map[string]field.Expr, 6)
	s.fieldMap["id"] = s.ID
	s.fieldMap["tag_name"] = s.TagName
	s.fieldMap["tag_key"] = s.TagKey
	s.fieldMap["created_at"] = s.CreatedAt
	s.fieldMap["updated_at"] = s.UpdatedAt
	s.fieldMap["deleted_at"] = s.DeletedAt
}

func (s scaStorageTagInfo) clone(db *gorm.DB) scaStorageTagInfo {
	s.scaStorageTagInfoDo.ReplaceConnPool(db.Statement.ConnPool)
	return s
}

func (s scaStorageTagInfo) replaceDB(db *gorm.DB) scaStorageTagInfo {
	s.scaStorageTagInfoDo.ReplaceDB(db)
	return s
}

type scaStorageTagInfoDo struct{ gen.DO }

type IScaStorageTagInfoDo interface {
	gen.SubQuery
	Debug() IScaStorageTagInfoDo
	WithContext(ctx context.Context) IScaStorageTagInfoDo
	WithResult(fc func(tx gen.Dao)) gen.ResultInfo
	ReplaceDB(db *gorm.DB)
	ReadDB() IScaStorageTagInfoDo
	WriteDB() IScaStorageTagInfoDo
	As(alias string) gen.Dao
	Session(config *gorm.Session) IScaStorageTagInfoDo
	Columns(cols ...field.Expr) gen.Columns
	Clauses(conds ...clause.Expression) IScaStorageTagInfoDo
	Not(conds ...gen.Condition) IScaStorageTagInfoDo
	Or(conds ...gen.Condition) IScaStorageTagInfoDo
	Select(conds ...field.Expr) IScaStorageTagInfoDo
	Where(conds ...gen.Condition) IScaStorageTagInfoDo
	Order(conds ...field.Expr) IScaStorageTagInfoDo
	Distinct(cols ...field.Expr) IScaStorageTagInfoDo
	Omit(cols ...field.Expr) IScaStorageTagInfoDo
	Join(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo
	LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo
	RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo
	Group(cols ...field.Expr) IScaStorageTagInfoDo
	Having(conds ...gen.Condition) IScaStorageTagInfoDo
	Limit(limit int) IScaStorageTagInfoDo
	Offset(offset int) IScaStorageTagInfoDo
	Count() (count int64, err error)
	Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagInfoDo
	Unscoped() IScaStorageTagInfoDo
	Create(values ...*model.ScaStorageTagInfo) error
	CreateInBatches(values []*model.ScaStorageTagInfo, batchSize int) error
	Save(values ...*model.ScaStorageTagInfo) error
	First() (*model.ScaStorageTagInfo, error)
	Take() (*model.ScaStorageTagInfo, error)
	Last() (*model.ScaStorageTagInfo, error)
	Find() ([]*model.ScaStorageTagInfo, error)
	FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTagInfo, err error)
	FindInBatches(result *[]*model.ScaStorageTagInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error
	Pluck(column field.Expr, dest interface{}) error
	Delete(...*model.ScaStorageTagInfo) (info gen.ResultInfo, err error)
	Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
	UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
	Updates(value interface{}) (info gen.ResultInfo, err error)
	UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
	UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
	UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
	UpdateFrom(q gen.SubQuery) gen.Dao
	Attrs(attrs ...field.AssignExpr) IScaStorageTagInfoDo
	Assign(attrs ...field.AssignExpr) IScaStorageTagInfoDo
	Joins(fields ...field.RelationField) IScaStorageTagInfoDo
	Preload(fields ...field.RelationField) IScaStorageTagInfoDo
	FirstOrInit() (*model.ScaStorageTagInfo, error)
	FirstOrCreate() (*model.ScaStorageTagInfo, error)
	FindByPage(offset int, limit int) (result []*model.ScaStorageTagInfo, count int64, err error)
	ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
	Scan(result interface{}) (err error)
	Returning(value interface{}, columns ...string) IScaStorageTagInfoDo
	UnderlyingDB() *gorm.DB
	schema.Tabler
}

func (s scaStorageTagInfoDo) Debug() IScaStorageTagInfoDo {
	return s.withDO(s.DO.Debug())
}

func (s scaStorageTagInfoDo) WithContext(ctx context.Context) IScaStorageTagInfoDo {
	return s.withDO(s.DO.WithContext(ctx))
}

func (s scaStorageTagInfoDo) ReadDB() IScaStorageTagInfoDo {
	return s.Clauses(dbresolver.Read)
}

func (s scaStorageTagInfoDo) WriteDB() IScaStorageTagInfoDo {
	return s.Clauses(dbresolver.Write)
}

func (s scaStorageTagInfoDo) Session(config *gorm.Session) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Session(config))
}

func (s scaStorageTagInfoDo) Clauses(conds ...clause.Expression) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Clauses(conds...))
}

func (s scaStorageTagInfoDo) Returning(value interface{}, columns ...string) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Returning(value, columns...))
}

func (s scaStorageTagInfoDo) Not(conds ...gen.Condition) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Not(conds...))
}

func (s scaStorageTagInfoDo) Or(conds ...gen.Condition) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Or(conds...))
}

func (s scaStorageTagInfoDo) Select(conds ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Select(conds...))
}

func (s scaStorageTagInfoDo) Where(conds ...gen.Condition) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Where(conds...))
}

func (s scaStorageTagInfoDo) Order(conds ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Order(conds...))
}

func (s scaStorageTagInfoDo) Distinct(cols ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Distinct(cols...))
}

func (s scaStorageTagInfoDo) Omit(cols ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Omit(cols...))
}

func (s scaStorageTagInfoDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Join(table, on...))
}

func (s scaStorageTagInfoDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.LeftJoin(table, on...))
}

func (s scaStorageTagInfoDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.RightJoin(table, on...))
}

func (s scaStorageTagInfoDo) Group(cols ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Group(cols...))
}

func (s scaStorageTagInfoDo) Having(conds ...gen.Condition) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Having(conds...))
}

func (s scaStorageTagInfoDo) Limit(limit int) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Limit(limit))
}

func (s scaStorageTagInfoDo) Offset(offset int) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Offset(offset))
}

func (s scaStorageTagInfoDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Scopes(funcs...))
}

func (s scaStorageTagInfoDo) Unscoped() IScaStorageTagInfoDo {
	return s.withDO(s.DO.Unscoped())
}

func (s scaStorageTagInfoDo) Create(values ...*model.ScaStorageTagInfo) error {
	if len(values) == 0 {
		return nil
	}
	return s.DO.Create(values)
}

func (s scaStorageTagInfoDo) CreateInBatches(values []*model.ScaStorageTagInfo, batchSize int) error {
	return s.DO.CreateInBatches(values, batchSize)
}

// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (s scaStorageTagInfoDo) Save(values ...*model.ScaStorageTagInfo) error {
	if len(values) == 0 {
		return nil
	}
	return s.DO.Save(values)
}

func (s scaStorageTagInfoDo) First() (*model.ScaStorageTagInfo, error) {
	if result, err := s.DO.First(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTagInfo), nil
	}
}

func (s scaStorageTagInfoDo) Take() (*model.ScaStorageTagInfo, error) {
	if result, err := s.DO.Take(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTagInfo), nil
	}
}

func (s scaStorageTagInfoDo) Last() (*model.ScaStorageTagInfo, error) {
	if result, err := s.DO.Last(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTagInfo), nil
	}
}

func (s scaStorageTagInfoDo) Find() ([]*model.ScaStorageTagInfo, error) {
	result, err := s.DO.Find()
	return result.([]*model.ScaStorageTagInfo), err
}

func (s scaStorageTagInfoDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTagInfo, err error) {
	buf := make([]*model.ScaStorageTagInfo, 0, batchSize)
	err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
		defer func() { results = append(results, buf...) }()
		return fc(tx, batch)
	})
	return results, err
}

func (s scaStorageTagInfoDo) FindInBatches(result *[]*model.ScaStorageTagInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error {
	return s.DO.FindInBatches(result, batchSize, fc)
}

func (s scaStorageTagInfoDo) Attrs(attrs ...field.AssignExpr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Attrs(attrs...))
}

func (s scaStorageTagInfoDo) Assign(attrs ...field.AssignExpr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Assign(attrs...))
}

func (s scaStorageTagInfoDo) Joins(fields ...field.RelationField) IScaStorageTagInfoDo {
	for _, _f := range fields {
		s = *s.withDO(s.DO.Joins(_f))
	}
	return &s
}

func (s scaStorageTagInfoDo) Preload(fields ...field.RelationField) IScaStorageTagInfoDo {
	for _, _f := range fields {
		s = *s.withDO(s.DO.Preload(_f))
	}
	return &s
}

func (s scaStorageTagInfoDo) FirstOrInit() (*model.ScaStorageTagInfo, error) {
	if result, err := s.DO.FirstOrInit(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTagInfo), nil
	}
}

func (s scaStorageTagInfoDo) FirstOrCreate() (*model.ScaStorageTagInfo, error) {
	if result, err := s.DO.FirstOrCreate(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTagInfo), nil
	}
}

func (s scaStorageTagInfoDo) FindByPage(offset int, limit int) (result []*model.ScaStorageTagInfo, count int64, err error) {
	result, err = s.Offset(offset).Limit(limit).Find()
	if err != nil {
		return
	}

	if size := len(result); 0 < limit && 0 < size && size < limit {
		count = int64(size + offset)
		return
	}

	count, err = s.Offset(-1).Limit(-1).Count()
	return
}

func (s scaStorageTagInfoDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
	count, err = s.Count()
	if err != nil {
		return
	}

	err = s.Offset(offset).Limit(limit).Scan(result)
	return
}

func (s scaStorageTagInfoDo) Scan(result interface{}) (err error) {
	return s.DO.Scan(result)
}

func (s scaStorageTagInfoDo) Delete(models ...*model.ScaStorageTagInfo) (result gen.ResultInfo, err error) {
	return s.DO.Delete(models)
}

func (s *scaStorageTagInfoDo) withDO(do gen.Dao) *scaStorageTagInfoDo {
	s.DO = *do.(*gen.DO)
	return s
}
@@ -6,15 +6,17 @@ package query

import (
	"context"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)

func newScaUserFollow(db *gorm.DB, opts ...gen.DOOption) scaUserFollow {

@@ -6,15 +6,17 @@ package query

import (
	"context"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)

func newScaUserLevel(db *gorm.DB, opts ...gen.DOOption) scaUserLevel {

@@ -6,15 +6,17 @@ package query

import (
	"context"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)

func newScaUserMessage(db *gorm.DB, opts ...gen.DOOption) scaUserMessage {
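
For orientation, here is a minimal usage sketch of the regenerated query layer (not part of the generated files or of this commit): query.Use wires the typed DAOs to a *gorm.DB, and each table is then queried through its DO. The DSN, context, and tag values below are illustrative only; the model field names (TagName, TagKey) follow the generated columns.

package main

import (
	"context"
	"fmt"

	"gorm.io/driver/mysql"
	"gorm.io/gorm"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/query"
)

func main() {
	// Hypothetical DSN; any *gorm.DB handle works here.
	db, err := gorm.Open(mysql.Open("user:pass@tcp(127.0.0.1:3306)/sca?parseTime=true"), &gorm.Config{})
	if err != nil {
		panic(err)
	}

	q := query.Use(db)
	ctx := context.Background()

	// Insert a tag dictionary entry through the generated ScaStorageTagInfo DO.
	if err := q.ScaStorageTagInfo.WithContext(ctx).Create(&model.ScaStorageTagInfo{
		TagName: "landscape", // illustrative values
		TagKey:  "landscape",
	}); err != nil {
		panic(err)
	}

	// Read it back using the typed field helpers instead of raw SQL.
	info, err := q.ScaStorageTagInfo.WithContext(ctx).
		Where(q.ScaStorageTagInfo.TagKey.Eq("landscape")).
		First()
	if err != nil {
		panic(err)
	}
	fmt.Println(info.TagName)
}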