✨ encapsulate object storage service operations

@@ -11,7 +11,7 @@ import (
 	"gorm.io/gorm"
 )
 
-const MySQLDSN = "root:1611@(localhost:3306)/schisandra-cloud-album?charset=utf8mb4&parseTime=True&loc=Local"
+const MySQLDSN = "root:LDQ20020618xxx@tcp(1.95.0.111:3306)/schisandra-cloud-album?charset=utf8mb4&parseTime=True&loc=Local"
 
 func main() {
@@ -25,7 +25,7 @@ func main() {
 	if err != nil {
 		panic(err)
 	}
-	path := filepath.Join(dir, "app/auth/api/repository/mysql/", "query")
+	path := filepath.Join(dir, "app/auth/model/mysql/", "query")
 	// create the generator instance
 	g := gen.NewGenerator(gen.Config{
 		// path is relative to where `go run` is executed; the directory is created automatically
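The hunk above only shows the start of the gen.NewGenerator call. For context, a minimal gorm.io/gen generation script usually looks like the sketch below; the OutPath value mirrors the output directory seen in this diff, while the placeholder DSN, the mode flags, and the use of GenerateAllTable are assumptions for illustration, not the project's exact configuration.

```go
// gen_sketch.go: a minimal sketch of a gorm.io/gen code-generation entry point.
// The output path mirrors this diff; the DSN and mode flags are illustrative.
package main

import (
	"gorm.io/driver/mysql"
	"gorm.io/gen"
	"gorm.io/gorm"
)

const MySQLDSN = "user:password@tcp(127.0.0.1:3306)/schisandra-cloud-album?charset=utf8mb4&parseTime=True&loc=Local"

func main() {
	g := gen.NewGenerator(gen.Config{
		// query code lands here; model structs go to the sibling model package
		OutPath: "app/auth/model/mysql/query",
		Mode:    gen.WithDefaultQuery | gen.WithQueryInterface,
	})

	db, err := gorm.Open(mysql.Open(MySQLDSN))
	if err != nil {
		panic(err)
	}
	g.UseDB(db)

	// Generate model structs and type-safe query code for every table.
	g.ApplyBasic(g.GenerateAllTable()...)
	g.Execute()
}
```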
@@ -14,19 +14,19 @@ const TableNameScaAuthMenu = "sca_auth_menu"
|
||||
|
||||
// ScaAuthMenu mapped from table <sca_auth_menu>
|
||||
type ScaAuthMenu struct {
|
||||
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
|
||||
MenuName string `gorm:"column:menu_name;type:varchar(64);comment:名称" json:"menu_name"` // 名称
|
||||
ParentID int64 `gorm:"column:parent_id;type:bigint;comment:父ID" json:"parent_id"` // 父ID
|
||||
Type int64 `gorm:"column:type;type:tinyint;comment:类型" json:"type"` // 类型
|
||||
Path string `gorm:"column:path;type:varchar(30);comment:路径" json:"path"` // 路径
|
||||
Status int64 `gorm:"column:status;type:tinyint;comment:状态 0 启用 1 停用" json:"status"` // 状态 0 启用 1 停用
|
||||
Icon string `gorm:"column:icon;type:varchar(128);comment:图标" json:"icon"` // 图标
|
||||
MenuKey string `gorm:"column:menu_key;type:varchar(64);comment:关键字" json:"menu_key"` // 关键字
|
||||
Order_ int64 `gorm:"column:order;type:int;comment:排序" json:"order"` // 排序
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
Remark string `gorm:"column:remark;type:varchar(255);comment:备注 描述" json:"remark"` // 备注 描述
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
|
||||
MenuName string `gorm:"column:menu_name;type:varchar(64);comment:名称" json:"menu_name"` // 名称
|
||||
ParentID int64 `gorm:"column:parent_id;type:bigint(20);comment:父ID" json:"parent_id"` // 父ID
|
||||
Type int64 `gorm:"column:type;type:tinyint(4);comment:类型" json:"type"` // 类型
|
||||
Path string `gorm:"column:path;type:varchar(30);comment:路径" json:"path"` // 路径
|
||||
Status int64 `gorm:"column:status;type:tinyint(4);comment:状态 0 启用 1 停用" json:"status"` // 状态 0 启用 1 停用
|
||||
Icon string `gorm:"column:icon;type:varchar(128);comment:图标" json:"icon"` // 图标
|
||||
MenuKey string `gorm:"column:menu_key;type:varchar(64);comment:关键字" json:"menu_key"` // 关键字
|
||||
Order_ int64 `gorm:"column:order;type:bigint(20);comment:排序" json:"order"` // 排序
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
Remark string `gorm:"column:remark;type:varchar(255);comment:备注 描述" json:"remark"` // 备注 描述
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
}
|
||||
|
||||
// TableName ScaAuthMenu's table name
|
||||
|
@@ -8,7 +8,7 @@ const TableNameScaAuthPermissionRule = "sca_auth_permission_rule"
|
||||
|
||||
// ScaAuthPermissionRule mapped from table <sca_auth_permission_rule>
|
||||
type ScaAuthPermissionRule struct {
|
||||
ID int64 `gorm:"column:id;type:int;primaryKey;autoIncrement:true;primary_key" json:"id,string"`
|
||||
ID int64 `gorm:"column:id;type:int(11);primaryKey;autoIncrement:true;primary_key" json:"id,string"`
|
||||
Ptype string `gorm:"column:ptype;type:varchar(100);uniqueIndex:idx_sca_auth_permission_rule,priority:1;index:IDX_sca_auth_permission_rule_ptype,priority:1" json:"ptype"`
|
||||
V0 string `gorm:"column:v0;type:varchar(100);uniqueIndex:idx_sca_auth_permission_rule,priority:2;index:IDX_sca_auth_permission_rule_v0,priority:1" json:"v0"`
|
||||
V1 string `gorm:"column:v1;type:varchar(100);uniqueIndex:idx_sca_auth_permission_rule,priority:3;index:IDX_sca_auth_permission_rule_v1,priority:1" json:"v1"`
|
||||
|
@@ -14,12 +14,12 @@ const TableNameScaAuthRole = "sca_auth_role"
|
||||
|
||||
// ScaAuthRole mapped from table <sca_auth_role>
|
||||
type ScaAuthRole struct {
|
||||
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
|
||||
RoleName string `gorm:"column:role_name;type:varchar(32);not null;comment:角色名称" json:"role_name"` // 角色名称
|
||||
RoleKey string `gorm:"column:role_key;type:varchar(64);not null;comment:角色关键字" json:"role_key"` // 角色关键字
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
|
||||
RoleName string `gorm:"column:role_name;type:varchar(32);not null;comment:角色名称" json:"role_name"` // 角色名称
|
||||
RoleKey string `gorm:"column:role_key;type:varchar(64);not null;comment:角色关键字" json:"role_key"` // 角色关键字
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
}
|
||||
|
||||
// TableName ScaAuthRole's table name
|
||||
|
@@ -14,23 +14,23 @@ const TableNameScaAuthUser = "sca_auth_user"
|
||||
|
||||
// ScaAuthUser mapped from table <sca_auth_user>
|
||||
type ScaAuthUser struct {
|
||||
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;uniqueIndex:id,priority:1;comment:自增ID;primary_key" json:"id,string"` // 自增ID
|
||||
UID string `gorm:"column:uid;type:varchar(50);not null;uniqueIndex:uid,priority:1;comment:唯一ID" json:"uid"` // 唯一ID
|
||||
Username string `gorm:"column:username;type:varchar(32);comment:用户名" json:"username"` // 用户名
|
||||
Nickname string `gorm:"column:nickname;type:varchar(32);comment:昵称" json:"nickname"` // 昵称
|
||||
Email string `gorm:"column:email;type:varchar(32);comment:邮箱" json:"email"` // 邮箱
|
||||
Phone string `gorm:"column:phone;type:varchar(32);comment:电话" json:"phone"` // 电话
|
||||
Password string `gorm:"column:password;type:varchar(64);comment:密码" json:"password"` // 密码
|
||||
Gender int64 `gorm:"column:gender;type:tinyint;comment:性别" json:"gender"` // 性别
|
||||
Avatar string `gorm:"column:avatar;type:longtext;comment:头像" json:"avatar"` // 头像
|
||||
Status int64 `gorm:"column:status;type:tinyint;comment:状态 0 正常 1 封禁" json:"status"` // 状态 0 正常 1 封禁
|
||||
Introduce string `gorm:"column:introduce;type:varchar(255);comment:介绍" json:"introduce"` // 介绍
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
Blog string `gorm:"column:blog;type:varchar(30);comment:博客" json:"blog"` // 博客
|
||||
Location string `gorm:"column:location;type:varchar(50);comment:地址" json:"location"` // 地址
|
||||
Company string `gorm:"column:company;type:varchar(50);comment:公司" json:"company"` // 公司
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;uniqueIndex:id,priority:1;comment:自增ID;primary_key" json:"id,string"` // 自增ID
|
||||
UID string `gorm:"column:uid;type:varchar(50);not null;uniqueIndex:uid,priority:1;comment:唯一ID" json:"uid"` // 唯一ID
|
||||
Username string `gorm:"column:username;type:varchar(32);comment:用户名" json:"username"` // 用户名
|
||||
Nickname string `gorm:"column:nickname;type:varchar(32);comment:昵称" json:"nickname"` // 昵称
|
||||
Email string `gorm:"column:email;type:varchar(32);comment:邮箱" json:"email"` // 邮箱
|
||||
Phone string `gorm:"column:phone;type:varchar(32);comment:电话" json:"phone"` // 电话
|
||||
Password string `gorm:"column:password;type:varchar(64);comment:密码" json:"password"` // 密码
|
||||
Gender int64 `gorm:"column:gender;type:tinyint(4);comment:性别" json:"gender"` // 性别
|
||||
Avatar string `gorm:"column:avatar;type:longtext;comment:头像" json:"avatar"` // 头像
|
||||
Status int64 `gorm:"column:status;type:tinyint(4);comment:状态 0 正常 1 封禁" json:"status"` // 状态 0 正常 1 封禁
|
||||
Introduce string `gorm:"column:introduce;type:varchar(255);comment:介绍" json:"introduce"` // 介绍
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
Blog string `gorm:"column:blog;type:varchar(30);comment:博客" json:"blog"` // 博客
|
||||
Location string `gorm:"column:location;type:varchar(50);comment:地址" json:"location"` // 地址
|
||||
Company string `gorm:"column:company;type:varchar(50);comment:公司" json:"company"` // 公司
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
}
|
||||
|
||||
// TableName ScaAuthUser's table name
|
||||
|
@@ -14,23 +14,23 @@ const TableNameScaAuthUserDevice = "sca_auth_user_device"
|
||||
|
||||
// ScaAuthUserDevice mapped from table <sca_auth_user_device>
|
||||
type ScaAuthUserDevice struct {
|
||||
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
|
||||
UserID string `gorm:"column:user_id;type:varchar(20);not null;comment:用户ID" json:"user_id"` // 用户ID
|
||||
IP string `gorm:"column:ip;type:varchar(20);comment:登录IP" json:"ip"` // 登录IP
|
||||
Location string `gorm:"column:location;type:varchar(20);comment:地址" json:"location"` // 地址
|
||||
Agent string `gorm:"column:agent;type:varchar(255);comment:设备信息" json:"agent"` // 设备信息
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
Browser string `gorm:"column:browser;type:varchar(20);comment:浏览器" json:"browser"` // 浏览器
|
||||
OperatingSystem string `gorm:"column:operating_system;type:varchar(20);comment:操作系统" json:"operating_system"` // 操作系统
|
||||
BrowserVersion string `gorm:"column:browser_version;type:varchar(20);comment:浏览器版本" json:"browser_version"` // 浏览器版本
|
||||
Mobile int64 `gorm:"column:mobile;type:tinyint(1);comment:是否为手机 0否1是" json:"mobile"` // 是否为手机 0否1是
|
||||
Bot int64 `gorm:"column:bot;type:tinyint(1);comment:是否为bot 0否1是" json:"bot"` // 是否为bot 0否1是
|
||||
Mozilla string `gorm:"column:mozilla;type:varchar(10);comment:火狐版本" json:"mozilla"` // 火狐版本
|
||||
Platform string `gorm:"column:platform;type:varchar(20);comment:平台" json:"platform"` // 平台
|
||||
EngineName string `gorm:"column:engine_name;type:varchar(20);comment:引擎名称" json:"engine_name"` // 引擎名称
|
||||
EngineVersion string `gorm:"column:engine_version;type:varchar(20);comment:引擎版本" json:"engine_version"` // 引擎版本
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
|
||||
UserID string `gorm:"column:user_id;type:varchar(20);not null;comment:用户ID" json:"user_id"` // 用户ID
|
||||
IP string `gorm:"column:ip;type:varchar(20);comment:登录IP" json:"ip"` // 登录IP
|
||||
Location string `gorm:"column:location;type:varchar(20);comment:地址" json:"location"` // 地址
|
||||
Agent string `gorm:"column:agent;type:varchar(255);comment:设备信息" json:"agent"` // 设备信息
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
Browser string `gorm:"column:browser;type:varchar(20);comment:浏览器" json:"browser"` // 浏览器
|
||||
OperatingSystem string `gorm:"column:operating_system;type:varchar(20);comment:操作系统" json:"operating_system"` // 操作系统
|
||||
BrowserVersion string `gorm:"column:browser_version;type:varchar(20);comment:浏览器版本" json:"browser_version"` // 浏览器版本
|
||||
Mobile int64 `gorm:"column:mobile;type:tinyint(1);comment:是否为手机 0否1是" json:"mobile"` // 是否为手机 0否1是
|
||||
Bot int64 `gorm:"column:bot;type:tinyint(1);comment:是否为bot 0否1是" json:"bot"` // 是否为bot 0否1是
|
||||
Mozilla string `gorm:"column:mozilla;type:varchar(10);comment:火狐版本" json:"mozilla"` // 火狐版本
|
||||
Platform string `gorm:"column:platform;type:varchar(20);comment:平台" json:"platform"` // 平台
|
||||
EngineName string `gorm:"column:engine_name;type:varchar(20);comment:引擎名称" json:"engine_name"` // 引擎名称
|
||||
EngineVersion string `gorm:"column:engine_version;type:varchar(20);comment:引擎版本" json:"engine_version"` // 引擎版本
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
}
|
||||
|
||||
// TableName ScaAuthUserDevice's table name
|
||||
|
@@ -14,14 +14,14 @@ const TableNameScaAuthUserSocial = "sca_auth_user_social"
|
||||
|
||||
// ScaAuthUserSocial mapped from table <sca_auth_user_social>
|
||||
type ScaAuthUserSocial struct {
|
||||
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
|
||||
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:用户ID" json:"user_id"` // 用户ID
|
||||
OpenID string `gorm:"column:open_id;type:varchar(50);not null;comment:第三方用户的 open id" json:"open_id"` // 第三方用户的 open id
|
||||
Source string `gorm:"column:source;type:varchar(10);comment:第三方用户来源" json:"source"` // 第三方用户来源
|
||||
Status int64 `gorm:"column:status;type:bigint;comment:状态 0正常 1 封禁" json:"status"` // 状态 0正常 1 封禁
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键ID;primary_key" json:"id,string"` // 主键ID
|
||||
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:用户ID" json:"user_id"` // 用户ID
|
||||
OpenID string `gorm:"column:open_id;type:varchar(50);not null;comment:第三方用户的 open id" json:"open_id"` // 第三方用户的 open id
|
||||
Source string `gorm:"column:source;type:varchar(10);comment:第三方用户来源" json:"source"` // 第三方用户来源
|
||||
Status int64 `gorm:"column:status;type:bigint(20);comment:状态 0正常 1 封禁" json:"status"` // 状态 0正常 1 封禁
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
}
|
||||
|
||||
// TableName ScaAuthUserSocial's table name
|
||||
|
@@ -12,11 +12,11 @@ const TableNameScaCommentLike = "sca_comment_likes"
|
||||
|
||||
// ScaCommentLike mapped from table <sca_comment_likes>
|
||||
type ScaCommentLike struct {
|
||||
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键id;primary_key" json:"id,string"` // 主键id
|
||||
TopicID string `gorm:"column:topic_id;type:varchar(50);not null;comment:话题ID" json:"topic_id"` // 话题ID
|
||||
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:用户ID" json:"user_id"` // 用户ID
|
||||
CommentID int64 `gorm:"column:comment_id;type:bigint;not null;comment:评论ID" json:"comment_id"` // 评论ID
|
||||
LikeTime time.Time `gorm:"column:like_time;type:timestamp;comment:点赞时间" json:"like_time"` // 点赞时间
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键id;primary_key" json:"id,string"` // 主键id
|
||||
TopicID string `gorm:"column:topic_id;type:varchar(50);not null;comment:话题ID" json:"topic_id"` // 话题ID
|
||||
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:用户ID" json:"user_id"` // 用户ID
|
||||
CommentID int64 `gorm:"column:comment_id;type:bigint(20);not null;comment:评论ID" json:"comment_id"` // 评论ID
|
||||
LikeTime time.Time `gorm:"column:like_time;type:timestamp;comment:点赞时间" json:"like_time"` // 点赞时间
|
||||
}
|
||||
|
||||
// TableName ScaCommentLike's table name
|
||||
|
@@ -15,27 +15,27 @@ const TableNameScaCommentReply = "sca_comment_reply"
|
||||
|
||||
// ScaCommentReply mapped from table <sca_comment_reply>
|
||||
type ScaCommentReply struct {
|
||||
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;uniqueIndex:id,priority:1;comment:主键id;primary_key" json:"id,string"` // 主键id
|
||||
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:评论用户id" json:"user_id"` // 评论用户id
|
||||
TopicID string `gorm:"column:topic_id;type:varchar(50);comment:评论话题id" json:"topic_id"` // 评论话题id
|
||||
TopicType int64 `gorm:"column:topic_type;type:tinyint;comment:话题类型" json:"topic_type"` // 话题类型
|
||||
Content string `gorm:"column:content;type:text;comment:评论内容" json:"content"` // 评论内容
|
||||
CommentType int64 `gorm:"column:comment_type;type:bigint;comment:评论类型 0评论 1 回复" json:"comment_type"` // 评论类型 0评论 1 回复
|
||||
ReplyTo int64 `gorm:"column:reply_to;type:bigint;comment:回复子评论ID" json:"reply_to"` // 回复子评论ID
|
||||
ReplyID int64 `gorm:"column:reply_id;type:bigint;comment:回复父评论Id" json:"reply_id"` // 回复父评论Id
|
||||
ReplyUser string `gorm:"column:reply_user;type:varchar(50);comment:回复人id" json:"reply_user"` // 回复人id
|
||||
Author int64 `gorm:"column:author;type:tinyint;comment:评论回复是否作者 0否 1是" json:"author"` // 评论回复是否作者 0否 1是
|
||||
Likes int64 `gorm:"column:likes;type:bigint;comment:点赞数" json:"likes"` // 点赞数
|
||||
ReplyCount int64 `gorm:"column:reply_count;type:bigint;comment:回复数量" json:"reply_count"` // 回复数量
|
||||
Browser string `gorm:"column:browser;type:varchar(50);comment:浏览器" json:"browser"` // 浏览器
|
||||
OperatingSystem string `gorm:"column:operating_system;type:varchar(50);comment:操作系统" json:"operating_system"` // 操作系统
|
||||
CommentIP string `gorm:"column:comment_ip;type:varchar(50);comment:IP地址" json:"comment_ip"` // IP地址
|
||||
Location string `gorm:"column:location;type:varchar(50);comment:地址" json:"location"` // 地址
|
||||
Agent string `gorm:"column:agent;type:varchar(255);comment:设备信息" json:"agent"` // 设备信息
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
Version optimisticlock.Version `gorm:"column:version;type:bigint;comment:版本" json:"version"` // 版本
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:datetime;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;uniqueIndex:id,priority:1;comment:主键id;primary_key" json:"id,string"` // 主键id
|
||||
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:评论用户id" json:"user_id"` // 评论用户id
|
||||
TopicID string `gorm:"column:topic_id;type:varchar(50);comment:评论话题id" json:"topic_id"` // 评论话题id
|
||||
TopicType int64 `gorm:"column:topic_type;type:tinyint(4);comment:话题类型" json:"topic_type"` // 话题类型
|
||||
Content string `gorm:"column:content;type:text;comment:评论内容" json:"content"` // 评论内容
|
||||
CommentType int64 `gorm:"column:comment_type;type:bigint(20);comment:评论类型 0评论 1 回复" json:"comment_type"` // 评论类型 0评论 1 回复
|
||||
ReplyTo int64 `gorm:"column:reply_to;type:bigint(20);comment:回复子评论ID" json:"reply_to"` // 回复子评论ID
|
||||
ReplyID int64 `gorm:"column:reply_id;type:bigint(20);comment:回复父评论Id" json:"reply_id"` // 回复父评论Id
|
||||
ReplyUser string `gorm:"column:reply_user;type:varchar(50);comment:回复人id" json:"reply_user"` // 回复人id
|
||||
Author int64 `gorm:"column:author;type:tinyint(4);comment:评论回复是否作者 0否 1是" json:"author"` // 评论回复是否作者 0否 1是
|
||||
Likes int64 `gorm:"column:likes;type:bigint(20);comment:点赞数" json:"likes"` // 点赞数
|
||||
ReplyCount int64 `gorm:"column:reply_count;type:bigint(20);comment:回复数量" json:"reply_count"` // 回复数量
|
||||
Browser string `gorm:"column:browser;type:varchar(50);comment:浏览器" json:"browser"` // 浏览器
|
||||
OperatingSystem string `gorm:"column:operating_system;type:varchar(50);comment:操作系统" json:"operating_system"` // 操作系统
|
||||
CommentIP string `gorm:"column:comment_ip;type:varchar(50);comment:IP地址" json:"comment_ip"` // IP地址
|
||||
Location string `gorm:"column:location;type:varchar(50);comment:地址" json:"location"` // 地址
|
||||
Agent string `gorm:"column:agent;type:varchar(255);comment:设备信息" json:"agent"` // 设备信息
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
Version optimisticlock.Version `gorm:"column:version;type:bigint(20);comment:版本" json:"version"` // 版本
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:datetime;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
}
|
||||
|
||||
// TableName ScaCommentReply's table name
|
||||
|
@@ -1,31 +0,0 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
const TableNameScaFileFolder = "sca_file_folder"
|
||||
|
||||
// ScaFileFolder mapped from table <sca_file_folder>
|
||||
type ScaFileFolder struct {
|
||||
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
FolderName string `gorm:"column:folder_name;type:varchar(512);comment:文件夹名称" json:"folder_name"` // 文件夹名称
|
||||
ParentFolderID int64 `gorm:"column:parent_folder_id;type:bigint;comment:父文件夹编号" json:"parent_folder_id"` // 父文件夹编号
|
||||
FolderAddr string `gorm:"column:folder_addr;type:varchar(1024);comment:文件夹名称" json:"folder_addr"` // 文件夹名称
|
||||
UserID string `gorm:"column:user_id;type:varchar(20);comment:用户编号" json:"user_id"` // 用户编号
|
||||
FolderSource int64 `gorm:"column:folder_source;type:int;comment:文件夹来源 0相册 1 评论" json:"folder_source"` // 文件夹来源 0相册 1 评论
|
||||
CreatedAt *time.Time `gorm:"column:created_at;type:datetime;default:CURRENT_TIMESTAMP;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt *time.Time `gorm:"column:updated_at;type:datetime;default:CURRENT_TIMESTAMP;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:datetime;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
}
|
||||
|
||||
// TableName ScaFileFolder's table name
|
||||
func (*ScaFileFolder) TableName() string {
|
||||
return TableNameScaFileFolder
|
||||
}
|
@@ -1,34 +0,0 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
const TableNameScaFileInfo = "sca_file_info"
|
||||
|
||||
// ScaFileInfo mapped from table <sca_file_info>
|
||||
type ScaFileInfo struct {
|
||||
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
FileName string `gorm:"column:file_name;type:varchar(50);comment:文件名" json:"file_name"` // 文件名
|
||||
FileSize float64 `gorm:"column:file_size;type:double;comment:文件大小" json:"file_size"` // 文件大小
|
||||
FileTypeID int64 `gorm:"column:file_type_id;type:bigint;comment:文件类型编号" json:"file_type_id"` // 文件类型编号
|
||||
UploadTime time.Time `gorm:"column:upload_time;type:datetime;comment:上传时间" json:"upload_time"` // 上传时间
|
||||
FolderID int64 `gorm:"column:folder_id;type:bigint;comment:文件夹编号" json:"folder_id"` // 文件夹编号
|
||||
UserID string `gorm:"column:user_id;type:varchar(20);comment:用户编号" json:"user_id"` // 用户编号
|
||||
FileSource int64 `gorm:"column:file_source;type:int;comment:文件来源 0 相册 1 评论" json:"file_source"` // 文件来源 0 相册 1 评论
|
||||
Status int64 `gorm:"column:status;type:int;comment:文件状态" json:"status"` // 文件状态
|
||||
CreatedAt *time.Time `gorm:"column:created_at;type:datetime;default:CURRENT_TIMESTAMP;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt *time.Time `gorm:"column:updated_at;type:datetime;default:CURRENT_TIMESTAMP;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:datetime;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
}
|
||||
|
||||
// TableName ScaFileInfo's table name
|
||||
func (*ScaFileInfo) TableName() string {
|
||||
return TableNameScaFileInfo
|
||||
}
|
@@ -1,28 +0,0 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
const TableNameScaFileRecycle = "sca_file_recycle"
|
||||
|
||||
// ScaFileRecycle mapped from table <sca_file_recycle>
|
||||
type ScaFileRecycle struct {
|
||||
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
FileID int64 `gorm:"column:file_id;type:bigint;comment:文件编号" json:"file_id"` // 文件编号
|
||||
FolderID int64 `gorm:"column:folder_id;type:bigint;comment:文件夹编号" json:"folder_id"` // 文件夹编号
|
||||
Type int64 `gorm:"column:type;type:int;comment:类型 0 文件 1 文件夹" json:"type"` // 类型 0 文件 1 文件夹
|
||||
UserID string `gorm:"column:user_id;type:varchar(20);comment:用户编号" json:"user_id"` // 用户编号
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:datetime;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
OriginalPath string `gorm:"column:original_path;type:varchar(1024);comment:原始路径" json:"original_path"` // 原始路径
|
||||
FileSource int64 `gorm:"column:file_source;type:int;comment:文件来源 0 相册 1 评论" json:"file_source"` // 文件来源 0 相册 1 评论
|
||||
}
|
||||
|
||||
// TableName ScaFileRecycle's table name
|
||||
func (*ScaFileRecycle) TableName() string {
|
||||
return TableNameScaFileRecycle
|
||||
}
|
@@ -1,29 +0,0 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
const TableNameScaFileType = "sca_file_type"
|
||||
|
||||
// ScaFileType mapped from table <sca_file_type>
|
||||
type ScaFileType struct {
|
||||
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
TypeName string `gorm:"column:type_name;type:varchar(100);comment:类型名称" json:"type_name"` // 类型名称
|
||||
MimeType string `gorm:"column:mime_type;type:varchar(50);comment:MIME 类型" json:"mime_type"` // MIME 类型
|
||||
Status int64 `gorm:"column:status;type:int;comment:类型状态" json:"status"` // 类型状态
|
||||
CreatedAt *time.Time `gorm:"column:created_at;type:datetime;default:CURRENT_TIMESTAMP;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt *time.Time `gorm:"column:updated_at;type:datetime;default:CURRENT_TIMESTAMP;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:datetime;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
}
|
||||
|
||||
// TableName ScaFileType's table name
|
||||
func (*ScaFileType) TableName() string {
|
||||
return TableNameScaFileType
|
||||
}
|
@@ -14,15 +14,15 @@ const TableNameScaMessageReport = "sca_message_report"
|
||||
|
||||
// ScaMessageReport mapped from table <sca_message_report>
|
||||
type ScaMessageReport struct {
|
||||
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
UserID string `gorm:"column:user_id;type:varchar(20);comment:用户Id" json:"user_id"` // 用户Id
|
||||
Type int64 `gorm:"column:type;type:tinyint;comment:举报类型 0评论 1 相册" json:"type"` // 举报类型 0评论 1 相册
|
||||
CommentID int64 `gorm:"column:comment_id;type:bigint;comment:评论Id" json:"comment_id"` // 评论Id
|
||||
Type int64 `gorm:"column:type;type:tinyint(4);comment:举报类型 0评论 1 相册" json:"type"` // 举报类型 0评论 1 相册
|
||||
CommentID int64 `gorm:"column:comment_id;type:bigint(20);comment:评论Id" json:"comment_id"` // 评论Id
|
||||
TopicID string `gorm:"column:topic_id;type:varchar(20);comment:话题Id" json:"topic_id"` // 话题Id
|
||||
ReportType int64 `gorm:"column:report_type;type:tinyint;comment:举报" json:"report_type"` // 举报
|
||||
ReportType int64 `gorm:"column:report_type;type:tinyint(4);comment:举报" json:"report_type"` // 举报
|
||||
ReportContent string `gorm:"column:report_content;type:text;comment:举报说明内容" json:"report_content"` // 举报说明内容
|
||||
ReportTag string `gorm:"column:report_tag;type:varchar(255);comment:举报标签" json:"report_tag"` // 举报标签
|
||||
Status int64 `gorm:"column:status;type:tinyint;comment:状态(0 未处理 1 已处理)" json:"status"` // 状态(0 未处理 1 已处理)
|
||||
Status int64 `gorm:"column:status;type:tinyint(4);comment:状态(0 未处理 1 已处理)" json:"status"` // 状态(0 未处理 1 已处理)
|
||||
CreatedAt *time.Time `gorm:"column:created_at;type:timestamp;default:CURRENT_TIMESTAMP;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt *time.Time `gorm:"column:updated_at;type:timestamp;default:CURRENT_TIMESTAMP;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
|
app/auth/model/mysql/model/sca_storage_config.gen.go (new file, 33 lines)
@@ -0,0 +1,33 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.

package model

import (
	"time"

	"gorm.io/gorm"
)

const TableNameScaStorageConfig = "sca_storage_config"

// ScaStorageConfig mapped from table <sca_storage_config>
type ScaStorageConfig struct {
	ID        int64          `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
	UserID    string         `gorm:"column:user_id;type:varchar(50);not null;comment:用户ID" json:"user_id"` // 用户ID
	Type      string         `gorm:"column:type;type:varchar(50);comment:类型" json:"type"` // 类型
	Endpoint  string         `gorm:"column:endpoint;type:varchar(50);comment:地址" json:"endpoint"` // 地址
	AccessKey string         `gorm:"column:access_key;type:varchar(100);comment:密钥key" json:"access_key"` // 密钥key
	SecretKey string         `gorm:"column:secret_key;type:varchar(100);comment:密钥" json:"secret_key"` // 密钥
	Bucket    string         `gorm:"column:bucket;type:varchar(50);comment:存储桶" json:"bucket"` // 存储桶
	Region    string         `gorm:"column:region;type:varchar(50);comment:地域" json:"region"` // 地域
	CreatedAt time.Time      `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
	UpdatedAt time.Time      `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
	DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}

// TableName ScaStorageConfig's table name
func (*ScaStorageConfig) TableName() string {
	return TableNameScaStorageConfig
}
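The new sca_storage_config table stores per-user connection details (endpoint, access/secret key, bucket, region). A plausible way to encapsulate object storage operations behind this model, which is the theme of this commit, is a small interface plus a constructor that builds an S3-compatible client from a config row. The package and interface names, the method set, and the use of minio-go below are assumptions for illustration; the repository's actual service layer is not shown in this diff.

```go
// Illustrative sketch only: adapting a ScaStorageConfig row to an
// S3-compatible client. Names and library choice are assumptions.
package storage

import (
	"context"
	"io"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)

// ObjectStorage is a hypothetical abstraction over object storage backends.
type ObjectStorage interface {
	Put(ctx context.Context, key string, r io.Reader, size int64, contentType string) error
	Remove(ctx context.Context, key string) error
}

// minioStorage adapts one user's storage configuration to the interface above.
type minioStorage struct {
	client *minio.Client
	bucket string
}

// NewFromConfig builds a client from a sca_storage_config record.
func NewFromConfig(cfg *model.ScaStorageConfig) (ObjectStorage, error) {
	cli, err := minio.New(cfg.Endpoint, &minio.Options{
		Creds:  credentials.NewStaticV4(cfg.AccessKey, cfg.SecretKey, ""),
		Region: cfg.Region,
		Secure: true,
	})
	if err != nil {
		return nil, err
	}
	return &minioStorage{client: cli, bucket: cfg.Bucket}, nil
}

func (s *minioStorage) Put(ctx context.Context, key string, r io.Reader, size int64, contentType string) error {
	_, err := s.client.PutObject(ctx, s.bucket, key, r, size, minio.PutObjectOptions{ContentType: contentType})
	return err
}

func (s *minioStorage) Remove(ctx context.Context, key string) error {
	return s.client.RemoveObject(ctx, s.bucket, key, minio.RemoveObjectOptions{})
}
```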
app/auth/model/mysql/model/sca_storage_info.gen.go (new file, 40 lines)
@@ -0,0 +1,40 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.

package model

import (
	"time"

	"gorm.io/gorm"
)

const TableNameScaStorageInfo = "sca_storage_info"

// ScaStorageInfo mapped from table <sca_storage_info>
type ScaStorageInfo struct {
	ID        int64          `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
	UserID    string         `gorm:"column:user_id;type:varchar(50);not null;comment:用户ID" json:"user_id"` // 用户ID
	Storage   string         `gorm:"column:storage;type:varchar(50);comment:存储空间" json:"storage"` // 存储空间
	Bucket    string         `gorm:"column:bucket;type:varchar(50);comment:存储桶" json:"bucket"` // 存储桶
	Type      string         `gorm:"column:type;type:varchar(50);comment:类型" json:"type"` // 类型
	Path      string         `gorm:"column:path;type:varchar(255);comment:路径" json:"path"` // 路径
	FileName  string         `gorm:"column:file_name;type:varchar(100);comment:名称" json:"file_name"` // 名称
	Category  string         `gorm:"column:category;type:varchar(50);comment:分类" json:"category"` // 分类
	Loaction  string         `gorm:"column:loaction;type:varchar(100);comment:地址" json:"loaction"` // 地址
	Hash      string         `gorm:"column:hash;type:varchar(255);comment:哈希值" json:"hash"` // 哈希值
	Anime     string         `gorm:"column:anime;type:varchar(50);comment:是否是动漫图片" json:"anime"` // 是否是动漫图片
	HasFace   string         `gorm:"column:has_face;type:varchar(50);comment:是否人像" json:"has_face"` // 是否人像
	FaceID    int64          `gorm:"column:face_id;type:bigint(20);comment:人像ID" json:"face_id"` // 人像ID
	Landscape string         `gorm:"column:landscape;type:varchar(50);comment:风景类型" json:"landscape"` // 风景类型
	Objects   string         `gorm:"column:objects;type:varchar(50);comment:对象识别" json:"objects"` // 对象识别
	CreatedAt time.Time      `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
	UpdatedAt time.Time      `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
	DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}

// TableName ScaStorageInfo's table name
func (*ScaStorageInfo) TableName() string {
	return TableNameScaStorageInfo
}
app/auth/model/mysql/model/sca_storage_tag.gen.go (new file, 28 lines)
@@ -0,0 +1,28 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.

package model

import (
	"time"

	"gorm.io/gorm"
)

const TableNameScaStorageTag = "sca_storage_tag"

// ScaStorageTag mapped from table <sca_storage_tag>
type ScaStorageTag struct {
	ID        int64          `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
	FileID    int64          `gorm:"column:file_id;type:bigint(20);comment:文件ID" json:"file_id"` // 文件ID
	TagID     int64          `gorm:"column:tag_id;type:bigint(20);comment:标签ID" json:"tag_id"` // 标签ID
	CreatedAt time.Time      `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
	UpdatedAt time.Time      `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
	DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}

// TableName ScaStorageTag's table name
func (*ScaStorageTag) TableName() string {
	return TableNameScaStorageTag
}

app/auth/model/mysql/model/sca_storage_tag_info.gen.go (new file, 28 lines)
@@ -0,0 +1,28 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.

package model

import (
	"time"

	"gorm.io/gorm"
)

const TableNameScaStorageTagInfo = "sca_storage_tag_info"

// ScaStorageTagInfo mapped from table <sca_storage_tag_info>
type ScaStorageTagInfo struct {
	ID        int64          `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
	TagName   string         `gorm:"column:tag_name;type:varchar(50);not null;comment:标签名称" json:"tag_name"` // 标签名称
	TagKey    string         `gorm:"column:tag_key;type:varchar(50);comment:标签关键字" json:"tag_key"` // 标签关键字
	CreatedAt time.Time      `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
	UpdatedAt time.Time      `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
	DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
}

// TableName ScaStorageTagInfo's table name
func (*ScaStorageTagInfo) TableName() string {
	return TableNameScaStorageTagInfo
}
@@ -16,10 +16,10 @@ const TableNameScaUserFollow = "sca_user_follows"
 type ScaUserFollow struct {
 	FollowerID string         `gorm:"column:follower_id;type:varchar(50);not null;comment:关注者" json:"follower_id"` // 关注者
 	FolloweeID string         `gorm:"column:followee_id;type:varchar(50);not null;comment:被关注者" json:"followee_id"` // 被关注者
-	Status     int64          `gorm:"column:status;type:tinyint unsigned;not null;comment:关注状态(0 未互关 1 互关)" json:"status"` // 关注状态(0 未互关 1 互关)
+	Status     int64          `gorm:"column:status;type:tinyint(3) unsigned;not null;comment:关注状态(0 未互关 1 互关)" json:"status"` // 关注状态(0 未互关 1 互关)
 	CreatedAt  *time.Time     `gorm:"column:created_at;type:timestamp;default:CURRENT_TIMESTAMP;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
 	UpdatedAt  *time.Time     `gorm:"column:updated_at;type:timestamp;default:CURRENT_TIMESTAMP;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
-	ID         int64          `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;primary_key" json:"id,string"`
+	ID         int64          `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;primary_key" json:"id,string"`
 	DeletedAt  gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
 }
 
@@ -14,13 +14,13 @@ const TableNameScaUserLevel = "sca_user_level"
|
||||
|
||||
// ScaUserLevel mapped from table <sca_user_level>
|
||||
type ScaUserLevel struct {
|
||||
ID int64 `gorm:"column:id;type:bigint;primaryKey;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
UserID string `gorm:"column:user_id;type:varchar(50);comment:用户Id" json:"user_id"` // 用户Id
|
||||
LevelType int64 `gorm:"column:level_type;type:tinyint unsigned;comment:等级类型" json:"level_type"` // 等级类型
|
||||
Level int64 `gorm:"column:level;type:int;comment:等级" json:"level"` // 等级
|
||||
LevelType int64 `gorm:"column:level_type;type:tinyint(3) unsigned;comment:等级类型" json:"level_type"` // 等级类型
|
||||
Level int64 `gorm:"column:level;type:bigint(20);comment:等级" json:"level"` // 等级
|
||||
LevelName string `gorm:"column:level_name;type:varchar(50);comment:等级名称" json:"level_name"` // 等级名称
|
||||
ExpStart int64 `gorm:"column:exp_start;type:bigint;comment:开始经验值" json:"exp_start"` // 开始经验值
|
||||
ExpEnd int64 `gorm:"column:exp_end;type:bigint;comment:结束经验值" json:"exp_end"` // 结束经验值
|
||||
ExpStart int64 `gorm:"column:exp_start;type:bigint(20);comment:开始经验值" json:"exp_start"` // 开始经验值
|
||||
ExpEnd int64 `gorm:"column:exp_end;type:bigint(20);comment:结束经验值" json:"exp_end"` // 结束经验值
|
||||
Description string `gorm:"column:description;type:text;comment:等级描述" json:"description"` // 等级描述
|
||||
CreatedAt *time.Time `gorm:"column:created_at;type:timestamp;default:CURRENT_TIMESTAMP;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt *time.Time `gorm:"column:updated_at;type:timestamp;default:CURRENT_TIMESTAMP;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
|
@@ -14,15 +14,15 @@ const TableNameScaUserMessage = "sca_user_message"
|
||||
|
||||
// ScaUserMessage mapped from table <sca_user_message>
|
||||
type ScaUserMessage struct {
|
||||
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
TopicID string `gorm:"column:topic_id;type:varchar(50);comment:话题Id" json:"topic_id"` // 话题Id
|
||||
FromID string `gorm:"column:from_id;type:varchar(50);comment:来自人" json:"from_id"` // 来自人
|
||||
ToID string `gorm:"column:to_id;type:varchar(50);comment:送达人" json:"to_id"` // 送达人
|
||||
Content string `gorm:"column:content;type:text;comment:消息内容" json:"content"` // 消息内容
|
||||
IsRead int64 `gorm:"column:is_read;type:tinyint;comment:是否已读" json:"is_read"` // 是否已读
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
TopicID string `gorm:"column:topic_id;type:varchar(50);comment:话题Id" json:"topic_id"` // 话题Id
|
||||
FromID string `gorm:"column:from_id;type:varchar(50);comment:来自人" json:"from_id"` // 来自人
|
||||
ToID string `gorm:"column:to_id;type:varchar(50);comment:送达人" json:"to_id"` // 送达人
|
||||
Content string `gorm:"column:content;type:text;comment:消息内容" json:"content"` // 消息内容
|
||||
IsRead int64 `gorm:"column:is_read;type:tinyint(4);comment:是否已读" json:"is_read"` // 是否已读
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
}
|
||||
|
||||
// TableName ScaUserMessage's table name
|
||||
|
@@ -3,6 +3,7 @@ package mysql
 import (
 	"log"
 	"os"
+	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
 	"schisandra-album-cloud-microservices/app/auth/model/mysql/query"
 	"time"
 
@@ -39,6 +40,8 @@ func NewMySQL(url string, maxOpenConn int, maxIdleConn int, client *redis.Client
 	sqlDB.SetMaxOpenConns(maxOpenConn)
 	sqlDB.SetMaxIdleConns(maxIdleConn)
 	useDB := query.Use(db)
+	// migrate
+	Migrate(db)
 	// cache
 	gormCache, err := cache.NewGorm2Cache(&config.CacheConfig{
 		CacheLevel: config.CacheLevelAll,
@@ -63,3 +66,26 @@ func NewMySQL(url string, maxOpenConn int, maxIdleConn int, client *redis.Client
 
 	return db, useDB
 }
+
+func Migrate(db *gorm.DB) {
+	err := db.AutoMigrate(
+		&model.ScaAuthUser{},
+		&model.ScaAuthRole{},
+		&model.ScaAuthPermissionRule{},
+		&model.ScaAuthMenu{},
+		&model.ScaAuthUserDevice{},
+		&model.ScaAuthUserSocial{},
+		&model.ScaCommentLike{},
+		&model.ScaCommentReply{},
+		&model.ScaStorageInfo{},
+		&model.ScaStorageTag{},
+		&model.ScaStorageTagInfo{},
+		&model.ScaMessageReport{},
+		&model.ScaStorageConfig{},
+		&model.ScaUserFollow{},
+		&model.ScaUserLevel{},
+		&model.ScaUserMessage{})
+	if err != nil {
+		panic(err)
+	}
+}
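After migration, NewMySQL returns both the raw *gorm.DB and the generated query object. A minimal sketch of how the generated, type-safe query API is typically consumed is shown below; the package name, function name, and lookup value are assumptions for illustration, not code from this repository.

```go
// Illustrative only: consuming the generated query API from
// app/auth/model/mysql/query. Package and function names are hypothetical.
package repository

import (
	"context"
	"log"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/query"
)

// findUserByUID looks up a user through the generated DAO (sketch).
func findUserByUID(ctx context.Context, q *query.Query, uid string) {
	user, err := q.ScaAuthUser.WithContext(ctx).
		Where(q.ScaAuthUser.UID.Eq(uid)).
		First()
	if err != nil {
		log.Printf("lookup failed: %v", err)
		return
	}
	log.Printf("found user %s (%s)", user.Username, user.Nickname)
}
```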
@@ -25,11 +25,11 @@ var (
|
||||
ScaAuthUserSocial *scaAuthUserSocial
|
||||
ScaCommentLike *scaCommentLike
|
||||
ScaCommentReply *scaCommentReply
|
||||
ScaFileFolder *scaFileFolder
|
||||
ScaFileInfo *scaFileInfo
|
||||
ScaFileRecycle *scaFileRecycle
|
||||
ScaFileType *scaFileType
|
||||
ScaMessageReport *scaMessageReport
|
||||
ScaStorageConfig *scaStorageConfig
|
||||
ScaStorageInfo *scaStorageInfo
|
||||
ScaStorageTag *scaStorageTag
|
||||
ScaStorageTagInfo *scaStorageTagInfo
|
||||
ScaUserFollow *scaUserFollow
|
||||
ScaUserLevel *scaUserLevel
|
||||
ScaUserMessage *scaUserMessage
|
||||
@@ -45,11 +45,11 @@ func SetDefault(db *gorm.DB, opts ...gen.DOOption) {
|
||||
ScaAuthUserSocial = &Q.ScaAuthUserSocial
|
||||
ScaCommentLike = &Q.ScaCommentLike
|
||||
ScaCommentReply = &Q.ScaCommentReply
|
||||
ScaFileFolder = &Q.ScaFileFolder
|
||||
ScaFileInfo = &Q.ScaFileInfo
|
||||
ScaFileRecycle = &Q.ScaFileRecycle
|
||||
ScaFileType = &Q.ScaFileType
|
||||
ScaMessageReport = &Q.ScaMessageReport
|
||||
ScaStorageConfig = &Q.ScaStorageConfig
|
||||
ScaStorageInfo = &Q.ScaStorageInfo
|
||||
ScaStorageTag = &Q.ScaStorageTag
|
||||
ScaStorageTagInfo = &Q.ScaStorageTagInfo
|
||||
ScaUserFollow = &Q.ScaUserFollow
|
||||
ScaUserLevel = &Q.ScaUserLevel
|
||||
ScaUserMessage = &Q.ScaUserMessage
|
||||
@@ -66,11 +66,11 @@ func Use(db *gorm.DB, opts ...gen.DOOption) *Query {
|
||||
ScaAuthUserSocial: newScaAuthUserSocial(db, opts...),
|
||||
ScaCommentLike: newScaCommentLike(db, opts...),
|
||||
ScaCommentReply: newScaCommentReply(db, opts...),
|
||||
ScaFileFolder: newScaFileFolder(db, opts...),
|
||||
ScaFileInfo: newScaFileInfo(db, opts...),
|
||||
ScaFileRecycle: newScaFileRecycle(db, opts...),
|
||||
ScaFileType: newScaFileType(db, opts...),
|
||||
ScaMessageReport: newScaMessageReport(db, opts...),
|
||||
ScaStorageConfig: newScaStorageConfig(db, opts...),
|
||||
ScaStorageInfo: newScaStorageInfo(db, opts...),
|
||||
ScaStorageTag: newScaStorageTag(db, opts...),
|
||||
ScaStorageTagInfo: newScaStorageTagInfo(db, opts...),
|
||||
ScaUserFollow: newScaUserFollow(db, opts...),
|
||||
ScaUserLevel: newScaUserLevel(db, opts...),
|
||||
ScaUserMessage: newScaUserMessage(db, opts...),
|
||||
@@ -88,11 +88,11 @@ type Query struct {
|
||||
ScaAuthUserSocial scaAuthUserSocial
|
||||
ScaCommentLike scaCommentLike
|
||||
ScaCommentReply scaCommentReply
|
||||
ScaFileFolder scaFileFolder
|
||||
ScaFileInfo scaFileInfo
|
||||
ScaFileRecycle scaFileRecycle
|
||||
ScaFileType scaFileType
|
||||
ScaMessageReport scaMessageReport
|
||||
ScaStorageConfig scaStorageConfig
|
||||
ScaStorageInfo scaStorageInfo
|
||||
ScaStorageTag scaStorageTag
|
||||
ScaStorageTagInfo scaStorageTagInfo
|
||||
ScaUserFollow scaUserFollow
|
||||
ScaUserLevel scaUserLevel
|
||||
ScaUserMessage scaUserMessage
|
||||
@@ -111,11 +111,11 @@ func (q *Query) clone(db *gorm.DB) *Query {
|
||||
ScaAuthUserSocial: q.ScaAuthUserSocial.clone(db),
|
||||
ScaCommentLike: q.ScaCommentLike.clone(db),
|
||||
ScaCommentReply: q.ScaCommentReply.clone(db),
|
||||
ScaFileFolder: q.ScaFileFolder.clone(db),
|
||||
ScaFileInfo: q.ScaFileInfo.clone(db),
|
||||
ScaFileRecycle: q.ScaFileRecycle.clone(db),
|
||||
ScaFileType: q.ScaFileType.clone(db),
|
||||
ScaMessageReport: q.ScaMessageReport.clone(db),
|
||||
ScaStorageConfig: q.ScaStorageConfig.clone(db),
|
||||
ScaStorageInfo: q.ScaStorageInfo.clone(db),
|
||||
ScaStorageTag: q.ScaStorageTag.clone(db),
|
||||
ScaStorageTagInfo: q.ScaStorageTagInfo.clone(db),
|
||||
ScaUserFollow: q.ScaUserFollow.clone(db),
|
||||
ScaUserLevel: q.ScaUserLevel.clone(db),
|
||||
ScaUserMessage: q.ScaUserMessage.clone(db),
|
||||
@@ -141,11 +141,11 @@ func (q *Query) ReplaceDB(db *gorm.DB) *Query {
|
||||
ScaAuthUserSocial: q.ScaAuthUserSocial.replaceDB(db),
|
||||
ScaCommentLike: q.ScaCommentLike.replaceDB(db),
|
||||
ScaCommentReply: q.ScaCommentReply.replaceDB(db),
|
||||
ScaFileFolder: q.ScaFileFolder.replaceDB(db),
|
||||
ScaFileInfo: q.ScaFileInfo.replaceDB(db),
|
||||
ScaFileRecycle: q.ScaFileRecycle.replaceDB(db),
|
||||
ScaFileType: q.ScaFileType.replaceDB(db),
|
||||
ScaMessageReport: q.ScaMessageReport.replaceDB(db),
|
||||
ScaStorageConfig: q.ScaStorageConfig.replaceDB(db),
|
||||
ScaStorageInfo: q.ScaStorageInfo.replaceDB(db),
|
||||
ScaStorageTag: q.ScaStorageTag.replaceDB(db),
|
||||
ScaStorageTagInfo: q.ScaStorageTagInfo.replaceDB(db),
|
||||
ScaUserFollow: q.ScaUserFollow.replaceDB(db),
|
||||
ScaUserLevel: q.ScaUserLevel.replaceDB(db),
|
||||
ScaUserMessage: q.ScaUserMessage.replaceDB(db),
|
||||
@@ -161,11 +161,11 @@ type queryCtx struct {
|
||||
ScaAuthUserSocial IScaAuthUserSocialDo
|
||||
ScaCommentLike IScaCommentLikeDo
|
||||
ScaCommentReply IScaCommentReplyDo
|
||||
ScaFileFolder IScaFileFolderDo
|
||||
ScaFileInfo IScaFileInfoDo
|
||||
ScaFileRecycle IScaFileRecycleDo
|
||||
ScaFileType IScaFileTypeDo
|
||||
ScaMessageReport IScaMessageReportDo
|
||||
ScaStorageConfig IScaStorageConfigDo
|
||||
ScaStorageInfo IScaStorageInfoDo
|
||||
ScaStorageTag IScaStorageTagDo
|
||||
ScaStorageTagInfo IScaStorageTagInfoDo
|
||||
ScaUserFollow IScaUserFollowDo
|
||||
ScaUserLevel IScaUserLevelDo
|
||||
ScaUserMessage IScaUserMessageDo
|
||||
@@ -181,11 +181,11 @@ func (q *Query) WithContext(ctx context.Context) *queryCtx {
|
||||
ScaAuthUserSocial: q.ScaAuthUserSocial.WithContext(ctx),
|
||||
ScaCommentLike: q.ScaCommentLike.WithContext(ctx),
|
||||
ScaCommentReply: q.ScaCommentReply.WithContext(ctx),
|
||||
ScaFileFolder: q.ScaFileFolder.WithContext(ctx),
|
||||
ScaFileInfo: q.ScaFileInfo.WithContext(ctx),
|
||||
ScaFileRecycle: q.ScaFileRecycle.WithContext(ctx),
|
||||
ScaFileType: q.ScaFileType.WithContext(ctx),
|
||||
ScaMessageReport: q.ScaMessageReport.WithContext(ctx),
|
||||
ScaStorageConfig: q.ScaStorageConfig.WithContext(ctx),
|
||||
ScaStorageInfo: q.ScaStorageInfo.WithContext(ctx),
|
||||
ScaStorageTag: q.ScaStorageTag.WithContext(ctx),
|
||||
ScaStorageTagInfo: q.ScaStorageTagInfo.WithContext(ctx),
|
||||
ScaUserFollow: q.ScaUserFollow.WithContext(ctx),
|
||||
ScaUserLevel: q.ScaUserLevel.WithContext(ctx),
|
||||
ScaUserMessage: q.ScaUserMessage.WithContext(ctx),
|
||||
|
@@ -6,15 +6,17 @@ package query
 
 import (
 	"context"
 
 	"gorm.io/gorm"
 	"gorm.io/gorm/clause"
 	"gorm.io/gorm/schema"
 	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
 
 	"gorm.io/gen"
 	"gorm.io/gen/field"
 
 	"gorm.io/plugin/dbresolver"
 
 	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
 )
 
 func newScaAuthMenu(db *gorm.DB, opts ...gen.DOOption) scaAuthMenu {
 
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaAuthPermissionRule(db *gorm.DB, opts ...gen.DOOption) scaAuthPermissionRule {
|
||||
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaAuthRole(db *gorm.DB, opts ...gen.DOOption) scaAuthRole {
|
||||
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaAuthUser(db *gorm.DB, opts ...gen.DOOption) scaAuthUser {
|
||||
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaAuthUserDevice(db *gorm.DB, opts ...gen.DOOption) scaAuthUserDevice {
|
||||
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaAuthUserSocial(db *gorm.DB, opts ...gen.DOOption) scaAuthUserSocial {
|
||||
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaCommentLike(db *gorm.DB, opts ...gen.DOOption) scaCommentLike {
|
||||
|
@@ -6,15 +6,17 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaCommentReply(db *gorm.DB, opts ...gen.DOOption) scaCommentReply {
|
||||
|
@@ -1,410 +0,0 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.

package query

import (
	"context"
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"
)

func newScaFileFolder(db *gorm.DB, opts ...gen.DOOption) scaFileFolder {
	_scaFileFolder := scaFileFolder{}

	_scaFileFolder.scaFileFolderDo.UseDB(db, opts...)
	_scaFileFolder.scaFileFolderDo.UseModel(&model.ScaFileFolder{})

	tableName := _scaFileFolder.scaFileFolderDo.TableName()
	_scaFileFolder.ALL = field.NewAsterisk(tableName)
	_scaFileFolder.ID = field.NewInt64(tableName, "id")
	_scaFileFolder.FolderName = field.NewString(tableName, "folder_name")
	_scaFileFolder.ParentFolderID = field.NewInt64(tableName, "parent_folder_id")
	_scaFileFolder.FolderAddr = field.NewString(tableName, "folder_addr")
	_scaFileFolder.UserID = field.NewString(tableName, "user_id")
	_scaFileFolder.FolderSource = field.NewInt64(tableName, "folder_source")
	_scaFileFolder.CreatedAt = field.NewTime(tableName, "created_at")
	_scaFileFolder.UpdatedAt = field.NewTime(tableName, "updated_at")
	_scaFileFolder.DeletedAt = field.NewField(tableName, "deleted_at")

	_scaFileFolder.fillFieldMap()

	return _scaFileFolder
}

type scaFileFolder struct {
	scaFileFolderDo

	ALL            field.Asterisk
	ID             field.Int64  // primary key
	FolderName     field.String // folder name
	ParentFolderID field.Int64  // parent folder ID
	FolderAddr     field.String // folder name
	UserID         field.String // user ID
	FolderSource   field.Int64  // folder source: 0 album, 1 comment
	CreatedAt      field.Time   // creation time
	UpdatedAt      field.Time   // update time
	DeletedAt      field.Field  // deletion time

	fieldMap map[string]field.Expr
}

func (s scaFileFolder) Table(newTableName string) *scaFileFolder {
	s.scaFileFolderDo.UseTable(newTableName)
	return s.updateTableName(newTableName)
}

func (s scaFileFolder) As(alias string) *scaFileFolder {
	s.scaFileFolderDo.DO = *(s.scaFileFolderDo.As(alias).(*gen.DO))
	return s.updateTableName(alias)
}

func (s *scaFileFolder) updateTableName(table string) *scaFileFolder {
	s.ALL = field.NewAsterisk(table)
	s.ID = field.NewInt64(table, "id")
	s.FolderName = field.NewString(table, "folder_name")
	s.ParentFolderID = field.NewInt64(table, "parent_folder_id")
	s.FolderAddr = field.NewString(table, "folder_addr")
	s.UserID = field.NewString(table, "user_id")
	s.FolderSource = field.NewInt64(table, "folder_source")
	s.CreatedAt = field.NewTime(table, "created_at")
	s.UpdatedAt = field.NewTime(table, "updated_at")
	s.DeletedAt = field.NewField(table, "deleted_at")

	s.fillFieldMap()

	return s
}

func (s *scaFileFolder) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
	_f, ok := s.fieldMap[fieldName]
	if !ok || _f == nil {
		return nil, false
	}
	_oe, ok := _f.(field.OrderExpr)
	return _oe, ok
}

func (s *scaFileFolder) fillFieldMap() {
	s.fieldMap = make(map[string]field.Expr, 9)
	s.fieldMap["id"] = s.ID
	s.fieldMap["folder_name"] = s.FolderName
	s.fieldMap["parent_folder_id"] = s.ParentFolderID
	s.fieldMap["folder_addr"] = s.FolderAddr
	s.fieldMap["user_id"] = s.UserID
	s.fieldMap["folder_source"] = s.FolderSource
	s.fieldMap["created_at"] = s.CreatedAt
	s.fieldMap["updated_at"] = s.UpdatedAt
	s.fieldMap["deleted_at"] = s.DeletedAt
}

func (s scaFileFolder) clone(db *gorm.DB) scaFileFolder {
	s.scaFileFolderDo.ReplaceConnPool(db.Statement.ConnPool)
	return s
}

func (s scaFileFolder) replaceDB(db *gorm.DB) scaFileFolder {
	s.scaFileFolderDo.ReplaceDB(db)
	return s
}

type scaFileFolderDo struct{ gen.DO }

type IScaFileFolderDo interface {
	gen.SubQuery
	Debug() IScaFileFolderDo
	WithContext(ctx context.Context) IScaFileFolderDo
	WithResult(fc func(tx gen.Dao)) gen.ResultInfo
	ReplaceDB(db *gorm.DB)
	ReadDB() IScaFileFolderDo
	WriteDB() IScaFileFolderDo
	As(alias string) gen.Dao
	Session(config *gorm.Session) IScaFileFolderDo
	Columns(cols ...field.Expr) gen.Columns
	Clauses(conds ...clause.Expression) IScaFileFolderDo
	Not(conds ...gen.Condition) IScaFileFolderDo
	Or(conds ...gen.Condition) IScaFileFolderDo
	Select(conds ...field.Expr) IScaFileFolderDo
	Where(conds ...gen.Condition) IScaFileFolderDo
	Order(conds ...field.Expr) IScaFileFolderDo
	Distinct(cols ...field.Expr) IScaFileFolderDo
	Omit(cols ...field.Expr) IScaFileFolderDo
	Join(table schema.Tabler, on ...field.Expr) IScaFileFolderDo
	LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileFolderDo
	RightJoin(table schema.Tabler, on ...field.Expr) IScaFileFolderDo
	Group(cols ...field.Expr) IScaFileFolderDo
	Having(conds ...gen.Condition) IScaFileFolderDo
	Limit(limit int) IScaFileFolderDo
	Offset(offset int) IScaFileFolderDo
	Count() (count int64, err error)
	Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileFolderDo
	Unscoped() IScaFileFolderDo
	Create(values ...*model.ScaFileFolder) error
	CreateInBatches(values []*model.ScaFileFolder, batchSize int) error
	Save(values ...*model.ScaFileFolder) error
	First() (*model.ScaFileFolder, error)
	Take() (*model.ScaFileFolder, error)
	Last() (*model.ScaFileFolder, error)
	Find() ([]*model.ScaFileFolder, error)
	FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileFolder, err error)
	FindInBatches(result *[]*model.ScaFileFolder, batchSize int, fc func(tx gen.Dao, batch int) error) error
	Pluck(column field.Expr, dest interface{}) error
	Delete(...*model.ScaFileFolder) (info gen.ResultInfo, err error)
	Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
	UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
	Updates(value interface{}) (info gen.ResultInfo, err error)
	UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
	UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
	UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
	UpdateFrom(q gen.SubQuery) gen.Dao
	Attrs(attrs ...field.AssignExpr) IScaFileFolderDo
	Assign(attrs ...field.AssignExpr) IScaFileFolderDo
	Joins(fields ...field.RelationField) IScaFileFolderDo
	Preload(fields ...field.RelationField) IScaFileFolderDo
	FirstOrInit() (*model.ScaFileFolder, error)
	FirstOrCreate() (*model.ScaFileFolder, error)
	FindByPage(offset int, limit int) (result []*model.ScaFileFolder, count int64, err error)
	ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
	Scan(result interface{}) (err error)
	Returning(value interface{}, columns ...string) IScaFileFolderDo
	UnderlyingDB() *gorm.DB
	schema.Tabler
}

func (s scaFileFolderDo) Debug() IScaFileFolderDo {
	return s.withDO(s.DO.Debug())
}

func (s scaFileFolderDo) WithContext(ctx context.Context) IScaFileFolderDo {
	return s.withDO(s.DO.WithContext(ctx))
}

func (s scaFileFolderDo) ReadDB() IScaFileFolderDo {
	return s.Clauses(dbresolver.Read)
}

func (s scaFileFolderDo) WriteDB() IScaFileFolderDo {
	return s.Clauses(dbresolver.Write)
}

func (s scaFileFolderDo) Session(config *gorm.Session) IScaFileFolderDo {
	return s.withDO(s.DO.Session(config))
}

func (s scaFileFolderDo) Clauses(conds ...clause.Expression) IScaFileFolderDo {
	return s.withDO(s.DO.Clauses(conds...))
}

func (s scaFileFolderDo) Returning(value interface{}, columns ...string) IScaFileFolderDo {
	return s.withDO(s.DO.Returning(value, columns...))
}

func (s scaFileFolderDo) Not(conds ...gen.Condition) IScaFileFolderDo {
	return s.withDO(s.DO.Not(conds...))
}

func (s scaFileFolderDo) Or(conds ...gen.Condition) IScaFileFolderDo {
	return s.withDO(s.DO.Or(conds...))
}

func (s scaFileFolderDo) Select(conds ...field.Expr) IScaFileFolderDo {
	return s.withDO(s.DO.Select(conds...))
}

func (s scaFileFolderDo) Where(conds ...gen.Condition) IScaFileFolderDo {
	return s.withDO(s.DO.Where(conds...))
}

func (s scaFileFolderDo) Order(conds ...field.Expr) IScaFileFolderDo {
	return s.withDO(s.DO.Order(conds...))
}

func (s scaFileFolderDo) Distinct(cols ...field.Expr) IScaFileFolderDo {
	return s.withDO(s.DO.Distinct(cols...))
}

func (s scaFileFolderDo) Omit(cols ...field.Expr) IScaFileFolderDo {
	return s.withDO(s.DO.Omit(cols...))
}

func (s scaFileFolderDo) Join(table schema.Tabler, on ...field.Expr) IScaFileFolderDo {
	return s.withDO(s.DO.Join(table, on...))
}

func (s scaFileFolderDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileFolderDo {
	return s.withDO(s.DO.LeftJoin(table, on...))
}

func (s scaFileFolderDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaFileFolderDo {
	return s.withDO(s.DO.RightJoin(table, on...))
}

func (s scaFileFolderDo) Group(cols ...field.Expr) IScaFileFolderDo {
	return s.withDO(s.DO.Group(cols...))
}

func (s scaFileFolderDo) Having(conds ...gen.Condition) IScaFileFolderDo {
	return s.withDO(s.DO.Having(conds...))
}

func (s scaFileFolderDo) Limit(limit int) IScaFileFolderDo {
	return s.withDO(s.DO.Limit(limit))
}

func (s scaFileFolderDo) Offset(offset int) IScaFileFolderDo {
	return s.withDO(s.DO.Offset(offset))
}

func (s scaFileFolderDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileFolderDo {
	return s.withDO(s.DO.Scopes(funcs...))
}

func (s scaFileFolderDo) Unscoped() IScaFileFolderDo {
	return s.withDO(s.DO.Unscoped())
}

func (s scaFileFolderDo) Create(values ...*model.ScaFileFolder) error {
	if len(values) == 0 {
		return nil
	}
	return s.DO.Create(values)
}

func (s scaFileFolderDo) CreateInBatches(values []*model.ScaFileFolder, batchSize int) error {
	return s.DO.CreateInBatches(values, batchSize)
}

// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (s scaFileFolderDo) Save(values ...*model.ScaFileFolder) error {
	if len(values) == 0 {
		return nil
	}
	return s.DO.Save(values)
}

func (s scaFileFolderDo) First() (*model.ScaFileFolder, error) {
	if result, err := s.DO.First(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileFolder), nil
	}
}

func (s scaFileFolderDo) Take() (*model.ScaFileFolder, error) {
	if result, err := s.DO.Take(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileFolder), nil
	}
}

func (s scaFileFolderDo) Last() (*model.ScaFileFolder, error) {
	if result, err := s.DO.Last(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileFolder), nil
	}
}

func (s scaFileFolderDo) Find() ([]*model.ScaFileFolder, error) {
	result, err := s.DO.Find()
	return result.([]*model.ScaFileFolder), err
}

func (s scaFileFolderDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileFolder, err error) {
	buf := make([]*model.ScaFileFolder, 0, batchSize)
	err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
		defer func() { results = append(results, buf...) }()
		return fc(tx, batch)
	})
	return results, err
}

func (s scaFileFolderDo) FindInBatches(result *[]*model.ScaFileFolder, batchSize int, fc func(tx gen.Dao, batch int) error) error {
	return s.DO.FindInBatches(result, batchSize, fc)
}

func (s scaFileFolderDo) Attrs(attrs ...field.AssignExpr) IScaFileFolderDo {
	return s.withDO(s.DO.Attrs(attrs...))
}

func (s scaFileFolderDo) Assign(attrs ...field.AssignExpr) IScaFileFolderDo {
	return s.withDO(s.DO.Assign(attrs...))
}

func (s scaFileFolderDo) Joins(fields ...field.RelationField) IScaFileFolderDo {
	for _, _f := range fields {
		s = *s.withDO(s.DO.Joins(_f))
	}
	return &s
}

func (s scaFileFolderDo) Preload(fields ...field.RelationField) IScaFileFolderDo {
	for _, _f := range fields {
		s = *s.withDO(s.DO.Preload(_f))
	}
	return &s
}

func (s scaFileFolderDo) FirstOrInit() (*model.ScaFileFolder, error) {
	if result, err := s.DO.FirstOrInit(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileFolder), nil
	}
}

func (s scaFileFolderDo) FirstOrCreate() (*model.ScaFileFolder, error) {
	if result, err := s.DO.FirstOrCreate(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileFolder), nil
	}
}

func (s scaFileFolderDo) FindByPage(offset int, limit int) (result []*model.ScaFileFolder, count int64, err error) {
	result, err = s.Offset(offset).Limit(limit).Find()
	if err != nil {
		return
	}

	if size := len(result); 0 < limit && 0 < size && size < limit {
		count = int64(size + offset)
		return
	}

	count, err = s.Offset(-1).Limit(-1).Count()
	return
}

func (s scaFileFolderDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
	count, err = s.Count()
	if err != nil {
		return
	}

	err = s.Offset(offset).Limit(limit).Scan(result)
	return
}

func (s scaFileFolderDo) Scan(result interface{}) (err error) {
	return s.DO.Scan(result)
}

func (s scaFileFolderDo) Delete(models ...*model.ScaFileFolder) (result gen.ResultInfo, err error) {
	return s.DO.Delete(models)
}

func (s *scaFileFolderDo) withDO(do gen.Dao) *scaFileFolderDo {
	s.DO = *do.(*gen.DO)
	return s
}
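The file deleted above (and the three that follow) are the old copies of the gorm.io/gen output; apart from the model import path, the DAO surface is unchanged. As a reminder of how that surface is consumed, a hedged Go sketch against the ScaFileFolder DAO — field names come from the struct above, while the package name, function name and the query.Q entry point are assumptions based on gen's standard output:

package storage

import (
	"context"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/query"
)

// listRootFolders pages through a user's top-level folders, newest first.
// Sketch only: assumes query.SetDefault(db) was called during start-up.
func listRootFolders(ctx context.Context, userID string, page, size int) ([]*model.ScaFileFolder, int64, error) {
	f := query.Q.ScaFileFolder
	return f.WithContext(ctx).
		Where(f.UserID.Eq(userID), f.ParentFolderID.Eq(0)).
		Order(f.CreatedAt.Desc()).
		FindByPage((page-1)*size, size)
}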
@@ -1,422 +0,0 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.

package query

import (
	"context"
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"
)

func newScaFileInfo(db *gorm.DB, opts ...gen.DOOption) scaFileInfo {
	_scaFileInfo := scaFileInfo{}

	_scaFileInfo.scaFileInfoDo.UseDB(db, opts...)
	_scaFileInfo.scaFileInfoDo.UseModel(&model.ScaFileInfo{})

	tableName := _scaFileInfo.scaFileInfoDo.TableName()
	_scaFileInfo.ALL = field.NewAsterisk(tableName)
	_scaFileInfo.ID = field.NewInt64(tableName, "id")
	_scaFileInfo.FileName = field.NewString(tableName, "file_name")
	_scaFileInfo.FileSize = field.NewFloat64(tableName, "file_size")
	_scaFileInfo.FileTypeID = field.NewInt64(tableName, "file_type_id")
	_scaFileInfo.UploadTime = field.NewTime(tableName, "upload_time")
	_scaFileInfo.FolderID = field.NewInt64(tableName, "folder_id")
	_scaFileInfo.UserID = field.NewString(tableName, "user_id")
	_scaFileInfo.FileSource = field.NewInt64(tableName, "file_source")
	_scaFileInfo.Status = field.NewInt64(tableName, "status")
	_scaFileInfo.CreatedAt = field.NewTime(tableName, "created_at")
	_scaFileInfo.UpdatedAt = field.NewTime(tableName, "updated_at")
	_scaFileInfo.DeletedAt = field.NewField(tableName, "deleted_at")

	_scaFileInfo.fillFieldMap()

	return _scaFileInfo
}

type scaFileInfo struct {
	scaFileInfoDo

	ALL        field.Asterisk
	ID         field.Int64   // primary key
	FileName   field.String  // file name
	FileSize   field.Float64 // file size
	FileTypeID field.Int64   // file type ID
	UploadTime field.Time    // upload time
	FolderID   field.Int64   // folder ID
	UserID     field.String  // user ID
	FileSource field.Int64   // file source: 0 album, 1 comment
	Status     field.Int64   // file status
	CreatedAt  field.Time    // creation time
	UpdatedAt  field.Time    // update time
	DeletedAt  field.Field   // deletion time

	fieldMap map[string]field.Expr
}

func (s scaFileInfo) Table(newTableName string) *scaFileInfo {
	s.scaFileInfoDo.UseTable(newTableName)
	return s.updateTableName(newTableName)
}

func (s scaFileInfo) As(alias string) *scaFileInfo {
	s.scaFileInfoDo.DO = *(s.scaFileInfoDo.As(alias).(*gen.DO))
	return s.updateTableName(alias)
}

func (s *scaFileInfo) updateTableName(table string) *scaFileInfo {
	s.ALL = field.NewAsterisk(table)
	s.ID = field.NewInt64(table, "id")
	s.FileName = field.NewString(table, "file_name")
	s.FileSize = field.NewFloat64(table, "file_size")
	s.FileTypeID = field.NewInt64(table, "file_type_id")
	s.UploadTime = field.NewTime(table, "upload_time")
	s.FolderID = field.NewInt64(table, "folder_id")
	s.UserID = field.NewString(table, "user_id")
	s.FileSource = field.NewInt64(table, "file_source")
	s.Status = field.NewInt64(table, "status")
	s.CreatedAt = field.NewTime(table, "created_at")
	s.UpdatedAt = field.NewTime(table, "updated_at")
	s.DeletedAt = field.NewField(table, "deleted_at")

	s.fillFieldMap()

	return s
}

func (s *scaFileInfo) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
	_f, ok := s.fieldMap[fieldName]
	if !ok || _f == nil {
		return nil, false
	}
	_oe, ok := _f.(field.OrderExpr)
	return _oe, ok
}

func (s *scaFileInfo) fillFieldMap() {
	s.fieldMap = make(map[string]field.Expr, 12)
	s.fieldMap["id"] = s.ID
	s.fieldMap["file_name"] = s.FileName
	s.fieldMap["file_size"] = s.FileSize
	s.fieldMap["file_type_id"] = s.FileTypeID
	s.fieldMap["upload_time"] = s.UploadTime
	s.fieldMap["folder_id"] = s.FolderID
	s.fieldMap["user_id"] = s.UserID
	s.fieldMap["file_source"] = s.FileSource
	s.fieldMap["status"] = s.Status
	s.fieldMap["created_at"] = s.CreatedAt
	s.fieldMap["updated_at"] = s.UpdatedAt
	s.fieldMap["deleted_at"] = s.DeletedAt
}

func (s scaFileInfo) clone(db *gorm.DB) scaFileInfo {
	s.scaFileInfoDo.ReplaceConnPool(db.Statement.ConnPool)
	return s
}

func (s scaFileInfo) replaceDB(db *gorm.DB) scaFileInfo {
	s.scaFileInfoDo.ReplaceDB(db)
	return s
}

type scaFileInfoDo struct{ gen.DO }

type IScaFileInfoDo interface {
	gen.SubQuery
	Debug() IScaFileInfoDo
	WithContext(ctx context.Context) IScaFileInfoDo
	WithResult(fc func(tx gen.Dao)) gen.ResultInfo
	ReplaceDB(db *gorm.DB)
	ReadDB() IScaFileInfoDo
	WriteDB() IScaFileInfoDo
	As(alias string) gen.Dao
	Session(config *gorm.Session) IScaFileInfoDo
	Columns(cols ...field.Expr) gen.Columns
	Clauses(conds ...clause.Expression) IScaFileInfoDo
	Not(conds ...gen.Condition) IScaFileInfoDo
	Or(conds ...gen.Condition) IScaFileInfoDo
	Select(conds ...field.Expr) IScaFileInfoDo
	Where(conds ...gen.Condition) IScaFileInfoDo
	Order(conds ...field.Expr) IScaFileInfoDo
	Distinct(cols ...field.Expr) IScaFileInfoDo
	Omit(cols ...field.Expr) IScaFileInfoDo
	Join(table schema.Tabler, on ...field.Expr) IScaFileInfoDo
	LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileInfoDo
	RightJoin(table schema.Tabler, on ...field.Expr) IScaFileInfoDo
	Group(cols ...field.Expr) IScaFileInfoDo
	Having(conds ...gen.Condition) IScaFileInfoDo
	Limit(limit int) IScaFileInfoDo
	Offset(offset int) IScaFileInfoDo
	Count() (count int64, err error)
	Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileInfoDo
	Unscoped() IScaFileInfoDo
	Create(values ...*model.ScaFileInfo) error
	CreateInBatches(values []*model.ScaFileInfo, batchSize int) error
	Save(values ...*model.ScaFileInfo) error
	First() (*model.ScaFileInfo, error)
	Take() (*model.ScaFileInfo, error)
	Last() (*model.ScaFileInfo, error)
	Find() ([]*model.ScaFileInfo, error)
	FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileInfo, err error)
	FindInBatches(result *[]*model.ScaFileInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error
	Pluck(column field.Expr, dest interface{}) error
	Delete(...*model.ScaFileInfo) (info gen.ResultInfo, err error)
	Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
	UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
	Updates(value interface{}) (info gen.ResultInfo, err error)
	UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
	UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
	UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
	UpdateFrom(q gen.SubQuery) gen.Dao
	Attrs(attrs ...field.AssignExpr) IScaFileInfoDo
	Assign(attrs ...field.AssignExpr) IScaFileInfoDo
	Joins(fields ...field.RelationField) IScaFileInfoDo
	Preload(fields ...field.RelationField) IScaFileInfoDo
	FirstOrInit() (*model.ScaFileInfo, error)
	FirstOrCreate() (*model.ScaFileInfo, error)
	FindByPage(offset int, limit int) (result []*model.ScaFileInfo, count int64, err error)
	ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
	Scan(result interface{}) (err error)
	Returning(value interface{}, columns ...string) IScaFileInfoDo
	UnderlyingDB() *gorm.DB
	schema.Tabler
}

func (s scaFileInfoDo) Debug() IScaFileInfoDo {
	return s.withDO(s.DO.Debug())
}

func (s scaFileInfoDo) WithContext(ctx context.Context) IScaFileInfoDo {
	return s.withDO(s.DO.WithContext(ctx))
}

func (s scaFileInfoDo) ReadDB() IScaFileInfoDo {
	return s.Clauses(dbresolver.Read)
}

func (s scaFileInfoDo) WriteDB() IScaFileInfoDo {
	return s.Clauses(dbresolver.Write)
}

func (s scaFileInfoDo) Session(config *gorm.Session) IScaFileInfoDo {
	return s.withDO(s.DO.Session(config))
}

func (s scaFileInfoDo) Clauses(conds ...clause.Expression) IScaFileInfoDo {
	return s.withDO(s.DO.Clauses(conds...))
}

func (s scaFileInfoDo) Returning(value interface{}, columns ...string) IScaFileInfoDo {
	return s.withDO(s.DO.Returning(value, columns...))
}

func (s scaFileInfoDo) Not(conds ...gen.Condition) IScaFileInfoDo {
	return s.withDO(s.DO.Not(conds...))
}

func (s scaFileInfoDo) Or(conds ...gen.Condition) IScaFileInfoDo {
	return s.withDO(s.DO.Or(conds...))
}

func (s scaFileInfoDo) Select(conds ...field.Expr) IScaFileInfoDo {
	return s.withDO(s.DO.Select(conds...))
}

func (s scaFileInfoDo) Where(conds ...gen.Condition) IScaFileInfoDo {
	return s.withDO(s.DO.Where(conds...))
}

func (s scaFileInfoDo) Order(conds ...field.Expr) IScaFileInfoDo {
	return s.withDO(s.DO.Order(conds...))
}

func (s scaFileInfoDo) Distinct(cols ...field.Expr) IScaFileInfoDo {
	return s.withDO(s.DO.Distinct(cols...))
}

func (s scaFileInfoDo) Omit(cols ...field.Expr) IScaFileInfoDo {
	return s.withDO(s.DO.Omit(cols...))
}

func (s scaFileInfoDo) Join(table schema.Tabler, on ...field.Expr) IScaFileInfoDo {
	return s.withDO(s.DO.Join(table, on...))
}

func (s scaFileInfoDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileInfoDo {
	return s.withDO(s.DO.LeftJoin(table, on...))
}

func (s scaFileInfoDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaFileInfoDo {
	return s.withDO(s.DO.RightJoin(table, on...))
}

func (s scaFileInfoDo) Group(cols ...field.Expr) IScaFileInfoDo {
	return s.withDO(s.DO.Group(cols...))
}

func (s scaFileInfoDo) Having(conds ...gen.Condition) IScaFileInfoDo {
	return s.withDO(s.DO.Having(conds...))
}

func (s scaFileInfoDo) Limit(limit int) IScaFileInfoDo {
	return s.withDO(s.DO.Limit(limit))
}

func (s scaFileInfoDo) Offset(offset int) IScaFileInfoDo {
	return s.withDO(s.DO.Offset(offset))
}

func (s scaFileInfoDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileInfoDo {
	return s.withDO(s.DO.Scopes(funcs...))
}

func (s scaFileInfoDo) Unscoped() IScaFileInfoDo {
	return s.withDO(s.DO.Unscoped())
}

func (s scaFileInfoDo) Create(values ...*model.ScaFileInfo) error {
	if len(values) == 0 {
		return nil
	}
	return s.DO.Create(values)
}

func (s scaFileInfoDo) CreateInBatches(values []*model.ScaFileInfo, batchSize int) error {
	return s.DO.CreateInBatches(values, batchSize)
}

// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (s scaFileInfoDo) Save(values ...*model.ScaFileInfo) error {
	if len(values) == 0 {
		return nil
	}
	return s.DO.Save(values)
}

func (s scaFileInfoDo) First() (*model.ScaFileInfo, error) {
	if result, err := s.DO.First(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileInfo), nil
	}
}

func (s scaFileInfoDo) Take() (*model.ScaFileInfo, error) {
	if result, err := s.DO.Take(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileInfo), nil
	}
}

func (s scaFileInfoDo) Last() (*model.ScaFileInfo, error) {
	if result, err := s.DO.Last(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileInfo), nil
	}
}

func (s scaFileInfoDo) Find() ([]*model.ScaFileInfo, error) {
	result, err := s.DO.Find()
	return result.([]*model.ScaFileInfo), err
}

func (s scaFileInfoDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileInfo, err error) {
	buf := make([]*model.ScaFileInfo, 0, batchSize)
	err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
		defer func() { results = append(results, buf...) }()
		return fc(tx, batch)
	})
	return results, err
}

func (s scaFileInfoDo) FindInBatches(result *[]*model.ScaFileInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error {
	return s.DO.FindInBatches(result, batchSize, fc)
}

func (s scaFileInfoDo) Attrs(attrs ...field.AssignExpr) IScaFileInfoDo {
	return s.withDO(s.DO.Attrs(attrs...))
}

func (s scaFileInfoDo) Assign(attrs ...field.AssignExpr) IScaFileInfoDo {
	return s.withDO(s.DO.Assign(attrs...))
}

func (s scaFileInfoDo) Joins(fields ...field.RelationField) IScaFileInfoDo {
	for _, _f := range fields {
		s = *s.withDO(s.DO.Joins(_f))
	}
	return &s
}

func (s scaFileInfoDo) Preload(fields ...field.RelationField) IScaFileInfoDo {
	for _, _f := range fields {
		s = *s.withDO(s.DO.Preload(_f))
	}
	return &s
}

func (s scaFileInfoDo) FirstOrInit() (*model.ScaFileInfo, error) {
	if result, err := s.DO.FirstOrInit(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileInfo), nil
	}
}

func (s scaFileInfoDo) FirstOrCreate() (*model.ScaFileInfo, error) {
	if result, err := s.DO.FirstOrCreate(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileInfo), nil
	}
}

func (s scaFileInfoDo) FindByPage(offset int, limit int) (result []*model.ScaFileInfo, count int64, err error) {
	result, err = s.Offset(offset).Limit(limit).Find()
	if err != nil {
		return
	}

	if size := len(result); 0 < limit && 0 < size && size < limit {
		count = int64(size + offset)
		return
	}

	count, err = s.Offset(-1).Limit(-1).Count()
	return
}

func (s scaFileInfoDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
	count, err = s.Count()
	if err != nil {
		return
	}

	err = s.Offset(offset).Limit(limit).Scan(result)
	return
}

func (s scaFileInfoDo) Scan(result interface{}) (err error) {
	return s.DO.Scan(result)
}

func (s scaFileInfoDo) Delete(models ...*model.ScaFileInfo) (result gen.ResultInfo, err error) {
	return s.DO.Delete(models)
}

func (s *scaFileInfoDo) withDO(do gen.Dao) *scaFileInfoDo {
	s.DO = *do.(*gen.DO)
	return s
}
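The ScaFileInfo DAO above also exposes batch helpers (FindInBatch/FindInBatches). A hedged sketch of walking a user's files in fixed-size batches; it assumes the standard query.Q entry point and that model.ScaFileInfo.FileSize is a float64, as the Float64 field wrapper suggests:

package storage

import (
	"context"

	"gorm.io/gen"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/query"
)

// sumUserStorage totals a user's file sizes 500 rows at a time so a large
// album is never loaded into memory at once. Sketch only.
func sumUserStorage(ctx context.Context, userID string) (float64, error) {
	fi := query.Q.ScaFileInfo
	var (
		total float64
		batch []*model.ScaFileInfo
	)
	err := fi.WithContext(ctx).
		Where(fi.UserID.Eq(userID)).
		FindInBatches(&batch, 500, func(tx gen.Dao, batchNum int) error {
			for _, f := range batch {
				total += f.FileSize
			}
			return nil
		})
	return total, err
}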
@@ -1,406 +0,0 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.

package query

import (
	"context"
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"
)

func newScaFileRecycle(db *gorm.DB, opts ...gen.DOOption) scaFileRecycle {
	_scaFileRecycle := scaFileRecycle{}

	_scaFileRecycle.scaFileRecycleDo.UseDB(db, opts...)
	_scaFileRecycle.scaFileRecycleDo.UseModel(&model.ScaFileRecycle{})

	tableName := _scaFileRecycle.scaFileRecycleDo.TableName()
	_scaFileRecycle.ALL = field.NewAsterisk(tableName)
	_scaFileRecycle.ID = field.NewInt64(tableName, "id")
	_scaFileRecycle.FileID = field.NewInt64(tableName, "file_id")
	_scaFileRecycle.FolderID = field.NewInt64(tableName, "folder_id")
	_scaFileRecycle.Type = field.NewInt64(tableName, "type")
	_scaFileRecycle.UserID = field.NewString(tableName, "user_id")
	_scaFileRecycle.DeletedAt = field.NewField(tableName, "deleted_at")
	_scaFileRecycle.OriginalPath = field.NewString(tableName, "original_path")
	_scaFileRecycle.FileSource = field.NewInt64(tableName, "file_source")

	_scaFileRecycle.fillFieldMap()

	return _scaFileRecycle
}

type scaFileRecycle struct {
	scaFileRecycleDo

	ALL          field.Asterisk
	ID           field.Int64  // primary key
	FileID       field.Int64  // file ID
	FolderID     field.Int64  // folder ID
	Type         field.Int64  // type: 0 file, 1 folder
	UserID       field.String // user ID
	DeletedAt    field.Field  // deletion time
	OriginalPath field.String // original path
	FileSource   field.Int64  // file source: 0 album, 1 comment

	fieldMap map[string]field.Expr
}

func (s scaFileRecycle) Table(newTableName string) *scaFileRecycle {
	s.scaFileRecycleDo.UseTable(newTableName)
	return s.updateTableName(newTableName)
}

func (s scaFileRecycle) As(alias string) *scaFileRecycle {
	s.scaFileRecycleDo.DO = *(s.scaFileRecycleDo.As(alias).(*gen.DO))
	return s.updateTableName(alias)
}

func (s *scaFileRecycle) updateTableName(table string) *scaFileRecycle {
	s.ALL = field.NewAsterisk(table)
	s.ID = field.NewInt64(table, "id")
	s.FileID = field.NewInt64(table, "file_id")
	s.FolderID = field.NewInt64(table, "folder_id")
	s.Type = field.NewInt64(table, "type")
	s.UserID = field.NewString(table, "user_id")
	s.DeletedAt = field.NewField(table, "deleted_at")
	s.OriginalPath = field.NewString(table, "original_path")
	s.FileSource = field.NewInt64(table, "file_source")

	s.fillFieldMap()

	return s
}

func (s *scaFileRecycle) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
	_f, ok := s.fieldMap[fieldName]
	if !ok || _f == nil {
		return nil, false
	}
	_oe, ok := _f.(field.OrderExpr)
	return _oe, ok
}

func (s *scaFileRecycle) fillFieldMap() {
	s.fieldMap = make(map[string]field.Expr, 8)
	s.fieldMap["id"] = s.ID
	s.fieldMap["file_id"] = s.FileID
	s.fieldMap["folder_id"] = s.FolderID
	s.fieldMap["type"] = s.Type
	s.fieldMap["user_id"] = s.UserID
	s.fieldMap["deleted_at"] = s.DeletedAt
	s.fieldMap["original_path"] = s.OriginalPath
	s.fieldMap["file_source"] = s.FileSource
}

func (s scaFileRecycle) clone(db *gorm.DB) scaFileRecycle {
	s.scaFileRecycleDo.ReplaceConnPool(db.Statement.ConnPool)
	return s
}

func (s scaFileRecycle) replaceDB(db *gorm.DB) scaFileRecycle {
	s.scaFileRecycleDo.ReplaceDB(db)
	return s
}

type scaFileRecycleDo struct{ gen.DO }

type IScaFileRecycleDo interface {
	gen.SubQuery
	Debug() IScaFileRecycleDo
	WithContext(ctx context.Context) IScaFileRecycleDo
	WithResult(fc func(tx gen.Dao)) gen.ResultInfo
	ReplaceDB(db *gorm.DB)
	ReadDB() IScaFileRecycleDo
	WriteDB() IScaFileRecycleDo
	As(alias string) gen.Dao
	Session(config *gorm.Session) IScaFileRecycleDo
	Columns(cols ...field.Expr) gen.Columns
	Clauses(conds ...clause.Expression) IScaFileRecycleDo
	Not(conds ...gen.Condition) IScaFileRecycleDo
	Or(conds ...gen.Condition) IScaFileRecycleDo
	Select(conds ...field.Expr) IScaFileRecycleDo
	Where(conds ...gen.Condition) IScaFileRecycleDo
	Order(conds ...field.Expr) IScaFileRecycleDo
	Distinct(cols ...field.Expr) IScaFileRecycleDo
	Omit(cols ...field.Expr) IScaFileRecycleDo
	Join(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo
	LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo
	RightJoin(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo
	Group(cols ...field.Expr) IScaFileRecycleDo
	Having(conds ...gen.Condition) IScaFileRecycleDo
	Limit(limit int) IScaFileRecycleDo
	Offset(offset int) IScaFileRecycleDo
	Count() (count int64, err error)
	Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileRecycleDo
	Unscoped() IScaFileRecycleDo
	Create(values ...*model.ScaFileRecycle) error
	CreateInBatches(values []*model.ScaFileRecycle, batchSize int) error
	Save(values ...*model.ScaFileRecycle) error
	First() (*model.ScaFileRecycle, error)
	Take() (*model.ScaFileRecycle, error)
	Last() (*model.ScaFileRecycle, error)
	Find() ([]*model.ScaFileRecycle, error)
	FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileRecycle, err error)
	FindInBatches(result *[]*model.ScaFileRecycle, batchSize int, fc func(tx gen.Dao, batch int) error) error
	Pluck(column field.Expr, dest interface{}) error
	Delete(...*model.ScaFileRecycle) (info gen.ResultInfo, err error)
	Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
	UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
	Updates(value interface{}) (info gen.ResultInfo, err error)
	UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
	UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
	UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
	UpdateFrom(q gen.SubQuery) gen.Dao
	Attrs(attrs ...field.AssignExpr) IScaFileRecycleDo
	Assign(attrs ...field.AssignExpr) IScaFileRecycleDo
	Joins(fields ...field.RelationField) IScaFileRecycleDo
	Preload(fields ...field.RelationField) IScaFileRecycleDo
	FirstOrInit() (*model.ScaFileRecycle, error)
	FirstOrCreate() (*model.ScaFileRecycle, error)
	FindByPage(offset int, limit int) (result []*model.ScaFileRecycle, count int64, err error)
	ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
	Scan(result interface{}) (err error)
	Returning(value interface{}, columns ...string) IScaFileRecycleDo
	UnderlyingDB() *gorm.DB
	schema.Tabler
}

func (s scaFileRecycleDo) Debug() IScaFileRecycleDo {
	return s.withDO(s.DO.Debug())
}

func (s scaFileRecycleDo) WithContext(ctx context.Context) IScaFileRecycleDo {
	return s.withDO(s.DO.WithContext(ctx))
}

func (s scaFileRecycleDo) ReadDB() IScaFileRecycleDo {
	return s.Clauses(dbresolver.Read)
}

func (s scaFileRecycleDo) WriteDB() IScaFileRecycleDo {
	return s.Clauses(dbresolver.Write)
}

func (s scaFileRecycleDo) Session(config *gorm.Session) IScaFileRecycleDo {
	return s.withDO(s.DO.Session(config))
}

func (s scaFileRecycleDo) Clauses(conds ...clause.Expression) IScaFileRecycleDo {
	return s.withDO(s.DO.Clauses(conds...))
}

func (s scaFileRecycleDo) Returning(value interface{}, columns ...string) IScaFileRecycleDo {
	return s.withDO(s.DO.Returning(value, columns...))
}

func (s scaFileRecycleDo) Not(conds ...gen.Condition) IScaFileRecycleDo {
	return s.withDO(s.DO.Not(conds...))
}

func (s scaFileRecycleDo) Or(conds ...gen.Condition) IScaFileRecycleDo {
	return s.withDO(s.DO.Or(conds...))
}

func (s scaFileRecycleDo) Select(conds ...field.Expr) IScaFileRecycleDo {
	return s.withDO(s.DO.Select(conds...))
}

func (s scaFileRecycleDo) Where(conds ...gen.Condition) IScaFileRecycleDo {
	return s.withDO(s.DO.Where(conds...))
}

func (s scaFileRecycleDo) Order(conds ...field.Expr) IScaFileRecycleDo {
	return s.withDO(s.DO.Order(conds...))
}

func (s scaFileRecycleDo) Distinct(cols ...field.Expr) IScaFileRecycleDo {
	return s.withDO(s.DO.Distinct(cols...))
}

func (s scaFileRecycleDo) Omit(cols ...field.Expr) IScaFileRecycleDo {
	return s.withDO(s.DO.Omit(cols...))
}

func (s scaFileRecycleDo) Join(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo {
	return s.withDO(s.DO.Join(table, on...))
}

func (s scaFileRecycleDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo {
	return s.withDO(s.DO.LeftJoin(table, on...))
}

func (s scaFileRecycleDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaFileRecycleDo {
	return s.withDO(s.DO.RightJoin(table, on...))
}

func (s scaFileRecycleDo) Group(cols ...field.Expr) IScaFileRecycleDo {
	return s.withDO(s.DO.Group(cols...))
}

func (s scaFileRecycleDo) Having(conds ...gen.Condition) IScaFileRecycleDo {
	return s.withDO(s.DO.Having(conds...))
}

func (s scaFileRecycleDo) Limit(limit int) IScaFileRecycleDo {
	return s.withDO(s.DO.Limit(limit))
}

func (s scaFileRecycleDo) Offset(offset int) IScaFileRecycleDo {
	return s.withDO(s.DO.Offset(offset))
}

func (s scaFileRecycleDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileRecycleDo {
	return s.withDO(s.DO.Scopes(funcs...))
}

func (s scaFileRecycleDo) Unscoped() IScaFileRecycleDo {
	return s.withDO(s.DO.Unscoped())
}

func (s scaFileRecycleDo) Create(values ...*model.ScaFileRecycle) error {
	if len(values) == 0 {
		return nil
	}
	return s.DO.Create(values)
}

func (s scaFileRecycleDo) CreateInBatches(values []*model.ScaFileRecycle, batchSize int) error {
	return s.DO.CreateInBatches(values, batchSize)
}

// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (s scaFileRecycleDo) Save(values ...*model.ScaFileRecycle) error {
	if len(values) == 0 {
		return nil
	}
	return s.DO.Save(values)
}

func (s scaFileRecycleDo) First() (*model.ScaFileRecycle, error) {
	if result, err := s.DO.First(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileRecycle), nil
	}
}

func (s scaFileRecycleDo) Take() (*model.ScaFileRecycle, error) {
	if result, err := s.DO.Take(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileRecycle), nil
	}
}

func (s scaFileRecycleDo) Last() (*model.ScaFileRecycle, error) {
	if result, err := s.DO.Last(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileRecycle), nil
	}
}

func (s scaFileRecycleDo) Find() ([]*model.ScaFileRecycle, error) {
	result, err := s.DO.Find()
	return result.([]*model.ScaFileRecycle), err
}

func (s scaFileRecycleDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileRecycle, err error) {
	buf := make([]*model.ScaFileRecycle, 0, batchSize)
	err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
		defer func() { results = append(results, buf...) }()
		return fc(tx, batch)
	})
	return results, err
}

func (s scaFileRecycleDo) FindInBatches(result *[]*model.ScaFileRecycle, batchSize int, fc func(tx gen.Dao, batch int) error) error {
	return s.DO.FindInBatches(result, batchSize, fc)
}

func (s scaFileRecycleDo) Attrs(attrs ...field.AssignExpr) IScaFileRecycleDo {
	return s.withDO(s.DO.Attrs(attrs...))
}

func (s scaFileRecycleDo) Assign(attrs ...field.AssignExpr) IScaFileRecycleDo {
	return s.withDO(s.DO.Assign(attrs...))
}

func (s scaFileRecycleDo) Joins(fields ...field.RelationField) IScaFileRecycleDo {
	for _, _f := range fields {
		s = *s.withDO(s.DO.Joins(_f))
	}
	return &s
}

func (s scaFileRecycleDo) Preload(fields ...field.RelationField) IScaFileRecycleDo {
	for _, _f := range fields {
		s = *s.withDO(s.DO.Preload(_f))
	}
	return &s
}

func (s scaFileRecycleDo) FirstOrInit() (*model.ScaFileRecycle, error) {
	if result, err := s.DO.FirstOrInit(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileRecycle), nil
	}
}

func (s scaFileRecycleDo) FirstOrCreate() (*model.ScaFileRecycle, error) {
	if result, err := s.DO.FirstOrCreate(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaFileRecycle), nil
	}
}

func (s scaFileRecycleDo) FindByPage(offset int, limit int) (result []*model.ScaFileRecycle, count int64, err error) {
	result, err = s.Offset(offset).Limit(limit).Find()
	if err != nil {
		return
	}

	if size := len(result); 0 < limit && 0 < size && size < limit {
		count = int64(size + offset)
		return
	}

	count, err = s.Offset(-1).Limit(-1).Count()
	return
}

func (s scaFileRecycleDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
	count, err = s.Count()
	if err != nil {
		return
	}

	err = s.Offset(offset).Limit(limit).Scan(result)
	return
}

func (s scaFileRecycleDo) Scan(result interface{}) (err error) {
	return s.DO.Scan(result)
}

func (s scaFileRecycleDo) Delete(models ...*model.ScaFileRecycle) (result gen.ResultInfo, err error) {
	return s.DO.Delete(models)
}

func (s *scaFileRecycleDo) withDO(do gen.Dao) *scaFileRecycleDo {
	s.DO = *do.(*gen.DO)
	return s
}
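ScaFileRecycle above models the soft-deleted recycle bin, so a permanent purge has to combine Unscoped with Delete; without Unscoped, Delete would only perform another soft delete. A hedged Go sketch (the package, function name and query.Q entry point are assumptions, not taken from the repository):

package storage

import (
	"context"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/query"
)

// emptyRecycleBin hard-deletes every recycle-bin row for one user.
// Unscoped lifts the soft-delete filter so Delete issues a real DELETE.
func emptyRecycleBin(ctx context.Context, userID string) (int64, error) {
	r := query.Q.ScaFileRecycle
	info, err := r.WithContext(ctx).
		Unscoped().
		Where(r.UserID.Eq(userID)).
		Delete()
	return info.RowsAffected, err
}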
@@ -1,402 +0,0 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
)
|
||||
|
||||
func newScaFileType(db *gorm.DB, opts ...gen.DOOption) scaFileType {
|
||||
_scaFileType := scaFileType{}
|
||||
|
||||
_scaFileType.scaFileTypeDo.UseDB(db, opts...)
|
||||
_scaFileType.scaFileTypeDo.UseModel(&model.ScaFileType{})
|
||||
|
||||
tableName := _scaFileType.scaFileTypeDo.TableName()
|
||||
_scaFileType.ALL = field.NewAsterisk(tableName)
|
||||
_scaFileType.ID = field.NewInt64(tableName, "id")
|
||||
_scaFileType.TypeName = field.NewString(tableName, "type_name")
|
||||
_scaFileType.MimeType = field.NewString(tableName, "mime_type")
|
||||
_scaFileType.Status = field.NewInt64(tableName, "status")
|
||||
_scaFileType.CreatedAt = field.NewTime(tableName, "created_at")
|
||||
_scaFileType.UpdatedAt = field.NewTime(tableName, "updated_at")
|
||||
_scaFileType.DeletedAt = field.NewField(tableName, "deleted_at")
|
||||
|
||||
_scaFileType.fillFieldMap()
|
||||
|
||||
return _scaFileType
|
||||
}
|
||||
|
||||
type scaFileType struct {
|
||||
scaFileTypeDo
|
||||
|
||||
ALL field.Asterisk
|
||||
ID field.Int64 // 主键
|
||||
TypeName field.String // 类型名称
|
||||
MimeType field.String // MIME 类型
|
||||
Status field.Int64 // 类型状态
|
||||
CreatedAt field.Time // 创建时间
|
||||
UpdatedAt field.Time // 更新时间
|
||||
DeletedAt field.Field // 删除时间
|
||||
|
||||
fieldMap map[string]field.Expr
|
||||
}
|
||||
|
||||
func (s scaFileType) Table(newTableName string) *scaFileType {
|
||||
s.scaFileTypeDo.UseTable(newTableName)
|
||||
return s.updateTableName(newTableName)
|
||||
}
|
||||
|
||||
func (s scaFileType) As(alias string) *scaFileType {
|
||||
s.scaFileTypeDo.DO = *(s.scaFileTypeDo.As(alias).(*gen.DO))
|
||||
return s.updateTableName(alias)
|
||||
}
|
||||
|
||||
func (s *scaFileType) updateTableName(table string) *scaFileType {
|
||||
s.ALL = field.NewAsterisk(table)
|
||||
s.ID = field.NewInt64(table, "id")
|
||||
s.TypeName = field.NewString(table, "type_name")
|
||||
s.MimeType = field.NewString(table, "mime_type")
|
||||
s.Status = field.NewInt64(table, "status")
|
||||
s.CreatedAt = field.NewTime(table, "created_at")
|
||||
s.UpdatedAt = field.NewTime(table, "updated_at")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
|
||||
s.fillFieldMap()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scaFileType) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
|
||||
_f, ok := s.fieldMap[fieldName]
|
||||
if !ok || _f == nil {
|
||||
return nil, false
|
||||
}
|
||||
_oe, ok := _f.(field.OrderExpr)
|
||||
return _oe, ok
|
||||
}
|
||||
|
||||
func (s *scaFileType) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 7)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["type_name"] = s.TypeName
|
||||
s.fieldMap["mime_type"] = s.MimeType
|
||||
s.fieldMap["status"] = s.Status
|
||||
s.fieldMap["created_at"] = s.CreatedAt
|
||||
s.fieldMap["updated_at"] = s.UpdatedAt
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
}
|
||||
|
||||
func (s scaFileType) clone(db *gorm.DB) scaFileType {
|
||||
s.scaFileTypeDo.ReplaceConnPool(db.Statement.ConnPool)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s scaFileType) replaceDB(db *gorm.DB) scaFileType {
|
||||
s.scaFileTypeDo.ReplaceDB(db)
|
||||
return s
|
||||
}
|
||||
|
||||
type scaFileTypeDo struct{ gen.DO }
|
||||
|
||||
type IScaFileTypeDo interface {
|
||||
gen.SubQuery
|
||||
Debug() IScaFileTypeDo
|
||||
WithContext(ctx context.Context) IScaFileTypeDo
|
||||
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
|
||||
ReplaceDB(db *gorm.DB)
|
||||
ReadDB() IScaFileTypeDo
|
||||
WriteDB() IScaFileTypeDo
|
||||
As(alias string) gen.Dao
|
||||
Session(config *gorm.Session) IScaFileTypeDo
|
||||
Columns(cols ...field.Expr) gen.Columns
|
||||
Clauses(conds ...clause.Expression) IScaFileTypeDo
|
||||
Not(conds ...gen.Condition) IScaFileTypeDo
|
||||
Or(conds ...gen.Condition) IScaFileTypeDo
|
||||
Select(conds ...field.Expr) IScaFileTypeDo
|
||||
Where(conds ...gen.Condition) IScaFileTypeDo
|
||||
Order(conds ...field.Expr) IScaFileTypeDo
|
||||
Distinct(cols ...field.Expr) IScaFileTypeDo
|
||||
Omit(cols ...field.Expr) IScaFileTypeDo
|
||||
Join(table schema.Tabler, on ...field.Expr) IScaFileTypeDo
|
||||
LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileTypeDo
|
||||
RightJoin(table schema.Tabler, on ...field.Expr) IScaFileTypeDo
|
||||
Group(cols ...field.Expr) IScaFileTypeDo
|
||||
Having(conds ...gen.Condition) IScaFileTypeDo
|
||||
Limit(limit int) IScaFileTypeDo
|
||||
Offset(offset int) IScaFileTypeDo
|
||||
Count() (count int64, err error)
|
||||
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileTypeDo
|
||||
Unscoped() IScaFileTypeDo
|
||||
Create(values ...*model.ScaFileType) error
|
||||
CreateInBatches(values []*model.ScaFileType, batchSize int) error
|
||||
Save(values ...*model.ScaFileType) error
|
||||
First() (*model.ScaFileType, error)
|
||||
Take() (*model.ScaFileType, error)
|
||||
Last() (*model.ScaFileType, error)
|
||||
Find() ([]*model.ScaFileType, error)
|
||||
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileType, err error)
|
||||
FindInBatches(result *[]*model.ScaFileType, batchSize int, fc func(tx gen.Dao, batch int) error) error
|
||||
Pluck(column field.Expr, dest interface{}) error
|
||||
Delete(...*model.ScaFileType) (info gen.ResultInfo, err error)
|
||||
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
Updates(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateFrom(q gen.SubQuery) gen.Dao
|
||||
Attrs(attrs ...field.AssignExpr) IScaFileTypeDo
|
||||
Assign(attrs ...field.AssignExpr) IScaFileTypeDo
|
||||
Joins(fields ...field.RelationField) IScaFileTypeDo
|
||||
Preload(fields ...field.RelationField) IScaFileTypeDo
|
||||
FirstOrInit() (*model.ScaFileType, error)
|
||||
FirstOrCreate() (*model.ScaFileType, error)
|
||||
FindByPage(offset int, limit int) (result []*model.ScaFileType, count int64, err error)
|
||||
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
|
||||
Scan(result interface{}) (err error)
|
||||
Returning(value interface{}, columns ...string) IScaFileTypeDo
|
||||
UnderlyingDB() *gorm.DB
|
||||
schema.Tabler
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Debug() IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Debug())
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) WithContext(ctx context.Context) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.WithContext(ctx))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) ReadDB() IScaFileTypeDo {
|
||||
return s.Clauses(dbresolver.Read)
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) WriteDB() IScaFileTypeDo {
|
||||
return s.Clauses(dbresolver.Write)
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Session(config *gorm.Session) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Session(config))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Clauses(conds ...clause.Expression) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Clauses(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Returning(value interface{}, columns ...string) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Returning(value, columns...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Not(conds ...gen.Condition) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Not(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Or(conds ...gen.Condition) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Or(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Select(conds ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Select(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Where(conds ...gen.Condition) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Where(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Order(conds ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Order(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Distinct(cols ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Distinct(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Omit(cols ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Omit(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Join(table schema.Tabler, on ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Join(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.LeftJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.RightJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Group(cols ...field.Expr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Group(cols...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Having(conds ...gen.Condition) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Having(conds...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Limit(limit int) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Limit(limit))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Offset(offset int) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Offset(offset))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Scopes(funcs...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Unscoped() IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Unscoped())
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Create(values ...*model.ScaFileType) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Create(values)
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) CreateInBatches(values []*model.ScaFileType, batchSize int) error {
|
||||
return s.DO.CreateInBatches(values, batchSize)
|
||||
}
|
||||
|
||||
// Save : !!! underlying implementation is different with GORM
|
||||
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
|
||||
func (s scaFileTypeDo) Save(values ...*model.ScaFileType) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Save(values)
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) First() (*model.ScaFileType, error) {
|
||||
if result, err := s.DO.First(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileType), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Take() (*model.ScaFileType, error) {
|
||||
if result, err := s.DO.Take(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileType), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Last() (*model.ScaFileType, error) {
|
||||
if result, err := s.DO.Last(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileType), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Find() ([]*model.ScaFileType, error) {
|
||||
result, err := s.DO.Find()
|
||||
return result.([]*model.ScaFileType), err
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaFileType, err error) {
|
||||
buf := make([]*model.ScaFileType, 0, batchSize)
|
||||
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
|
||||
defer func() { results = append(results, buf...) }()
|
||||
return fc(tx, batch)
|
||||
})
|
||||
return results, err
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) FindInBatches(result *[]*model.ScaFileType, batchSize int, fc func(tx gen.Dao, batch int) error) error {
|
||||
return s.DO.FindInBatches(result, batchSize, fc)
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Attrs(attrs ...field.AssignExpr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Attrs(attrs...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Assign(attrs ...field.AssignExpr) IScaFileTypeDo {
|
||||
return s.withDO(s.DO.Assign(attrs...))
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Joins(fields ...field.RelationField) IScaFileTypeDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Joins(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Preload(fields ...field.RelationField) IScaFileTypeDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Preload(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) FirstOrInit() (*model.ScaFileType, error) {
|
||||
if result, err := s.DO.FirstOrInit(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileType), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) FirstOrCreate() (*model.ScaFileType, error) {
|
||||
if result, err := s.DO.FirstOrCreate(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaFileType), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) FindByPage(offset int, limit int) (result []*model.ScaFileType, count int64, err error) {
	result, err = s.Offset(offset).Limit(limit).Find()
	if err != nil {
		return
	}

	if size := len(result); 0 < limit && 0 < size && size < limit {
		count = int64(size + offset)
		return
	}

	count, err = s.Offset(-1).Limit(-1).Count()
	return
}
|
||||
|
||||
func (s scaFileTypeDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
|
||||
count, err = s.Count()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = s.Offset(offset).Limit(limit).Scan(result)
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Scan(result interface{}) (err error) {
|
||||
return s.DO.Scan(result)
|
||||
}
|
||||
|
||||
func (s scaFileTypeDo) Delete(models ...*model.ScaFileType) (result gen.ResultInfo, err error) {
|
||||
return s.DO.Delete(models)
|
||||
}
|
||||
|
||||
func (s *scaFileTypeDo) withDO(do gen.Dao) *scaFileTypeDo {
|
||||
s.DO = *do.(*gen.DO)
|
||||
return s
|
||||
}
|
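The generated scaFileTypeDo above only wraps gen.DO with typed methods; callers normally reach it through the package-level Use constructor that gorm.io/gen emits alongside these files (not shown in this diff). A minimal usage sketch, assuming the default query.Use(db) entry point and an ID field on ScaFileType; the helper name is illustrative and not part of this commit:

// Sketch only: query.Use and the ID field are assumptions based on gen's default output.
package storage

import (
	"context"

	"gorm.io/gorm"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/query"
)

// GetFileType loads one sca_file_type row by primary key through the typed DAO.
func GetFileType(ctx context.Context, db *gorm.DB, id int64) (*model.ScaFileType, error) {
	q := query.Use(db)
	ft := q.ScaFileType
	// SELECT * FROM sca_file_type WHERE id = ? LIMIT 1
	return ft.WithContext(ctx).Where(ft.ID.Eq(id)).First()
}

Note that Save on these DOs is an upsert (db.Clauses(clause.OnConflict{UpdateAll: true}).Create), not the plain GORM Save, as the generated comment points out.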
@@ -6,15 +6,17 @@ package query

import (
	"context"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)

func newScaMessageReport(db *gorm.DB, opts ...gen.DOOption) scaMessageReport {
420
app/auth/model/mysql/query/sca_storage_config.gen.go
Normal file
@@ -0,0 +1,420 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.

package query

import (
	"context"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)

func newScaStorageConfig(db *gorm.DB, opts ...gen.DOOption) scaStorageConfig {
	_scaStorageConfig := scaStorageConfig{}

	_scaStorageConfig.scaStorageConfigDo.UseDB(db, opts...)
	_scaStorageConfig.scaStorageConfigDo.UseModel(&model.ScaStorageConfig{})

	tableName := _scaStorageConfig.scaStorageConfigDo.TableName()
	_scaStorageConfig.ALL = field.NewAsterisk(tableName)
	_scaStorageConfig.ID = field.NewInt64(tableName, "id")
	_scaStorageConfig.UserID = field.NewString(tableName, "user_id")
	_scaStorageConfig.Type = field.NewString(tableName, "type")
	_scaStorageConfig.Endpoint = field.NewString(tableName, "endpoint")
	_scaStorageConfig.AccessKey = field.NewString(tableName, "access_key")
	_scaStorageConfig.SecretKey = field.NewString(tableName, "secret_key")
	_scaStorageConfig.Bucket = field.NewString(tableName, "bucket")
	_scaStorageConfig.Region = field.NewString(tableName, "region")
	_scaStorageConfig.CreatedAt = field.NewTime(tableName, "created_at")
	_scaStorageConfig.UpdatedAt = field.NewTime(tableName, "updated_at")
	_scaStorageConfig.DeletedAt = field.NewField(tableName, "deleted_at")

	_scaStorageConfig.fillFieldMap()

	return _scaStorageConfig
}

type scaStorageConfig struct {
	scaStorageConfigDo

	ALL       field.Asterisk
	ID        field.Int64  // primary key
	UserID    field.String // user ID
	Type      field.String // storage type
	Endpoint  field.String // endpoint address
	AccessKey field.String // access key
	SecretKey field.String // secret key
	Bucket    field.String // bucket
	Region    field.String // region
	CreatedAt field.Time   // creation time
	UpdatedAt field.Time   // update time
	DeletedAt field.Field  // deletion time

	fieldMap map[string]field.Expr
}
|
||||
|
||||
func (s scaStorageConfig) Table(newTableName string) *scaStorageConfig {
|
||||
s.scaStorageConfigDo.UseTable(newTableName)
|
||||
return s.updateTableName(newTableName)
|
||||
}
|
||||
|
||||
func (s scaStorageConfig) As(alias string) *scaStorageConfig {
|
||||
s.scaStorageConfigDo.DO = *(s.scaStorageConfigDo.As(alias).(*gen.DO))
|
||||
return s.updateTableName(alias)
|
||||
}
|
||||
|
||||
func (s *scaStorageConfig) updateTableName(table string) *scaStorageConfig {
|
||||
s.ALL = field.NewAsterisk(table)
|
||||
s.ID = field.NewInt64(table, "id")
|
||||
s.UserID = field.NewString(table, "user_id")
|
||||
s.Type = field.NewString(table, "type")
|
||||
s.Endpoint = field.NewString(table, "endpoint")
|
||||
s.AccessKey = field.NewString(table, "access_key")
|
||||
s.SecretKey = field.NewString(table, "secret_key")
|
||||
s.Bucket = field.NewString(table, "bucket")
|
||||
s.Region = field.NewString(table, "region")
|
||||
s.CreatedAt = field.NewTime(table, "created_at")
|
||||
s.UpdatedAt = field.NewTime(table, "updated_at")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
|
||||
s.fillFieldMap()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scaStorageConfig) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
|
||||
_f, ok := s.fieldMap[fieldName]
|
||||
if !ok || _f == nil {
|
||||
return nil, false
|
||||
}
|
||||
_oe, ok := _f.(field.OrderExpr)
|
||||
return _oe, ok
|
||||
}
|
||||
|
||||
func (s *scaStorageConfig) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 11)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["user_id"] = s.UserID
|
||||
s.fieldMap["type"] = s.Type
|
||||
s.fieldMap["endpoint"] = s.Endpoint
|
||||
s.fieldMap["access_key"] = s.AccessKey
|
||||
s.fieldMap["secret_key"] = s.SecretKey
|
||||
s.fieldMap["bucket"] = s.Bucket
|
||||
s.fieldMap["region"] = s.Region
|
||||
s.fieldMap["created_at"] = s.CreatedAt
|
||||
s.fieldMap["updated_at"] = s.UpdatedAt
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
}
|
||||
|
||||
func (s scaStorageConfig) clone(db *gorm.DB) scaStorageConfig {
|
||||
s.scaStorageConfigDo.ReplaceConnPool(db.Statement.ConnPool)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s scaStorageConfig) replaceDB(db *gorm.DB) scaStorageConfig {
|
||||
s.scaStorageConfigDo.ReplaceDB(db)
|
||||
return s
|
||||
}
|
||||
|
||||
type scaStorageConfigDo struct{ gen.DO }
|
||||
|
||||
type IScaStorageConfigDo interface {
|
||||
gen.SubQuery
|
||||
Debug() IScaStorageConfigDo
|
||||
WithContext(ctx context.Context) IScaStorageConfigDo
|
||||
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
|
||||
ReplaceDB(db *gorm.DB)
|
||||
ReadDB() IScaStorageConfigDo
|
||||
WriteDB() IScaStorageConfigDo
|
||||
As(alias string) gen.Dao
|
||||
Session(config *gorm.Session) IScaStorageConfigDo
|
||||
Columns(cols ...field.Expr) gen.Columns
|
||||
Clauses(conds ...clause.Expression) IScaStorageConfigDo
|
||||
Not(conds ...gen.Condition) IScaStorageConfigDo
|
||||
Or(conds ...gen.Condition) IScaStorageConfigDo
|
||||
Select(conds ...field.Expr) IScaStorageConfigDo
|
||||
Where(conds ...gen.Condition) IScaStorageConfigDo
|
||||
Order(conds ...field.Expr) IScaStorageConfigDo
|
||||
Distinct(cols ...field.Expr) IScaStorageConfigDo
|
||||
Omit(cols ...field.Expr) IScaStorageConfigDo
|
||||
Join(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo
|
||||
LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo
|
||||
RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo
|
||||
Group(cols ...field.Expr) IScaStorageConfigDo
|
||||
Having(conds ...gen.Condition) IScaStorageConfigDo
|
||||
Limit(limit int) IScaStorageConfigDo
|
||||
Offset(offset int) IScaStorageConfigDo
|
||||
Count() (count int64, err error)
|
||||
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageConfigDo
|
||||
Unscoped() IScaStorageConfigDo
|
||||
Create(values ...*model.ScaStorageConfig) error
|
||||
CreateInBatches(values []*model.ScaStorageConfig, batchSize int) error
|
||||
Save(values ...*model.ScaStorageConfig) error
|
||||
First() (*model.ScaStorageConfig, error)
|
||||
Take() (*model.ScaStorageConfig, error)
|
||||
Last() (*model.ScaStorageConfig, error)
|
||||
Find() ([]*model.ScaStorageConfig, error)
|
||||
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageConfig, err error)
|
||||
FindInBatches(result *[]*model.ScaStorageConfig, batchSize int, fc func(tx gen.Dao, batch int) error) error
|
||||
Pluck(column field.Expr, dest interface{}) error
|
||||
Delete(...*model.ScaStorageConfig) (info gen.ResultInfo, err error)
|
||||
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
Updates(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateFrom(q gen.SubQuery) gen.Dao
|
||||
Attrs(attrs ...field.AssignExpr) IScaStorageConfigDo
|
||||
Assign(attrs ...field.AssignExpr) IScaStorageConfigDo
|
||||
Joins(fields ...field.RelationField) IScaStorageConfigDo
|
||||
Preload(fields ...field.RelationField) IScaStorageConfigDo
|
||||
FirstOrInit() (*model.ScaStorageConfig, error)
|
||||
FirstOrCreate() (*model.ScaStorageConfig, error)
|
||||
FindByPage(offset int, limit int) (result []*model.ScaStorageConfig, count int64, err error)
|
||||
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
|
||||
Scan(result interface{}) (err error)
|
||||
Returning(value interface{}, columns ...string) IScaStorageConfigDo
|
||||
UnderlyingDB() *gorm.DB
|
||||
schema.Tabler
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Debug() IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Debug())
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) WithContext(ctx context.Context) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.WithContext(ctx))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) ReadDB() IScaStorageConfigDo {
|
||||
return s.Clauses(dbresolver.Read)
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) WriteDB() IScaStorageConfigDo {
|
||||
return s.Clauses(dbresolver.Write)
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Session(config *gorm.Session) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Session(config))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Clauses(conds ...clause.Expression) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Clauses(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Returning(value interface{}, columns ...string) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Returning(value, columns...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Not(conds ...gen.Condition) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Not(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Or(conds ...gen.Condition) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Or(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Select(conds ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Select(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Where(conds ...gen.Condition) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Where(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Order(conds ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Order(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Distinct(cols ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Distinct(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Omit(cols ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Omit(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Join(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.LeftJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.RightJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Group(cols ...field.Expr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Group(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Having(conds ...gen.Condition) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Having(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Limit(limit int) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Limit(limit))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Offset(offset int) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Offset(offset))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Scopes(funcs...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Unscoped() IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Unscoped())
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Create(values ...*model.ScaStorageConfig) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Create(values)
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) CreateInBatches(values []*model.ScaStorageConfig, batchSize int) error {
|
||||
return s.DO.CreateInBatches(values, batchSize)
|
||||
}
|
||||
|
||||
// Save : !!! underlying implementation is different with GORM
|
||||
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
|
||||
func (s scaStorageConfigDo) Save(values ...*model.ScaStorageConfig) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Save(values)
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) First() (*model.ScaStorageConfig, error) {
|
||||
if result, err := s.DO.First(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageConfig), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Take() (*model.ScaStorageConfig, error) {
|
||||
if result, err := s.DO.Take(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageConfig), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Last() (*model.ScaStorageConfig, error) {
|
||||
if result, err := s.DO.Last(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageConfig), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Find() ([]*model.ScaStorageConfig, error) {
|
||||
result, err := s.DO.Find()
|
||||
return result.([]*model.ScaStorageConfig), err
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageConfig, err error) {
|
||||
buf := make([]*model.ScaStorageConfig, 0, batchSize)
|
||||
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
|
||||
defer func() { results = append(results, buf...) }()
|
||||
return fc(tx, batch)
|
||||
})
|
||||
return results, err
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) FindInBatches(result *[]*model.ScaStorageConfig, batchSize int, fc func(tx gen.Dao, batch int) error) error {
|
||||
return s.DO.FindInBatches(result, batchSize, fc)
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Attrs(attrs ...field.AssignExpr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Attrs(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Assign(attrs ...field.AssignExpr) IScaStorageConfigDo {
|
||||
return s.withDO(s.DO.Assign(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Joins(fields ...field.RelationField) IScaStorageConfigDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Joins(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Preload(fields ...field.RelationField) IScaStorageConfigDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Preload(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) FirstOrInit() (*model.ScaStorageConfig, error) {
|
||||
if result, err := s.DO.FirstOrInit(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageConfig), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) FirstOrCreate() (*model.ScaStorageConfig, error) {
|
||||
if result, err := s.DO.FirstOrCreate(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageConfig), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) FindByPage(offset int, limit int) (result []*model.ScaStorageConfig, count int64, err error) {
|
||||
result, err = s.Offset(offset).Limit(limit).Find()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if size := len(result); 0 < limit && 0 < size && size < limit {
|
||||
count = int64(size + offset)
|
||||
return
|
||||
}
|
||||
|
||||
count, err = s.Offset(-1).Limit(-1).Count()
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
|
||||
count, err = s.Count()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = s.Offset(offset).Limit(limit).Scan(result)
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Scan(result interface{}) (err error) {
|
||||
return s.DO.Scan(result)
|
||||
}
|
||||
|
||||
func (s scaStorageConfigDo) Delete(models ...*model.ScaStorageConfig) (result gen.ResultInfo, err error) {
|
||||
return s.DO.Delete(models)
|
||||
}
|
||||
|
||||
func (s *scaStorageConfigDo) withDO(do gen.Dao) *scaStorageConfigDo {
|
||||
s.DO = *do.(*gen.DO)
|
||||
return s
|
||||
}
|
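sca_storage_config holds the per-user object storage credentials (endpoint, access/secret key, bucket, region). A minimal sketch of how a service layer might encapsulate the lookup through this generated DAO, assuming the gen-emitted query.Use constructor; the helper itself is illustrative and not part of this commit:

// Sketch only: query.Use and the helper name are assumptions.
package storage

import (
	"context"

	"gorm.io/gorm"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/query"
)

// GetUserStorageConfig returns the object storage configuration bound to a user.
func GetUserStorageConfig(ctx context.Context, db *gorm.DB, userID string) (*model.ScaStorageConfig, error) {
	q := query.Use(db)
	c := q.ScaStorageConfig
	// SELECT * FROM sca_storage_config WHERE user_id = ? LIMIT 1
	return c.WithContext(ctx).Where(c.UserID.Eq(userID)).First()
}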
448
app/auth/model/mysql/query/sca_storage_info.gen.go
Normal file
@@ -0,0 +1,448 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.

package query

import (
	"context"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)

func newScaStorageInfo(db *gorm.DB, opts ...gen.DOOption) scaStorageInfo {
	_scaStorageInfo := scaStorageInfo{}

	_scaStorageInfo.scaStorageInfoDo.UseDB(db, opts...)
	_scaStorageInfo.scaStorageInfoDo.UseModel(&model.ScaStorageInfo{})

	tableName := _scaStorageInfo.scaStorageInfoDo.TableName()
	_scaStorageInfo.ALL = field.NewAsterisk(tableName)
	_scaStorageInfo.ID = field.NewInt64(tableName, "id")
	_scaStorageInfo.UserID = field.NewString(tableName, "user_id")
	_scaStorageInfo.Storage = field.NewString(tableName, "storage")
	_scaStorageInfo.Bucket = field.NewString(tableName, "bucket")
	_scaStorageInfo.Type = field.NewString(tableName, "type")
	_scaStorageInfo.Path = field.NewString(tableName, "path")
	_scaStorageInfo.FileName = field.NewString(tableName, "file_name")
	_scaStorageInfo.Category = field.NewString(tableName, "category")
	_scaStorageInfo.Loaction = field.NewString(tableName, "loaction")
	_scaStorageInfo.Hash = field.NewString(tableName, "hash")
	_scaStorageInfo.Anime = field.NewString(tableName, "anime")
	_scaStorageInfo.HasFace = field.NewString(tableName, "has_face")
	_scaStorageInfo.FaceID = field.NewInt64(tableName, "face_id")
	_scaStorageInfo.Landscape = field.NewString(tableName, "landscape")
	_scaStorageInfo.Objects = field.NewString(tableName, "objects")
	_scaStorageInfo.CreatedAt = field.NewTime(tableName, "created_at")
	_scaStorageInfo.UpdatedAt = field.NewTime(tableName, "updated_at")
	_scaStorageInfo.DeletedAt = field.NewField(tableName, "deleted_at")

	_scaStorageInfo.fillFieldMap()

	return _scaStorageInfo
}
|
||||
|
||||
type scaStorageInfo struct {
|
||||
scaStorageInfoDo
|
||||
|
||||
ALL field.Asterisk
|
||||
ID field.Int64 // 主键
|
||||
UserID field.String // 用户ID
|
||||
Storage field.String // 存储空间
|
||||
Bucket field.String // 存储桶
|
||||
Type field.String // 类型
|
||||
Path field.String // 路径
|
||||
FileName field.String // 名称
|
||||
Category field.String // 分类
|
||||
Loaction field.String // 地址
|
||||
Hash field.String // 哈希值
|
||||
Anime field.String // 是否是动漫图片
|
||||
HasFace field.String // 是否人像
|
||||
FaceID field.Int64 // 人像ID
|
||||
Landscape field.String // 风景类型
|
||||
Objects field.String // 对象识别
|
||||
CreatedAt field.Time // 创建时间
|
||||
UpdatedAt field.Time // 更新时间
|
||||
DeletedAt field.Field // 删除时间
|
||||
|
||||
fieldMap map[string]field.Expr
|
||||
}
|
||||
|
||||
func (s scaStorageInfo) Table(newTableName string) *scaStorageInfo {
|
||||
s.scaStorageInfoDo.UseTable(newTableName)
|
||||
return s.updateTableName(newTableName)
|
||||
}
|
||||
|
||||
func (s scaStorageInfo) As(alias string) *scaStorageInfo {
|
||||
s.scaStorageInfoDo.DO = *(s.scaStorageInfoDo.As(alias).(*gen.DO))
|
||||
return s.updateTableName(alias)
|
||||
}
|
||||
|
||||
func (s *scaStorageInfo) updateTableName(table string) *scaStorageInfo {
|
||||
s.ALL = field.NewAsterisk(table)
|
||||
s.ID = field.NewInt64(table, "id")
|
||||
s.UserID = field.NewString(table, "user_id")
|
||||
s.Storage = field.NewString(table, "storage")
|
||||
s.Bucket = field.NewString(table, "bucket")
|
||||
s.Type = field.NewString(table, "type")
|
||||
s.Path = field.NewString(table, "path")
|
||||
s.FileName = field.NewString(table, "file_name")
|
||||
s.Category = field.NewString(table, "category")
|
||||
s.Loaction = field.NewString(table, "loaction")
|
||||
s.Hash = field.NewString(table, "hash")
|
||||
s.Anime = field.NewString(table, "anime")
|
||||
s.HasFace = field.NewString(table, "has_face")
|
||||
s.FaceID = field.NewInt64(table, "face_id")
|
||||
s.Landscape = field.NewString(table, "landscape")
|
||||
s.Objects = field.NewString(table, "objects")
|
||||
s.CreatedAt = field.NewTime(table, "created_at")
|
||||
s.UpdatedAt = field.NewTime(table, "updated_at")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
|
||||
s.fillFieldMap()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scaStorageInfo) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
|
||||
_f, ok := s.fieldMap[fieldName]
|
||||
if !ok || _f == nil {
|
||||
return nil, false
|
||||
}
|
||||
_oe, ok := _f.(field.OrderExpr)
|
||||
return _oe, ok
|
||||
}
|
||||
|
||||
func (s *scaStorageInfo) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 18)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["user_id"] = s.UserID
|
||||
s.fieldMap["storage"] = s.Storage
|
||||
s.fieldMap["bucket"] = s.Bucket
|
||||
s.fieldMap["type"] = s.Type
|
||||
s.fieldMap["path"] = s.Path
|
||||
s.fieldMap["file_name"] = s.FileName
|
||||
s.fieldMap["category"] = s.Category
|
||||
s.fieldMap["loaction"] = s.Loaction
|
||||
s.fieldMap["hash"] = s.Hash
|
||||
s.fieldMap["anime"] = s.Anime
|
||||
s.fieldMap["has_face"] = s.HasFace
|
||||
s.fieldMap["face_id"] = s.FaceID
|
||||
s.fieldMap["landscape"] = s.Landscape
|
||||
s.fieldMap["objects"] = s.Objects
|
||||
s.fieldMap["created_at"] = s.CreatedAt
|
||||
s.fieldMap["updated_at"] = s.UpdatedAt
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
}
|
||||
|
||||
func (s scaStorageInfo) clone(db *gorm.DB) scaStorageInfo {
|
||||
s.scaStorageInfoDo.ReplaceConnPool(db.Statement.ConnPool)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s scaStorageInfo) replaceDB(db *gorm.DB) scaStorageInfo {
|
||||
s.scaStorageInfoDo.ReplaceDB(db)
|
||||
return s
|
||||
}
|
||||
|
||||
type scaStorageInfoDo struct{ gen.DO }
|
||||
|
||||
type IScaStorageInfoDo interface {
|
||||
gen.SubQuery
|
||||
Debug() IScaStorageInfoDo
|
||||
WithContext(ctx context.Context) IScaStorageInfoDo
|
||||
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
|
||||
ReplaceDB(db *gorm.DB)
|
||||
ReadDB() IScaStorageInfoDo
|
||||
WriteDB() IScaStorageInfoDo
|
||||
As(alias string) gen.Dao
|
||||
Session(config *gorm.Session) IScaStorageInfoDo
|
||||
Columns(cols ...field.Expr) gen.Columns
|
||||
Clauses(conds ...clause.Expression) IScaStorageInfoDo
|
||||
Not(conds ...gen.Condition) IScaStorageInfoDo
|
||||
Or(conds ...gen.Condition) IScaStorageInfoDo
|
||||
Select(conds ...field.Expr) IScaStorageInfoDo
|
||||
Where(conds ...gen.Condition) IScaStorageInfoDo
|
||||
Order(conds ...field.Expr) IScaStorageInfoDo
|
||||
Distinct(cols ...field.Expr) IScaStorageInfoDo
|
||||
Omit(cols ...field.Expr) IScaStorageInfoDo
|
||||
Join(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo
|
||||
LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo
|
||||
RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo
|
||||
Group(cols ...field.Expr) IScaStorageInfoDo
|
||||
Having(conds ...gen.Condition) IScaStorageInfoDo
|
||||
Limit(limit int) IScaStorageInfoDo
|
||||
Offset(offset int) IScaStorageInfoDo
|
||||
Count() (count int64, err error)
|
||||
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageInfoDo
|
||||
Unscoped() IScaStorageInfoDo
|
||||
Create(values ...*model.ScaStorageInfo) error
|
||||
CreateInBatches(values []*model.ScaStorageInfo, batchSize int) error
|
||||
Save(values ...*model.ScaStorageInfo) error
|
||||
First() (*model.ScaStorageInfo, error)
|
||||
Take() (*model.ScaStorageInfo, error)
|
||||
Last() (*model.ScaStorageInfo, error)
|
||||
Find() ([]*model.ScaStorageInfo, error)
|
||||
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageInfo, err error)
|
||||
FindInBatches(result *[]*model.ScaStorageInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error
|
||||
Pluck(column field.Expr, dest interface{}) error
|
||||
Delete(...*model.ScaStorageInfo) (info gen.ResultInfo, err error)
|
||||
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
Updates(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateFrom(q gen.SubQuery) gen.Dao
|
||||
Attrs(attrs ...field.AssignExpr) IScaStorageInfoDo
|
||||
Assign(attrs ...field.AssignExpr) IScaStorageInfoDo
|
||||
Joins(fields ...field.RelationField) IScaStorageInfoDo
|
||||
Preload(fields ...field.RelationField) IScaStorageInfoDo
|
||||
FirstOrInit() (*model.ScaStorageInfo, error)
|
||||
FirstOrCreate() (*model.ScaStorageInfo, error)
|
||||
FindByPage(offset int, limit int) (result []*model.ScaStorageInfo, count int64, err error)
|
||||
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
|
||||
Scan(result interface{}) (err error)
|
||||
Returning(value interface{}, columns ...string) IScaStorageInfoDo
|
||||
UnderlyingDB() *gorm.DB
|
||||
schema.Tabler
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Debug() IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Debug())
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) WithContext(ctx context.Context) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.WithContext(ctx))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) ReadDB() IScaStorageInfoDo {
|
||||
return s.Clauses(dbresolver.Read)
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) WriteDB() IScaStorageInfoDo {
|
||||
return s.Clauses(dbresolver.Write)
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Session(config *gorm.Session) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Session(config))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Clauses(conds ...clause.Expression) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Clauses(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Returning(value interface{}, columns ...string) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Returning(value, columns...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Not(conds ...gen.Condition) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Not(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Or(conds ...gen.Condition) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Or(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Select(conds ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Select(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Where(conds ...gen.Condition) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Where(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Order(conds ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Order(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Distinct(cols ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Distinct(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Omit(cols ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Omit(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Join(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.LeftJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.RightJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Group(cols ...field.Expr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Group(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Having(conds ...gen.Condition) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Having(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Limit(limit int) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Limit(limit))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Offset(offset int) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Offset(offset))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Scopes(funcs...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Unscoped() IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Unscoped())
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Create(values ...*model.ScaStorageInfo) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Create(values)
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) CreateInBatches(values []*model.ScaStorageInfo, batchSize int) error {
|
||||
return s.DO.CreateInBatches(values, batchSize)
|
||||
}
|
||||
|
||||
// Save : !!! underlying implementation is different with GORM
|
||||
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
|
||||
func (s scaStorageInfoDo) Save(values ...*model.ScaStorageInfo) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Save(values)
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) First() (*model.ScaStorageInfo, error) {
|
||||
if result, err := s.DO.First(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Take() (*model.ScaStorageInfo, error) {
|
||||
if result, err := s.DO.Take(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Last() (*model.ScaStorageInfo, error) {
|
||||
if result, err := s.DO.Last(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Find() ([]*model.ScaStorageInfo, error) {
|
||||
result, err := s.DO.Find()
|
||||
return result.([]*model.ScaStorageInfo), err
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageInfo, err error) {
|
||||
buf := make([]*model.ScaStorageInfo, 0, batchSize)
|
||||
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
|
||||
defer func() { results = append(results, buf...) }()
|
||||
return fc(tx, batch)
|
||||
})
|
||||
return results, err
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) FindInBatches(result *[]*model.ScaStorageInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error {
|
||||
return s.DO.FindInBatches(result, batchSize, fc)
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Attrs(attrs ...field.AssignExpr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Attrs(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Assign(attrs ...field.AssignExpr) IScaStorageInfoDo {
|
||||
return s.withDO(s.DO.Assign(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Joins(fields ...field.RelationField) IScaStorageInfoDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Joins(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Preload(fields ...field.RelationField) IScaStorageInfoDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Preload(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) FirstOrInit() (*model.ScaStorageInfo, error) {
|
||||
if result, err := s.DO.FirstOrInit(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) FirstOrCreate() (*model.ScaStorageInfo, error) {
|
||||
if result, err := s.DO.FirstOrCreate(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) FindByPage(offset int, limit int) (result []*model.ScaStorageInfo, count int64, err error) {
|
||||
result, err = s.Offset(offset).Limit(limit).Find()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if size := len(result); 0 < limit && 0 < size && size < limit {
|
||||
count = int64(size + offset)
|
||||
return
|
||||
}
|
||||
|
||||
count, err = s.Offset(-1).Limit(-1).Count()
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
|
||||
count, err = s.Count()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = s.Offset(offset).Limit(limit).Scan(result)
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Scan(result interface{}) (err error) {
|
||||
return s.DO.Scan(result)
|
||||
}
|
||||
|
||||
func (s scaStorageInfoDo) Delete(models ...*model.ScaStorageInfo) (result gen.ResultInfo, err error) {
|
||||
return s.DO.Delete(models)
|
||||
}
|
||||
|
||||
func (s *scaStorageInfoDo) withDO(do gen.Dao) *scaStorageInfoDo {
|
||||
s.DO = *do.(*gen.DO)
|
||||
return s
|
||||
}
|
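sca_storage_info is the per-file metadata table (path, hash, face/landscape/object detection fields). A hedged sketch of paginated listing through the generated FindByPage helper, again assuming the gen-emitted query.Use; the function name is illustrative only:

// Sketch only: query.Use and the helper name are assumptions.
package storage

import (
	"context"

	"gorm.io/gorm"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/query"
)

// ListUserFiles returns one page of a user's file records plus the total row count.
func ListUserFiles(ctx context.Context, db *gorm.DB, userID string, page, pageSize int) ([]*model.ScaStorageInfo, int64, error) {
	q := query.Use(db)
	info := q.ScaStorageInfo
	return info.WithContext(ctx).
		Where(info.UserID.Eq(userID)).
		Order(info.CreatedAt.Desc()).
		FindByPage((page-1)*pageSize, pageSize)
}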
400
app/auth/model/mysql/query/sca_storage_tag.gen.go
Normal file
@@ -0,0 +1,400 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.

package query

import (
	"context"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)

func newScaStorageTag(db *gorm.DB, opts ...gen.DOOption) scaStorageTag {
	_scaStorageTag := scaStorageTag{}

	_scaStorageTag.scaStorageTagDo.UseDB(db, opts...)
	_scaStorageTag.scaStorageTagDo.UseModel(&model.ScaStorageTag{})

	tableName := _scaStorageTag.scaStorageTagDo.TableName()
	_scaStorageTag.ALL = field.NewAsterisk(tableName)
	_scaStorageTag.ID = field.NewInt64(tableName, "id")
	_scaStorageTag.FileID = field.NewInt64(tableName, "file_id")
	_scaStorageTag.TagID = field.NewInt64(tableName, "tag_id")
	_scaStorageTag.CreatedAt = field.NewTime(tableName, "created_at")
	_scaStorageTag.UpdatedAt = field.NewTime(tableName, "updated_at")
	_scaStorageTag.DeletedAt = field.NewField(tableName, "deleted_at")

	_scaStorageTag.fillFieldMap()

	return _scaStorageTag
}
|
||||
|
||||
type scaStorageTag struct {
|
||||
scaStorageTagDo
|
||||
|
||||
ALL field.Asterisk
|
||||
ID field.Int64 // 主键
|
||||
FileID field.Int64 // 文件ID
|
||||
TagID field.Int64 // 标签ID
|
||||
CreatedAt field.Time // 创建时间
|
||||
UpdatedAt field.Time // 更新时间
|
||||
DeletedAt field.Field // 删除时间
|
||||
|
||||
fieldMap map[string]field.Expr
|
||||
}
|
||||
|
||||
func (s scaStorageTag) Table(newTableName string) *scaStorageTag {
|
||||
s.scaStorageTagDo.UseTable(newTableName)
|
||||
return s.updateTableName(newTableName)
|
||||
}
|
||||
|
||||
func (s scaStorageTag) As(alias string) *scaStorageTag {
|
||||
s.scaStorageTagDo.DO = *(s.scaStorageTagDo.As(alias).(*gen.DO))
|
||||
return s.updateTableName(alias)
|
||||
}
|
||||
|
||||
func (s *scaStorageTag) updateTableName(table string) *scaStorageTag {
|
||||
s.ALL = field.NewAsterisk(table)
|
||||
s.ID = field.NewInt64(table, "id")
|
||||
s.FileID = field.NewInt64(table, "file_id")
|
||||
s.TagID = field.NewInt64(table, "tag_id")
|
||||
s.CreatedAt = field.NewTime(table, "created_at")
|
||||
s.UpdatedAt = field.NewTime(table, "updated_at")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
|
||||
s.fillFieldMap()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scaStorageTag) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
|
||||
_f, ok := s.fieldMap[fieldName]
|
||||
if !ok || _f == nil {
|
||||
return nil, false
|
||||
}
|
||||
_oe, ok := _f.(field.OrderExpr)
|
||||
return _oe, ok
|
||||
}
|
||||
|
||||
func (s *scaStorageTag) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 6)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["file_id"] = s.FileID
|
||||
s.fieldMap["tag_id"] = s.TagID
|
||||
s.fieldMap["created_at"] = s.CreatedAt
|
||||
s.fieldMap["updated_at"] = s.UpdatedAt
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
}
|
||||
|
||||
func (s scaStorageTag) clone(db *gorm.DB) scaStorageTag {
|
||||
s.scaStorageTagDo.ReplaceConnPool(db.Statement.ConnPool)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s scaStorageTag) replaceDB(db *gorm.DB) scaStorageTag {
|
||||
s.scaStorageTagDo.ReplaceDB(db)
|
||||
return s
|
||||
}
|
||||
|
||||
type scaStorageTagDo struct{ gen.DO }
|
||||
|
||||
type IScaStorageTagDo interface {
|
||||
gen.SubQuery
|
||||
Debug() IScaStorageTagDo
|
||||
WithContext(ctx context.Context) IScaStorageTagDo
|
||||
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
|
||||
ReplaceDB(db *gorm.DB)
|
||||
ReadDB() IScaStorageTagDo
|
||||
WriteDB() IScaStorageTagDo
|
||||
As(alias string) gen.Dao
|
||||
Session(config *gorm.Session) IScaStorageTagDo
|
||||
Columns(cols ...field.Expr) gen.Columns
|
||||
Clauses(conds ...clause.Expression) IScaStorageTagDo
|
||||
Not(conds ...gen.Condition) IScaStorageTagDo
|
||||
Or(conds ...gen.Condition) IScaStorageTagDo
|
||||
Select(conds ...field.Expr) IScaStorageTagDo
|
||||
Where(conds ...gen.Condition) IScaStorageTagDo
|
||||
Order(conds ...field.Expr) IScaStorageTagDo
|
||||
Distinct(cols ...field.Expr) IScaStorageTagDo
|
||||
Omit(cols ...field.Expr) IScaStorageTagDo
|
||||
Join(table schema.Tabler, on ...field.Expr) IScaStorageTagDo
|
||||
LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo
|
||||
RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo
|
||||
Group(cols ...field.Expr) IScaStorageTagDo
|
||||
Having(conds ...gen.Condition) IScaStorageTagDo
|
||||
Limit(limit int) IScaStorageTagDo
|
||||
Offset(offset int) IScaStorageTagDo
|
||||
Count() (count int64, err error)
|
||||
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagDo
|
||||
Unscoped() IScaStorageTagDo
|
||||
Create(values ...*model.ScaStorageTag) error
|
||||
CreateInBatches(values []*model.ScaStorageTag, batchSize int) error
|
||||
Save(values ...*model.ScaStorageTag) error
|
||||
First() (*model.ScaStorageTag, error)
|
||||
Take() (*model.ScaStorageTag, error)
|
||||
Last() (*model.ScaStorageTag, error)
|
||||
Find() ([]*model.ScaStorageTag, error)
|
||||
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTag, err error)
|
||||
FindInBatches(result *[]*model.ScaStorageTag, batchSize int, fc func(tx gen.Dao, batch int) error) error
|
||||
Pluck(column field.Expr, dest interface{}) error
|
||||
Delete(...*model.ScaStorageTag) (info gen.ResultInfo, err error)
|
||||
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
Updates(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateFrom(q gen.SubQuery) gen.Dao
|
||||
Attrs(attrs ...field.AssignExpr) IScaStorageTagDo
|
||||
Assign(attrs ...field.AssignExpr) IScaStorageTagDo
|
||||
Joins(fields ...field.RelationField) IScaStorageTagDo
|
||||
Preload(fields ...field.RelationField) IScaStorageTagDo
|
||||
FirstOrInit() (*model.ScaStorageTag, error)
|
||||
FirstOrCreate() (*model.ScaStorageTag, error)
|
||||
FindByPage(offset int, limit int) (result []*model.ScaStorageTag, count int64, err error)
|
||||
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
|
||||
Scan(result interface{}) (err error)
|
||||
Returning(value interface{}, columns ...string) IScaStorageTagDo
|
||||
UnderlyingDB() *gorm.DB
|
||||
schema.Tabler
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Debug() IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Debug())
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) WithContext(ctx context.Context) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.WithContext(ctx))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) ReadDB() IScaStorageTagDo {
|
||||
return s.Clauses(dbresolver.Read)
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) WriteDB() IScaStorageTagDo {
|
||||
return s.Clauses(dbresolver.Write)
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Session(config *gorm.Session) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Session(config))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Clauses(conds ...clause.Expression) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Clauses(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Returning(value interface{}, columns ...string) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Returning(value, columns...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Not(conds ...gen.Condition) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Not(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Or(conds ...gen.Condition) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Or(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Select(conds ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Select(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Where(conds ...gen.Condition) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Where(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Order(conds ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Order(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Distinct(cols ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Distinct(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Omit(cols ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Omit(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Join(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.LeftJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.RightJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Group(cols ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Group(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Having(conds ...gen.Condition) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Having(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Limit(limit int) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Limit(limit))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Offset(offset int) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Offset(offset))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Scopes(funcs...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Unscoped() IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Unscoped())
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Create(values ...*model.ScaStorageTag) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Create(values)
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) CreateInBatches(values []*model.ScaStorageTag, batchSize int) error {
|
||||
return s.DO.CreateInBatches(values, batchSize)
|
||||
}
|
||||
|
||||
// Save : !!! underlying implementation is different with GORM
|
||||
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
|
||||
func (s scaStorageTagDo) Save(values ...*model.ScaStorageTag) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Save(values)
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) First() (*model.ScaStorageTag, error) {
|
||||
if result, err := s.DO.First(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTag), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Take() (*model.ScaStorageTag, error) {
|
||||
if result, err := s.DO.Take(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTag), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Last() (*model.ScaStorageTag, error) {
|
||||
if result, err := s.DO.Last(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTag), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Find() ([]*model.ScaStorageTag, error) {
|
||||
result, err := s.DO.Find()
|
||||
return result.([]*model.ScaStorageTag), err
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTag, err error) {
|
||||
buf := make([]*model.ScaStorageTag, 0, batchSize)
|
||||
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
|
||||
defer func() { results = append(results, buf...) }()
|
||||
return fc(tx, batch)
|
||||
})
|
||||
return results, err
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) FindInBatches(result *[]*model.ScaStorageTag, batchSize int, fc func(tx gen.Dao, batch int) error) error {
|
||||
return s.DO.FindInBatches(result, batchSize, fc)
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Attrs(attrs ...field.AssignExpr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Attrs(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Assign(attrs ...field.AssignExpr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Assign(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Joins(fields ...field.RelationField) IScaStorageTagDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Joins(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Preload(fields ...field.RelationField) IScaStorageTagDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Preload(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) FirstOrInit() (*model.ScaStorageTag, error) {
|
||||
if result, err := s.DO.FirstOrInit(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTag), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) FirstOrCreate() (*model.ScaStorageTag, error) {
|
||||
if result, err := s.DO.FirstOrCreate(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTag), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) FindByPage(offset int, limit int) (result []*model.ScaStorageTag, count int64, err error) {
|
||||
result, err = s.Offset(offset).Limit(limit).Find()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if size := len(result); 0 < limit && 0 < size && size < limit {
|
||||
count = int64(size + offset)
|
||||
return
|
||||
}
|
||||
|
||||
count, err = s.Offset(-1).Limit(-1).Count()
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
|
||||
count, err = s.Count()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = s.Offset(offset).Limit(limit).Scan(result)
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Scan(result interface{}) (err error) {
|
||||
return s.DO.Scan(result)
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Delete(models ...*model.ScaStorageTag) (result gen.ResultInfo, err error) {
|
||||
return s.DO.Delete(models)
|
||||
}
|
||||
|
||||
func (s *scaStorageTagDo) withDO(do gen.Dao) *scaStorageTagDo {
|
||||
s.DO = *do.(*gen.DO)
|
||||
return s
|
||||
}
|
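sca_storage_tag is the file-to-tag relation table. A small sketch of batched inserts through the generated CreateInBatches, assuming the gen-emitted query.Use and int64 FileID/TagID fields on the model struct (the model is not shown in this diff); the helper name is illustrative:

// Sketch only: query.Use, the helper name, and the model field names are assumptions.
package storage

import (
	"context"

	"gorm.io/gorm"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/query"
)

// TagFile links one file to several tag IDs in a single batched INSERT.
func TagFile(ctx context.Context, db *gorm.DB, fileID int64, tagIDs []int64) error {
	relations := make([]*model.ScaStorageTag, 0, len(tagIDs))
	for _, tagID := range tagIDs {
		relations = append(relations, &model.ScaStorageTag{FileID: fileID, TagID: tagID})
	}
	return query.Use(db).ScaStorageTag.WithContext(ctx).CreateInBatches(relations, 100)
}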
400
app/auth/model/mysql/query/sca_storage_tag_info.gen.go
Normal file
@@ -0,0 +1,400 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.

package query

import (
	"context"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)

func newScaStorageTagInfo(db *gorm.DB, opts ...gen.DOOption) scaStorageTagInfo {
	_scaStorageTagInfo := scaStorageTagInfo{}

	_scaStorageTagInfo.scaStorageTagInfoDo.UseDB(db, opts...)
	_scaStorageTagInfo.scaStorageTagInfoDo.UseModel(&model.ScaStorageTagInfo{})

	tableName := _scaStorageTagInfo.scaStorageTagInfoDo.TableName()
	_scaStorageTagInfo.ALL = field.NewAsterisk(tableName)
	_scaStorageTagInfo.ID = field.NewInt64(tableName, "id")
	_scaStorageTagInfo.TagName = field.NewString(tableName, "tag_name")
	_scaStorageTagInfo.TagKey = field.NewString(tableName, "tag_key")
	_scaStorageTagInfo.CreatedAt = field.NewTime(tableName, "created_at")
	_scaStorageTagInfo.UpdatedAt = field.NewTime(tableName, "updated_at")
	_scaStorageTagInfo.DeletedAt = field.NewField(tableName, "deleted_at")

	_scaStorageTagInfo.fillFieldMap()

	return _scaStorageTagInfo
}
|
||||
|
||||
type scaStorageTagInfo struct {
|
||||
scaStorageTagInfoDo
|
||||
|
||||
ALL field.Asterisk
|
||||
ID field.Int64 // 主键
|
||||
TagName field.String // 标签名称
|
||||
TagKey field.String // 标签关键字
|
||||
CreatedAt field.Time // 创建时间
|
||||
UpdatedAt field.Time // 更新时间
|
||||
DeletedAt field.Field // 删除时间
|
||||
|
||||
fieldMap map[string]field.Expr
|
||||
}

func (s scaStorageTagInfo) Table(newTableName string) *scaStorageTagInfo {
	s.scaStorageTagInfoDo.UseTable(newTableName)
	return s.updateTableName(newTableName)
}

func (s scaStorageTagInfo) As(alias string) *scaStorageTagInfo {
	s.scaStorageTagInfoDo.DO = *(s.scaStorageTagInfoDo.As(alias).(*gen.DO))
	return s.updateTableName(alias)
}

func (s *scaStorageTagInfo) updateTableName(table string) *scaStorageTagInfo {
	s.ALL = field.NewAsterisk(table)
	s.ID = field.NewInt64(table, "id")
	s.TagName = field.NewString(table, "tag_name")
	s.TagKey = field.NewString(table, "tag_key")
	s.CreatedAt = field.NewTime(table, "created_at")
	s.UpdatedAt = field.NewTime(table, "updated_at")
	s.DeletedAt = field.NewField(table, "deleted_at")

	s.fillFieldMap()

	return s
}

func (s *scaStorageTagInfo) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
	_f, ok := s.fieldMap[fieldName]
	if !ok || _f == nil {
		return nil, false
	}
	_oe, ok := _f.(field.OrderExpr)
	return _oe, ok
}

func (s *scaStorageTagInfo) fillFieldMap() {
	s.fieldMap = make(map[string]field.Expr, 6)
	s.fieldMap["id"] = s.ID
	s.fieldMap["tag_name"] = s.TagName
	s.fieldMap["tag_key"] = s.TagKey
	s.fieldMap["created_at"] = s.CreatedAt
	s.fieldMap["updated_at"] = s.UpdatedAt
	s.fieldMap["deleted_at"] = s.DeletedAt
}

func (s scaStorageTagInfo) clone(db *gorm.DB) scaStorageTagInfo {
	s.scaStorageTagInfoDo.ReplaceConnPool(db.Statement.ConnPool)
	return s
}

func (s scaStorageTagInfo) replaceDB(db *gorm.DB) scaStorageTagInfo {
	s.scaStorageTagInfoDo.ReplaceDB(db)
	return s
}

type scaStorageTagInfoDo struct{ gen.DO }

type IScaStorageTagInfoDo interface {
	gen.SubQuery
	Debug() IScaStorageTagInfoDo
	WithContext(ctx context.Context) IScaStorageTagInfoDo
	WithResult(fc func(tx gen.Dao)) gen.ResultInfo
	ReplaceDB(db *gorm.DB)
	ReadDB() IScaStorageTagInfoDo
	WriteDB() IScaStorageTagInfoDo
	As(alias string) gen.Dao
	Session(config *gorm.Session) IScaStorageTagInfoDo
	Columns(cols ...field.Expr) gen.Columns
	Clauses(conds ...clause.Expression) IScaStorageTagInfoDo
	Not(conds ...gen.Condition) IScaStorageTagInfoDo
	Or(conds ...gen.Condition) IScaStorageTagInfoDo
	Select(conds ...field.Expr) IScaStorageTagInfoDo
	Where(conds ...gen.Condition) IScaStorageTagInfoDo
	Order(conds ...field.Expr) IScaStorageTagInfoDo
	Distinct(cols ...field.Expr) IScaStorageTagInfoDo
	Omit(cols ...field.Expr) IScaStorageTagInfoDo
	Join(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo
	LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo
	RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo
	Group(cols ...field.Expr) IScaStorageTagInfoDo
	Having(conds ...gen.Condition) IScaStorageTagInfoDo
	Limit(limit int) IScaStorageTagInfoDo
	Offset(offset int) IScaStorageTagInfoDo
	Count() (count int64, err error)
	Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagInfoDo
	Unscoped() IScaStorageTagInfoDo
	Create(values ...*model.ScaStorageTagInfo) error
	CreateInBatches(values []*model.ScaStorageTagInfo, batchSize int) error
	Save(values ...*model.ScaStorageTagInfo) error
	First() (*model.ScaStorageTagInfo, error)
	Take() (*model.ScaStorageTagInfo, error)
	Last() (*model.ScaStorageTagInfo, error)
	Find() ([]*model.ScaStorageTagInfo, error)
	FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTagInfo, err error)
	FindInBatches(result *[]*model.ScaStorageTagInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error
	Pluck(column field.Expr, dest interface{}) error
	Delete(...*model.ScaStorageTagInfo) (info gen.ResultInfo, err error)
	Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
	UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
	Updates(value interface{}) (info gen.ResultInfo, err error)
	UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
	UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
	UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
	UpdateFrom(q gen.SubQuery) gen.Dao
	Attrs(attrs ...field.AssignExpr) IScaStorageTagInfoDo
	Assign(attrs ...field.AssignExpr) IScaStorageTagInfoDo
	Joins(fields ...field.RelationField) IScaStorageTagInfoDo
	Preload(fields ...field.RelationField) IScaStorageTagInfoDo
	FirstOrInit() (*model.ScaStorageTagInfo, error)
	FirstOrCreate() (*model.ScaStorageTagInfo, error)
	FindByPage(offset int, limit int) (result []*model.ScaStorageTagInfo, count int64, err error)
	ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
	Scan(result interface{}) (err error)
	Returning(value interface{}, columns ...string) IScaStorageTagInfoDo
	UnderlyingDB() *gorm.DB
	schema.Tabler
}
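
// Editor's note, not part of the generated file: a minimal usage sketch of the DAO
// interface defined above, assuming the package-level Use() constructor that
// gorm.io/gen normally emits alongside these query files. `db`, `ctx` and the
// literal tag key are placeholders.
//
//	q := Use(db)
//	tag := q.ScaStorageTagInfo
//	rows, err := tag.WithContext(ctx).
//		Where(tag.TagKey.Eq("favorites")).
//		Order(tag.CreatedAt.Desc()).
//		Find()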

func (s scaStorageTagInfoDo) Debug() IScaStorageTagInfoDo {
	return s.withDO(s.DO.Debug())
}

func (s scaStorageTagInfoDo) WithContext(ctx context.Context) IScaStorageTagInfoDo {
	return s.withDO(s.DO.WithContext(ctx))
}

func (s scaStorageTagInfoDo) ReadDB() IScaStorageTagInfoDo {
	return s.Clauses(dbresolver.Read)
}

func (s scaStorageTagInfoDo) WriteDB() IScaStorageTagInfoDo {
	return s.Clauses(dbresolver.Write)
}

func (s scaStorageTagInfoDo) Session(config *gorm.Session) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Session(config))
}

func (s scaStorageTagInfoDo) Clauses(conds ...clause.Expression) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Clauses(conds...))
}

func (s scaStorageTagInfoDo) Returning(value interface{}, columns ...string) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Returning(value, columns...))
}

func (s scaStorageTagInfoDo) Not(conds ...gen.Condition) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Not(conds...))
}

func (s scaStorageTagInfoDo) Or(conds ...gen.Condition) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Or(conds...))
}

func (s scaStorageTagInfoDo) Select(conds ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Select(conds...))
}

func (s scaStorageTagInfoDo) Where(conds ...gen.Condition) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Where(conds...))
}

func (s scaStorageTagInfoDo) Order(conds ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Order(conds...))
}

func (s scaStorageTagInfoDo) Distinct(cols ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Distinct(cols...))
}

func (s scaStorageTagInfoDo) Omit(cols ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Omit(cols...))
}

func (s scaStorageTagInfoDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Join(table, on...))
}

func (s scaStorageTagInfoDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.LeftJoin(table, on...))
}

func (s scaStorageTagInfoDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.RightJoin(table, on...))
}

func (s scaStorageTagInfoDo) Group(cols ...field.Expr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Group(cols...))
}

func (s scaStorageTagInfoDo) Having(conds ...gen.Condition) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Having(conds...))
}

func (s scaStorageTagInfoDo) Limit(limit int) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Limit(limit))
}

func (s scaStorageTagInfoDo) Offset(offset int) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Offset(offset))
}

func (s scaStorageTagInfoDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Scopes(funcs...))
}

func (s scaStorageTagInfoDo) Unscoped() IScaStorageTagInfoDo {
	return s.withDO(s.DO.Unscoped())
}

func (s scaStorageTagInfoDo) Create(values ...*model.ScaStorageTagInfo) error {
	if len(values) == 0 {
		return nil
	}
	return s.DO.Create(values)
}

func (s scaStorageTagInfoDo) CreateInBatches(values []*model.ScaStorageTagInfo, batchSize int) error {
	return s.DO.CreateInBatches(values, batchSize)
}

// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (s scaStorageTagInfoDo) Save(values ...*model.ScaStorageTagInfo) error {
	if len(values) == 0 {
		return nil
	}
	return s.DO.Save(values)
}

func (s scaStorageTagInfoDo) First() (*model.ScaStorageTagInfo, error) {
	if result, err := s.DO.First(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTagInfo), nil
	}
}

func (s scaStorageTagInfoDo) Take() (*model.ScaStorageTagInfo, error) {
	if result, err := s.DO.Take(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTagInfo), nil
	}
}

func (s scaStorageTagInfoDo) Last() (*model.ScaStorageTagInfo, error) {
	if result, err := s.DO.Last(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTagInfo), nil
	}
}

func (s scaStorageTagInfoDo) Find() ([]*model.ScaStorageTagInfo, error) {
	result, err := s.DO.Find()
	return result.([]*model.ScaStorageTagInfo), err
}

func (s scaStorageTagInfoDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTagInfo, err error) {
	buf := make([]*model.ScaStorageTagInfo, 0, batchSize)
	err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
		defer func() { results = append(results, buf...) }()
		return fc(tx, batch)
	})
	return results, err
}

func (s scaStorageTagInfoDo) FindInBatches(result *[]*model.ScaStorageTagInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error {
	return s.DO.FindInBatches(result, batchSize, fc)
}

func (s scaStorageTagInfoDo) Attrs(attrs ...field.AssignExpr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Attrs(attrs...))
}

func (s scaStorageTagInfoDo) Assign(attrs ...field.AssignExpr) IScaStorageTagInfoDo {
	return s.withDO(s.DO.Assign(attrs...))
}

func (s scaStorageTagInfoDo) Joins(fields ...field.RelationField) IScaStorageTagInfoDo {
	for _, _f := range fields {
		s = *s.withDO(s.DO.Joins(_f))
	}
	return &s
}

func (s scaStorageTagInfoDo) Preload(fields ...field.RelationField) IScaStorageTagInfoDo {
	for _, _f := range fields {
		s = *s.withDO(s.DO.Preload(_f))
	}
	return &s
}

func (s scaStorageTagInfoDo) FirstOrInit() (*model.ScaStorageTagInfo, error) {
	if result, err := s.DO.FirstOrInit(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTagInfo), nil
	}
}

func (s scaStorageTagInfoDo) FirstOrCreate() (*model.ScaStorageTagInfo, error) {
	if result, err := s.DO.FirstOrCreate(); err != nil {
		return nil, err
	} else {
		return result.(*model.ScaStorageTagInfo), nil
	}
}

func (s scaStorageTagInfoDo) FindByPage(offset int, limit int) (result []*model.ScaStorageTagInfo, count int64, err error) {
	result, err = s.Offset(offset).Limit(limit).Find()
	if err != nil {
		return
	}

	if size := len(result); 0 < limit && 0 < size && size < limit {
		count = int64(size + offset)
		return
	}

	count, err = s.Offset(-1).Limit(-1).Count()
	return
}
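
// Editor's note, not part of the generated file: FindByPage skips the extra COUNT
// query when the fetched page is shorter than `limit`, since the total must then be
// offset+len(result). A hypothetical call, reusing the `q`/`ctx` placeholders from
// the sketch above:
//
//	tag := q.ScaStorageTagInfo
//	rows, total, err := tag.WithContext(ctx).
//		Where(tag.TagName.Like("%album%")).
//		FindByPage(0, 20)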

func (s scaStorageTagInfoDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
	count, err = s.Count()
	if err != nil {
		return
	}

	err = s.Offset(offset).Limit(limit).Scan(result)
	return
}

func (s scaStorageTagInfoDo) Scan(result interface{}) (err error) {
	return s.DO.Scan(result)
}

func (s scaStorageTagInfoDo) Delete(models ...*model.ScaStorageTagInfo) (result gen.ResultInfo, err error) {
	return s.DO.Delete(models)
}

func (s *scaStorageTagInfoDo) withDO(do gen.Dao) *scaStorageTagInfoDo {
	s.DO = *do.(*gen.DO)
	return s
}
@@ -6,15 +6,17 @@ package query

import (
	"context"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)

func newScaUserFollow(db *gorm.DB, opts ...gen.DOOption) scaUserFollow {

@@ -6,15 +6,17 @@ package query

import (
	"context"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)

func newScaUserLevel(db *gorm.DB, opts ...gen.DOOption) scaUserLevel {

@@ -6,15 +6,17 @@ package query

import (
	"context"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/schema"
	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"

	"gorm.io/gen"
	"gorm.io/gen/field"

	"gorm.io/plugin/dbresolver"

	"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
)

func newScaUserMessage(db *gorm.DB, opts ...gen.DOOption) scaUserMessage {