✨ added apis and optimized table structures
This commit is contained in:
@@ -593,6 +593,11 @@ type (
|
||||
SingleImageRequest {
|
||||
ID int64 `json:"id"`
|
||||
}
|
||||
DeleteImageRequest {
|
||||
IDS []int64 `json:"ids"`
|
||||
Provider string `json:"provider"`
|
||||
Bucket string `json:"bucket"`
|
||||
}
|
||||
StorageMeta {
|
||||
Name string `json:"name"`
|
||||
Value string `json:"value"`
|
||||
@@ -605,6 +610,22 @@ type (
|
||||
StorageListResponse {
|
||||
Records []StroageNode `json:"records"`
|
||||
}
|
||||
QueryDeleteRecordRequest {
|
||||
Provider string `json:"provider"`
|
||||
Bucket string `json:"bucket"`
|
||||
}
|
||||
DeleteRecordListResponse {
|
||||
Records []AllImageDetail `json:"records"`
|
||||
}
|
||||
BucketCapacityRequest {
|
||||
Provider string `json:"provider"`
|
||||
Bucket string `json:"bucket"`
|
||||
}
|
||||
BucketCapacityResponse {
|
||||
Capacity string `json:"capacity"`
|
||||
Used string `json:"used"`
|
||||
Percentage float64 `json:"percentage"`
|
||||
}
|
||||
)
|
||||
|
||||
// 文件上传
|
||||
@@ -695,6 +716,18 @@ service auth {
|
||||
// 获取用户存储配置列表
|
||||
@handler getUserStorageList
|
||||
post /user/config/list returns (StorageListResponse)
|
||||
|
||||
// 删除图片
|
||||
@handler deleteImage
|
||||
post /image/delete (DeleteImageRequest) returns (string)
|
||||
|
||||
// 获取删除记录
|
||||
@handler getDeleteRecord
|
||||
post /delete/record (QueryDeleteRecordRequest) returns (DeleteRecordListResponse)
|
||||
|
||||
// 获取存储桶的容量信息
|
||||
@handler getBucketCapacity
|
||||
post /bucket/capacity (BucketCapacityRequest) returns (BucketCapacityResponse)
|
||||
}
|
||||
|
||||
type (
|
||||
@@ -717,7 +750,7 @@ type (
|
||||
Images []ShareImageMeta `json:"images"`
|
||||
}
|
||||
QueryShareImageRequest {
|
||||
ShareCode string `json:"share_code"`
|
||||
InviteCode string `json:"invite_code"`
|
||||
AccessPassword string `json:"access_password,omitempty"`
|
||||
}
|
||||
ShareImageListMeta {
|
||||
@@ -725,13 +758,13 @@ type (
|
||||
FileName string `json:"file_name"`
|
||||
URL string `json:"url"`
|
||||
Thumbnail string `json:"thumbnail"`
|
||||
ThumbW float64 `json:"thumb_w"`
|
||||
ThumbH float64 `json:"thumb_h"`
|
||||
Width float64 `json:"width"`
|
||||
Height float64 `json:"height"`
|
||||
ThumbSize float64 `json:"thumb_size"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
}
|
||||
QueryShareImageResponse {
|
||||
List []ShareImageListMeta `json:"list"`
|
||||
Records []ShareImageListMeta `json:"records"`
|
||||
}
|
||||
ShareRecordListRequest {
|
||||
DateRange []string `json:"date_range"`
|
||||
@@ -741,7 +774,7 @@ type (
|
||||
ID int64 `json:"id"`
|
||||
CoverImage string `json:"cover_image"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
ShareCode string `json:"share_code"`
|
||||
InviteCode string `json:"invite_code"`
|
||||
VisitLimit int64 `json:"visit_limit"`
|
||||
AccessPassword string `json:"access_password"`
|
||||
ValidityPeriod int64 `json:"validity_period"`
|
||||
@@ -749,6 +782,31 @@ type (
|
||||
ShareRecordListResponse {
|
||||
records []ShareRecord `json:"records"`
|
||||
}
|
||||
QueryShareInfoRequest {
|
||||
InviteCode string `json:"invite_code"`
|
||||
}
|
||||
ShareInfoResponse {
|
||||
ID int64 `json:"id"`
|
||||
CoverImage string `json:"cover_image"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
VisitLimit int64 `json:"visit_limit"`
|
||||
ExpireTime string `json:"expire_time"`
|
||||
ImageCount int64 `json:"image_count"`
|
||||
VisitCount int64 `json:"visit_count"`
|
||||
ViewerCount int64 `json:"viewer_count"`
|
||||
SharerAvatar string `json:"sharer_avatar"`
|
||||
SharerName string `json:"sharer_name"`
|
||||
AlbumName string `json:"album_name"`
|
||||
}
|
||||
// 分享数据概览响应参数
|
||||
ShareOverviewResponse {
|
||||
VisitCount int64 `json:"visit_count"`
|
||||
VisitCountToday int64 `json:"visit_count_today"`
|
||||
ViewerCount int64 `json:"viewer_count"`
|
||||
ViewerCountToday int64 `json:"viewer_count_today"`
|
||||
PublishCount int64 `json:"publish_count"`
|
||||
PublishCountToday int64 `json:"publish_count_today"`
|
||||
}
|
||||
)
|
||||
|
||||
// 分享服务
|
||||
@@ -774,5 +832,13 @@ service auth {
|
||||
// 列出分享记录
|
||||
@handler listShareRecord
|
||||
post /record/list (ShareRecordListRequest) returns (ShareRecordListResponse)
|
||||
|
||||
// 查看分享信息
|
||||
@handler queryShareInfo
|
||||
post /info (QueryShareInfoRequest) returns (ShareInfoResponse)
|
||||
|
||||
// 查询浏览数据概览
|
||||
@handler queryShareOverview
|
||||
post /overview returns (ShareOverviewResponse)
|
||||
}
|
||||
|
||||
|
@@ -168,6 +168,16 @@ func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) {
|
||||
Path: "/image/list",
|
||||
Handler: share.QueryShareImageHandler(serverCtx),
|
||||
},
|
||||
{
|
||||
Method: http.MethodPost,
|
||||
Path: "/info",
|
||||
Handler: share.QueryShareInfoHandler(serverCtx),
|
||||
},
|
||||
{
|
||||
Method: http.MethodPost,
|
||||
Path: "/overview",
|
||||
Handler: share.QueryShareOverviewHandler(serverCtx),
|
||||
},
|
||||
{
|
||||
Method: http.MethodPost,
|
||||
Path: "/record/list",
|
||||
@@ -241,11 +251,21 @@ func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) {
|
||||
Path: "/album/rename",
|
||||
Handler: storage.RenameAlbumHandler(serverCtx),
|
||||
},
|
||||
{
|
||||
Method: http.MethodPost,
|
||||
Path: "/bucket/capacity",
|
||||
Handler: storage.GetBucketCapacityHandler(serverCtx),
|
||||
},
|
||||
{
|
||||
Method: http.MethodPost,
|
||||
Path: "/config",
|
||||
Handler: storage.SetStorageConfigHandler(serverCtx),
|
||||
},
|
||||
{
|
||||
Method: http.MethodPost,
|
||||
Path: "/delete/record",
|
||||
Handler: storage.GetDeleteRecordHandler(serverCtx),
|
||||
},
|
||||
{
|
||||
Method: http.MethodPost,
|
||||
Path: "/face/detail/list",
|
||||
@@ -271,6 +291,11 @@ func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) {
|
||||
Path: "/image/all/list",
|
||||
Handler: storage.QueryAllImageListHandler(serverCtx),
|
||||
},
|
||||
{
|
||||
Method: http.MethodPost,
|
||||
Path: "/image/delete",
|
||||
Handler: storage.DeleteImageHandler(serverCtx),
|
||||
},
|
||||
{
|
||||
Method: http.MethodPost,
|
||||
Path: "/image/location/detail/list",
|
||||
|
@@ -0,0 +1,29 @@
|
||||
package share
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/zeromicro/go-zero/rest/httpx"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/logic/share"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/types"
|
||||
"schisandra-album-cloud-microservices/common/xhttp"
|
||||
)
|
||||
|
||||
func QueryShareInfoHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
var req types.QueryShareInfoRequest
|
||||
if err := httpx.Parse(r, &req); err != nil {
|
||||
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
|
||||
return
|
||||
}
|
||||
|
||||
l := share.NewQueryShareInfoLogic(r.Context(), svcCtx)
|
||||
resp, err := l.QueryShareInfo(&req)
|
||||
if err != nil {
|
||||
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
|
||||
} else {
|
||||
xhttp.JsonBaseResponseCtx(r.Context(), w, resp)
|
||||
}
|
||||
}
|
||||
}
|
@@ -0,0 +1,21 @@
|
||||
package share
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/logic/share"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
|
||||
"schisandra-album-cloud-microservices/common/xhttp"
|
||||
)
|
||||
|
||||
func QueryShareOverviewHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
l := share.NewQueryShareOverviewLogic(r.Context(), svcCtx)
|
||||
resp, err := l.QueryShareOverview()
|
||||
if err != nil {
|
||||
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
|
||||
} else {
|
||||
xhttp.JsonBaseResponseCtx(r.Context(), w, resp)
|
||||
}
|
||||
}
|
||||
}
|
@@ -0,0 +1,29 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/zeromicro/go-zero/rest/httpx"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/logic/storage"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/types"
|
||||
"schisandra-album-cloud-microservices/common/xhttp"
|
||||
)
|
||||
|
||||
func DeleteImageHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
var req types.DeleteImageRequest
|
||||
if err := httpx.Parse(r, &req); err != nil {
|
||||
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
|
||||
return
|
||||
}
|
||||
|
||||
l := storage.NewDeleteImageLogic(r.Context(), svcCtx)
|
||||
resp, err := l.DeleteImage(&req)
|
||||
if err != nil {
|
||||
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
|
||||
} else {
|
||||
xhttp.JsonBaseResponseCtx(r.Context(), w, resp)
|
||||
}
|
||||
}
|
||||
}
|
@@ -0,0 +1,29 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/zeromicro/go-zero/rest/httpx"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/logic/storage"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/types"
|
||||
"schisandra-album-cloud-microservices/common/xhttp"
|
||||
)
|
||||
|
||||
func GetBucketCapacityHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
var req types.BucketCapacityRequest
|
||||
if err := httpx.Parse(r, &req); err != nil {
|
||||
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
|
||||
return
|
||||
}
|
||||
|
||||
l := storage.NewGetBucketCapacityLogic(r.Context(), svcCtx)
|
||||
resp, err := l.GetBucketCapacity(&req)
|
||||
if err != nil {
|
||||
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
|
||||
} else {
|
||||
xhttp.JsonBaseResponseCtx(r.Context(), w, resp)
|
||||
}
|
||||
}
|
||||
}
|
@@ -0,0 +1,29 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/zeromicro/go-zero/rest/httpx"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/logic/storage"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/types"
|
||||
"schisandra-album-cloud-microservices/common/xhttp"
|
||||
)
|
||||
|
||||
func GetDeleteRecordHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
var req types.QueryDeleteRecordRequest
|
||||
if err := httpx.Parse(r, &req); err != nil {
|
||||
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
|
||||
return
|
||||
}
|
||||
|
||||
l := storage.NewGetDeleteRecordLogic(r.Context(), svcCtx)
|
||||
resp, err := l.GetDeleteRecord(&req)
|
||||
if err != nil {
|
||||
xhttp.JsonBaseResponseCtx(r.Context(), w, err)
|
||||
} else {
|
||||
xhttp.JsonBaseResponseCtx(r.Context(), w, resp)
|
||||
}
|
||||
}
|
||||
}
|
@@ -35,7 +35,7 @@ func (l *ListShareRecordLogic) ListShareRecord(req *types.ShareRecordListRequest
|
||||
var recordList []types.ShareRecord
|
||||
query := storageShare.
|
||||
Select(storageShare.ID,
|
||||
storageShare.ShareCode,
|
||||
storageShare.InviteCode,
|
||||
storageShare.VisitLimit,
|
||||
storageShare.AccessPassword,
|
||||
storageShare.ValidityPeriod,
|
||||
|
@@ -41,7 +41,7 @@ func (l *QueryShareImageLogic) QueryShareImage(req *types.QueryShareImageRequest
|
||||
return nil, errors.New("user_id not found")
|
||||
}
|
||||
// 获取分享记录
|
||||
cacheKey := constant.ImageSharePrefix + req.ShareCode
|
||||
cacheKey := constant.ImageSharePrefix + req.InviteCode
|
||||
shareData, err := l.svcCtx.RedisClient.Get(l.ctx, cacheKey).Result()
|
||||
if err != nil {
|
||||
if errors.Is(err, redis.Nil) {
|
||||
@@ -66,7 +66,7 @@ func (l *QueryShareImageLogic) QueryShareImage(req *types.QueryShareImageRequest
|
||||
|
||||
// 检查访问限制
|
||||
if storageShare.VisitLimit > 0 {
|
||||
err = l.incrementVisitCount(req.ShareCode, storageShare.VisitLimit)
|
||||
err = l.incrementVisitCount(req.InviteCode, storageShare.VisitLimit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -79,7 +79,7 @@ func (l *QueryShareImageLogic) QueryShareImage(req *types.QueryShareImageRequest
|
||||
}
|
||||
|
||||
// 生成缓存键(在验证通过后)
|
||||
resultCacheKey := constant.ImageListPrefix + req.ShareCode + ":" + req.AccessPassword
|
||||
resultCacheKey := constant.ImageCachePrefix + storageShare.UserID + ":share:" + req.InviteCode
|
||||
|
||||
// 尝试从缓存中获取结果
|
||||
cachedResult, err := l.svcCtx.RedisClient.Get(l.ctx, resultCacheKey).Result()
|
||||
@@ -131,7 +131,7 @@ func (l *QueryShareImageLogic) queryShareImageFromSource(storageShare *model.Sca
|
||||
storageThumb.ThumbW,
|
||||
storageThumb.ThumbH,
|
||||
storageThumb.ThumbSize).
|
||||
LeftJoin(storageThumb, storageInfo.ThumbID.EqCol(storageThumb.ID)).
|
||||
LeftJoin(storageThumb, storageInfo.ID.EqCol(storageThumb.InfoID)).
|
||||
Where(
|
||||
storageInfo.Type.Eq(constant.ImageTypeShared),
|
||||
storageInfo.AlbumID.Eq(storageShare.AlbumID)).
|
||||
@@ -175,8 +175,8 @@ func (l *QueryShareImageLogic) queryShareImageFromSource(storageShare *model.Sca
|
||||
ResultList = append(ResultList, types.ShareImageListMeta{
|
||||
ID: imgInfo.ID,
|
||||
FileName: imgInfo.FileName,
|
||||
ThumbH: imgInfo.ThumbH,
|
||||
ThumbW: imgInfo.ThumbW,
|
||||
Height: imgInfo.ThumbH,
|
||||
Width: imgInfo.ThumbW,
|
||||
ThumbSize: imgInfo.ThumbSize,
|
||||
CreatedAt: imgInfo.CreatedAt.Format(constant.TimeFormat),
|
||||
URL: ossURL,
|
||||
@@ -192,7 +192,7 @@ func (l *QueryShareImageLogic) queryShareImageFromSource(storageShare *model.Sca
|
||||
}
|
||||
|
||||
return &types.QueryShareImageResponse{
|
||||
List: ResultList}, nil
|
||||
Records: ResultList}, nil
|
||||
}
|
||||
|
||||
func (l *QueryShareImageLogic) recordUserVisit(shareID int64, userID string) error {
|
||||
|
75
app/auth/api/internal/logic/share/query_share_info_logic.go
Normal file
75
app/auth/api/internal/logic/share/query_share_info_logic.go
Normal file
@@ -0,0 +1,75 @@
|
||||
package share
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/types"
|
||||
|
||||
"github.com/zeromicro/go-zero/core/logx"
|
||||
)
|
||||
|
||||
type QueryShareInfoLogic struct {
|
||||
logx.Logger
|
||||
ctx context.Context
|
||||
svcCtx *svc.ServiceContext
|
||||
}
|
||||
|
||||
func NewQueryShareInfoLogic(ctx context.Context, svcCtx *svc.ServiceContext) *QueryShareInfoLogic {
|
||||
return &QueryShareInfoLogic{
|
||||
Logger: logx.WithContext(ctx),
|
||||
ctx: ctx,
|
||||
svcCtx: svcCtx,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *QueryShareInfoLogic) QueryShareInfo(req *types.QueryShareInfoRequest) (resp *types.ShareInfoResponse, err error) {
|
||||
uid, ok := l.ctx.Value("user_id").(string)
|
||||
if !ok {
|
||||
return nil, errors.New("user_id not found")
|
||||
}
|
||||
|
||||
storageShare := l.svcCtx.DB.ScaStorageShare
|
||||
storageAlbum := l.svcCtx.DB.ScaStorageAlbum
|
||||
shareVisit := l.svcCtx.DB.ScaStorageShareVisit
|
||||
authUser := l.svcCtx.DB.ScaAuthUser
|
||||
|
||||
var shareInfo types.ShareInfoResponse
|
||||
err = storageShare.Select(
|
||||
storageShare.ID,
|
||||
storageShare.VisitLimit,
|
||||
storageShare.InviteCode,
|
||||
storageShare.ExpireTime,
|
||||
storageShare.CreatedAt,
|
||||
storageAlbum.CoverImage,
|
||||
storageAlbum.AlbumName,
|
||||
storageShare.ImageCount,
|
||||
shareVisit.Views.As("visit_count"),
|
||||
shareVisit.UserID.Count().As("viewer_count"),
|
||||
authUser.Avatar.As("sharer_avatar"),
|
||||
authUser.Nickname.As("sharer_name")).
|
||||
LeftJoin(storageAlbum, storageShare.AlbumID.EqCol(storageAlbum.ID)).
|
||||
Join(shareVisit, storageShare.ID.EqCol(shareVisit.ShareID)).
|
||||
LeftJoin(authUser, storageShare.UserID.EqCol(authUser.UID)).
|
||||
Where(
|
||||
storageShare.InviteCode.Eq(req.InviteCode),
|
||||
shareVisit.UserID.Eq(uid)).
|
||||
Group(
|
||||
storageShare.ID,
|
||||
storageShare.VisitLimit,
|
||||
storageShare.InviteCode,
|
||||
storageShare.ExpireTime,
|
||||
storageShare.CreatedAt,
|
||||
storageAlbum.CoverImage,
|
||||
storageShare.ImageCount,
|
||||
storageAlbum.AlbumName,
|
||||
shareVisit.Views,
|
||||
authUser.Avatar,
|
||||
authUser.Nickname).
|
||||
Scan(&shareInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &shareInfo, nil
|
||||
}
|
@@ -0,0 +1,80 @@
|
||||
package share
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/types"
|
||||
"time"
|
||||
|
||||
"github.com/zeromicro/go-zero/core/logx"
|
||||
)
|
||||
|
||||
type QueryShareOverviewLogic struct {
|
||||
logx.Logger
|
||||
ctx context.Context
|
||||
svcCtx *svc.ServiceContext
|
||||
}
|
||||
|
||||
func NewQueryShareOverviewLogic(ctx context.Context, svcCtx *svc.ServiceContext) *QueryShareOverviewLogic {
|
||||
return &QueryShareOverviewLogic{
|
||||
Logger: logx.WithContext(ctx),
|
||||
ctx: ctx,
|
||||
svcCtx: svcCtx,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *QueryShareOverviewLogic) QueryShareOverview() (resp *types.ShareOverviewResponse, err error) {
|
||||
uid, ok := l.ctx.Value("user_id").(string)
|
||||
if !ok {
|
||||
return nil, errors.New("user_id not found")
|
||||
}
|
||||
storageShare := l.svcCtx.DB.ScaStorageShare
|
||||
shareVisit := l.svcCtx.DB.ScaStorageShareVisit
|
||||
// 统计所有数据
|
||||
var totalResult struct {
|
||||
TotalCount int64
|
||||
TotalViews int64
|
||||
TotalUsers int64
|
||||
}
|
||||
err = storageShare.Select(
|
||||
storageShare.ID.Count().As("total_count"),
|
||||
shareVisit.Views.Sum().As("total_views"),
|
||||
shareVisit.UserID.Distinct().Count().As("total_users"),
|
||||
).
|
||||
Join(shareVisit, storageShare.ID.EqCol(shareVisit.ShareID)).
|
||||
Where(storageShare.UserID.Eq(uid)).
|
||||
Scan(&totalResult)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// 统计当天数据
|
||||
var dailyResult struct {
|
||||
DailyCount int64
|
||||
DailyViews int64
|
||||
DailyUsers int64
|
||||
}
|
||||
err = storageShare.Select(
|
||||
storageShare.ID.Count().As("daily_count"),
|
||||
shareVisit.Views.Sum().As("daily_views"),
|
||||
shareVisit.UserID.Distinct().Count().As("daily_users"),
|
||||
).
|
||||
Join(shareVisit, storageShare.ID.EqCol(shareVisit.ShareID)).
|
||||
Where(storageShare.UserID.Eq(uid),
|
||||
shareVisit.CreatedAt.Gte(time.Now().Truncate(24*time.Hour))).
|
||||
Scan(&dailyResult)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// 合并结果到 ShareOverviewResponse
|
||||
response := types.ShareOverviewResponse{
|
||||
VisitCount: totalResult.TotalViews, // 总访问量
|
||||
VisitCountToday: dailyResult.DailyViews, // 当天访问量
|
||||
ViewerCount: totalResult.TotalUsers, // 总独立用户数
|
||||
ViewerCountToday: dailyResult.DailyUsers, // 当天独立用户数
|
||||
PublishCount: totalResult.TotalCount, // 总发布量
|
||||
PublishCountToday: dailyResult.DailyCount, // 当天发布量
|
||||
}
|
||||
|
||||
return &response, nil
|
||||
}
|
@@ -94,12 +94,13 @@ func (l *UploadShareImageLogic) UploadShareImage(req *types.ShareImageRequest) (
|
||||
storageShare := model.ScaStorageShare{
|
||||
UserID: uid,
|
||||
AlbumID: album.ID,
|
||||
ShareCode: kgo.SimpleUuid(),
|
||||
InviteCode: kgo.SimpleUuid(),
|
||||
Status: 0,
|
||||
AccessPassword: req.AccessPassword,
|
||||
VisitLimit: req.AccessLimit,
|
||||
ValidityPeriod: int64(duration),
|
||||
ExpireTime: expiryTime,
|
||||
ImageCount: int64(len(req.Images)),
|
||||
}
|
||||
err = tx.ScaStorageShare.Create(&storageShare)
|
||||
if err != nil {
|
||||
@@ -112,7 +113,7 @@ func (l *UploadShareImageLogic) UploadShareImage(req *types.ShareImageRequest) (
|
||||
tx.Rollback()
|
||||
return "", err
|
||||
}
|
||||
cacheKey := constant.ImageSharePrefix + storageShare.ShareCode
|
||||
cacheKey := constant.ImageSharePrefix + storageShare.InviteCode
|
||||
err = l.svcCtx.RedisClient.Set(l.ctx, cacheKey, marshal, time.Duration(duration)*time.Hour*24).Err()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
@@ -124,56 +125,10 @@ func (l *UploadShareImageLogic) UploadShareImage(req *types.ShareImageRequest) (
|
||||
logx.Errorf("Transaction commit failed: %v", err)
|
||||
return "", err
|
||||
}
|
||||
return storageShare.ShareCode, nil
|
||||
return storageShare.InviteCode, nil
|
||||
}
|
||||
|
||||
func (l *UploadShareImageLogic) uploadImageAndRecord(tx *query.QueryTx, uid string, album model.ScaStorageAlbum, img types.ShareImageMeta, req *types.ShareImageRequest) error {
|
||||
// 上传缩略图到 Minio
|
||||
thumbnail, err := base64.StdEncoding.DecodeString(img.Thumbnail)
|
||||
if err != nil {
|
||||
return fmt.Errorf("base64 decode failed: %v", err)
|
||||
}
|
||||
thumbObjectKey := path.Join(
|
||||
uid,
|
||||
time.Now().Format("2006/01"),
|
||||
l.classifyFile(img.FileType),
|
||||
fmt.Sprintf("%s_%s.jpg", time.Now().Format("20060102150405"), kgo.SimpleUuid()),
|
||||
)
|
||||
exists, err := l.svcCtx.MinioClient.BucketExists(l.ctx, constant.ThumbnailBucketName)
|
||||
if err != nil || !exists {
|
||||
err = l.svcCtx.MinioClient.MakeBucket(l.ctx, constant.ThumbnailBucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
|
||||
if err != nil {
|
||||
logx.Errorf("Failed to create MinIO bucket: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
_, err = l.svcCtx.MinioClient.PutObject(
|
||||
l.ctx,
|
||||
constant.ThumbnailBucketName,
|
||||
thumbObjectKey,
|
||||
bytes.NewReader(thumbnail),
|
||||
int64(len(thumbnail)),
|
||||
minio.PutObjectOptions{
|
||||
ContentType: "image/jpeg",
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
logx.Errorf("Failed to upload MinIO object: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// 记录缩略图
|
||||
thumbRecord := model.ScaStorageThumb{
|
||||
UserID: uid,
|
||||
ThumbPath: thumbObjectKey,
|
||||
ThumbW: img.ThumbW,
|
||||
ThumbH: img.ThumbH,
|
||||
ThumbSize: float64(len(thumbnail)),
|
||||
}
|
||||
err = tx.ScaStorageThumb.Create(&thumbRecord)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// 上传原始图片到用户的存储桶
|
||||
originImage, err := base64.StdEncoding.DecodeString(img.OriginImage)
|
||||
@@ -211,24 +166,72 @@ func (l *UploadShareImageLogic) uploadImageAndRecord(tx *query.QueryTx, uid stri
|
||||
|
||||
// 记录原始图片信息
|
||||
imageRecord := model.ScaStorageInfo{
|
||||
UserID: uid,
|
||||
Provider: req.Provider,
|
||||
Bucket: req.Bucket,
|
||||
Path: originObjectKey,
|
||||
FileName: img.FileName,
|
||||
FileSize: strconv.Itoa(size),
|
||||
FileType: img.FileType,
|
||||
Width: float64(width),
|
||||
Height: float64(height),
|
||||
Type: constant.ImageTypeShared,
|
||||
AlbumID: album.ID,
|
||||
ThumbID: thumbRecord.ID,
|
||||
UserID: uid,
|
||||
Provider: req.Provider,
|
||||
Bucket: req.Bucket,
|
||||
Path: originObjectKey,
|
||||
FileName: img.FileName,
|
||||
FileSize: strconv.Itoa(size),
|
||||
FileType: img.FileType,
|
||||
Width: float64(width),
|
||||
Height: float64(height),
|
||||
Type: constant.ImageTypeShared,
|
||||
AlbumID: album.ID,
|
||||
IsDisplayed: 1,
|
||||
}
|
||||
err = tx.ScaStorageInfo.Create(&imageRecord)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// 上传缩略图到 Minio
|
||||
thumbnail, err := base64.StdEncoding.DecodeString(img.Thumbnail)
|
||||
if err != nil {
|
||||
return fmt.Errorf("base64 decode failed: %v", err)
|
||||
}
|
||||
thumbObjectKey := path.Join(
|
||||
uid,
|
||||
time.Now().Format("2006/01"),
|
||||
l.classifyFile(img.FileType),
|
||||
fmt.Sprintf("%s_%s.jpg", time.Now().Format("20060102150405"), kgo.SimpleUuid()),
|
||||
)
|
||||
exists, err := l.svcCtx.MinioClient.BucketExists(l.ctx, constant.ThumbnailBucketName)
|
||||
if err != nil || !exists {
|
||||
err = l.svcCtx.MinioClient.MakeBucket(l.ctx, constant.ThumbnailBucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
|
||||
if err != nil {
|
||||
logx.Errorf("Failed to create MinIO bucket: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
_, err = l.svcCtx.MinioClient.PutObject(
|
||||
l.ctx,
|
||||
constant.ThumbnailBucketName,
|
||||
thumbObjectKey,
|
||||
bytes.NewReader(thumbnail),
|
||||
int64(len(thumbnail)),
|
||||
minio.PutObjectOptions{
|
||||
ContentType: "image/jpeg",
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
logx.Errorf("Failed to upload MinIO object: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// 记录缩略图
|
||||
thumbRecord := model.ScaStorageThumb{
|
||||
InfoID: imageRecord.ID,
|
||||
UserID: uid,
|
||||
ThumbPath: thumbObjectKey,
|
||||
ThumbW: img.ThumbW,
|
||||
ThumbH: img.ThumbH,
|
||||
ThumbSize: float64(len(thumbnail)),
|
||||
}
|
||||
err = tx.ScaStorageThumb.Create(&thumbRecord)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
97
app/auth/api/internal/logic/storage/delete_image_logic.go
Normal file
97
app/auth/api/internal/logic/storage/delete_image_logic.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"schisandra-album-cloud-microservices/common/constant"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/types"
|
||||
|
||||
"github.com/zeromicro/go-zero/core/logx"
|
||||
)
|
||||
|
||||
type DeleteImageLogic struct {
|
||||
logx.Logger
|
||||
ctx context.Context
|
||||
svcCtx *svc.ServiceContext
|
||||
}
|
||||
|
||||
func NewDeleteImageLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DeleteImageLogic {
|
||||
return &DeleteImageLogic{
|
||||
Logger: logx.WithContext(ctx),
|
||||
ctx: ctx,
|
||||
svcCtx: svcCtx,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *DeleteImageLogic) DeleteImage(req *types.DeleteImageRequest) (resp string, err error) {
|
||||
uid, ok := l.ctx.Value("user_id").(string)
|
||||
if !ok {
|
||||
return "", errors.New("user_id not found")
|
||||
}
|
||||
tx := l.svcCtx.DB.Begin()
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
tx.Rollback()
|
||||
}
|
||||
}()
|
||||
storageInfo := tx.ScaStorageInfo
|
||||
info, err := storageInfo.Where(storageInfo.UserID.Eq(uid),
|
||||
storageInfo.ID.In(req.IDS...),
|
||||
storageInfo.Provider.Eq(req.Provider),
|
||||
storageInfo.Bucket.Eq(req.Bucket)).Delete()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return "", err
|
||||
}
|
||||
if info.RowsAffected == 0 {
|
||||
tx.Rollback()
|
||||
return "", errors.New("no image found")
|
||||
}
|
||||
storageThumb := tx.ScaStorageThumb
|
||||
resultInfo, err := storageThumb.Where(storageThumb.UserID.Eq(uid), storageThumb.InfoID.In(req.IDS...)).Delete()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return "", err
|
||||
}
|
||||
if resultInfo.RowsAffected == 0 {
|
||||
tx.Rollback()
|
||||
return "", errors.New("no thumb found")
|
||||
}
|
||||
storageExtra := tx.ScaStorageExtra
|
||||
resultExtra, err := storageExtra.Where(storageExtra.UserID.Eq(uid), storageExtra.InfoID.In(req.IDS...)).Delete()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return "", err
|
||||
}
|
||||
if resultExtra.RowsAffected == 0 {
|
||||
tx.Rollback()
|
||||
return "", errors.New("no extra found")
|
||||
}
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return "", err
|
||||
}
|
||||
// 删除缓存
|
||||
keyPattern := fmt.Sprintf("%s%s:%s", constant.ImageCachePrefix, uid, "*")
|
||||
// 获取所有匹配的键
|
||||
keys, err := l.svcCtx.RedisClient.Keys(l.ctx, keyPattern).Result()
|
||||
if err != nil {
|
||||
logx.Errorf("获取缓存键 %s 失败: %v", keyPattern, err)
|
||||
return "", err
|
||||
}
|
||||
// 如果没有匹配的键,直接返回
|
||||
if len(keys) == 0 {
|
||||
logx.Infof("没有找到匹配的缓存键: %s", keyPattern)
|
||||
return "", nil
|
||||
}
|
||||
// 删除所有匹配的键
|
||||
if err := l.svcCtx.RedisClient.Del(l.ctx, keys...).Err(); err != nil {
|
||||
logx.Errorf("删除缓存键 %s 失败: %v", keyPattern, err)
|
||||
return "", err
|
||||
}
|
||||
return "success", nil
|
||||
}
|
@@ -13,6 +13,7 @@ import (
|
||||
"schisandra-album-cloud-microservices/common/constant"
|
||||
"schisandra-album-cloud-microservices/common/encrypt"
|
||||
storageConfig "schisandra-album-cloud-microservices/common/storage/config"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -42,7 +43,7 @@ func (l *GetAlbumDetailLogic) GetAlbumDetail(req *types.AlbumDetailListRequest)
|
||||
return nil, errors.New("user_id not found")
|
||||
}
|
||||
// 缓存获取数据 v1.0.0
|
||||
cacheKey := fmt.Sprintf("%s%s:%s:%s:%v", constant.ImageListPrefix, uid, req.Provider, req.Bucket, req.ID)
|
||||
cacheKey := fmt.Sprintf("%s%s:%s:%s:%s:%v", constant.ImageCachePrefix, uid, "album", req.Provider, req.Bucket, req.ID)
|
||||
// 尝试从缓存获取
|
||||
cachedResult, err := l.svcCtx.RedisClient.Get(l.ctx, cacheKey).Result()
|
||||
if err == nil {
|
||||
@@ -72,7 +73,7 @@ func (l *GetAlbumDetailLogic) GetAlbumDetail(req *types.AlbumDetailListRequest)
|
||||
storageThumb.ThumbW,
|
||||
storageThumb.ThumbH,
|
||||
storageThumb.ThumbSize).
|
||||
LeftJoin(storageThumb, storageInfo.ThumbID.EqCol(storageThumb.ID)).
|
||||
LeftJoin(storageThumb, storageInfo.ID.EqCol(storageThumb.InfoID)).
|
||||
Where(
|
||||
storageInfo.UserID.Eq(uid),
|
||||
storageInfo.Provider.Eq(req.Provider),
|
||||
@@ -147,13 +148,19 @@ func (l *GetAlbumDetailLogic) GetAlbumDetail(req *types.AlbumDetailListRequest)
|
||||
})
|
||||
return true
|
||||
})
|
||||
// 按日期排序,最新的在最上面
|
||||
sort.Slice(imageList, func(i, j int) bool {
|
||||
dateI, _ := time.Parse("2006年1月2日 星期一", imageList[i].Date)
|
||||
dateJ, _ := time.Parse("2006年1月2日 星期一", imageList[j].Date)
|
||||
return dateI.After(dateJ)
|
||||
})
|
||||
resp = &types.AlbumDetailListResponse{
|
||||
Records: imageList,
|
||||
}
|
||||
|
||||
// 缓存结果
|
||||
if data, err := json.Marshal(resp); err == nil {
|
||||
expireTime := 7*24*time.Hour - time.Duration(rand.Intn(60))*time.Minute
|
||||
expireTime := 5*time.Minute + time.Duration(rand.Intn(300))*time.Second
|
||||
if err := l.svcCtx.RedisClient.Set(l.ctx, cacheKey, data, expireTime).Err(); err != nil {
|
||||
logx.Error("Failed to cache image list:", err)
|
||||
}
|
||||
|
211
app/auth/api/internal/logic/storage/get_bucket_capacity_logic.go
Normal file
211
app/auth/api/internal/logic/storage/get_bucket_capacity_logic.go
Normal file
@@ -0,0 +1,211 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"math"
|
||||
"math/rand"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/types"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
"schisandra-album-cloud-microservices/common/constant"
|
||||
"schisandra-album-cloud-microservices/common/encrypt"
|
||||
storageConfig "schisandra-album-cloud-microservices/common/storage/config"
|
||||
"time"
|
||||
|
||||
"github.com/zeromicro/go-zero/core/logx"
|
||||
)
|
||||
|
||||
type GetBucketCapacityLogic struct {
|
||||
logx.Logger
|
||||
ctx context.Context
|
||||
svcCtx *svc.ServiceContext
|
||||
}
|
||||
|
||||
func NewGetBucketCapacityLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetBucketCapacityLogic {
|
||||
return &GetBucketCapacityLogic{
|
||||
Logger: logx.WithContext(ctx),
|
||||
ctx: ctx,
|
||||
svcCtx: svcCtx,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *GetBucketCapacityLogic) GetBucketCapacity(req *types.BucketCapacityRequest) (resp *types.BucketCapacityResponse, err error) {
|
||||
uid, ok := l.ctx.Value("user_id").(string)
|
||||
if !ok {
|
||||
return nil, errors.New("user_id not found")
|
||||
}
|
||||
// 设计缓存键
|
||||
cacheKey := fmt.Sprintf("%s%s:%s:%s", constant.BucketCapacityCachePrefix, uid, req.Provider, req.Bucket)
|
||||
// 尝试从缓存中获取容量信息
|
||||
cachedResult, err := l.svcCtx.RedisClient.Get(l.ctx, cacheKey).Result()
|
||||
if err != nil && !errors.Is(err, redis.Nil) {
|
||||
logx.Errorf("get bucket capacity from cache failed: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 如果缓存存在,直接返回缓存结果
|
||||
if cachedResult != "" {
|
||||
// 如果是空值缓存(防缓存穿透),返回空结果
|
||||
if cachedResult == "{}" {
|
||||
return &types.BucketCapacityResponse{}, nil
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(cachedResult), &resp)
|
||||
if err != nil {
|
||||
return nil, errors.New("unmarshal cached result failed")
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
// 加载用户oss配置信息
|
||||
cacheOssConfigKey := constant.UserOssConfigPrefix + uid + ":" + req.Provider
|
||||
ossConfig, err := l.getOssConfigFromCacheOrDb(cacheOssConfigKey, uid, req.Provider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
service, err := l.svcCtx.StorageManager.GetStorage(uid, ossConfig)
|
||||
if err != nil {
|
||||
return nil, errors.New("get storage failed")
|
||||
}
|
||||
bucketStat, err := service.GetBucketStat(l.ctx, ossConfig.BucketName)
|
||||
if err != nil {
|
||||
// 如果 OSS 接口调用失败,设置空值缓存(防缓存穿透)
|
||||
emptyData := "{}"
|
||||
emptyCacheExpire := 5 * time.Minute // 空值缓存过期时间
|
||||
if err := l.svcCtx.RedisClient.Set(l.ctx, cacheKey, emptyData, emptyCacheExpire).Err(); err != nil {
|
||||
logx.Errorf("set empty cache failed: %v", err)
|
||||
}
|
||||
return nil, errors.New("get bucket stat failed")
|
||||
}
|
||||
scaStorageConfig := l.svcCtx.DB.ScaStorageConfig
|
||||
capacity, err := scaStorageConfig.Select(scaStorageConfig.Capacity).
|
||||
Where(scaStorageConfig.UserID.Eq(uid), scaStorageConfig.Provider.Eq(req.Provider), scaStorageConfig.Bucket.Eq(req.Bucket)).First()
|
||||
if err != nil {
|
||||
return nil, errors.New("get storage config failed")
|
||||
}
|
||||
|
||||
// 总容量(单位:GB)
|
||||
totalCapacityGB := capacity.Capacity
|
||||
|
||||
// 已用容量(单位:字节转换为 GB)
|
||||
const bytesToGB = 1024 * 1024 * 1024
|
||||
usedCapacityGB := float64(bucketStat.StandardStorage) / bytesToGB
|
||||
|
||||
// 计算百分比
|
||||
percentage := calculatePercentage(usedCapacityGB, float64(totalCapacityGB))
|
||||
|
||||
// 格式化容量信息
|
||||
capacityStr := fmt.Sprintf("%.2v GB", totalCapacityGB) // 总容量(GB)
|
||||
|
||||
resp = &types.BucketCapacityResponse{
|
||||
Capacity: capacityStr,
|
||||
Used: formatBytes(bucketStat.StandardStorage),
|
||||
Percentage: percentage,
|
||||
}
|
||||
// 缓存容量信息
|
||||
marshalData, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
return nil, errors.New("marshal bucket capacity failed")
|
||||
}
|
||||
// 添加随机值(防缓存雪崩)
|
||||
// 计算缓存过期时间:距离第二天凌晨 12 点的剩余时间
|
||||
cacheExpire := timeUntilNextMidnight() + time.Duration(rand.Intn(300))*time.Second
|
||||
err = l.svcCtx.RedisClient.Set(l.ctx, cacheKey, marshalData, cacheExpire).Err()
|
||||
if err != nil {
|
||||
return nil, errors.New("set bucket capacity failed")
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// 提取解密操作为函数
|
||||
func (l *GetBucketCapacityLogic) decryptConfig(config *model.ScaStorageConfig) (*storageConfig.StorageConfig, error) {
|
||||
accessKey, err := encrypt.Decrypt(config.AccessKey, l.svcCtx.Config.Encrypt.Key)
|
||||
if err != nil {
|
||||
return nil, errors.New("decrypt access key failed")
|
||||
}
|
||||
secretKey, err := encrypt.Decrypt(config.SecretKey, l.svcCtx.Config.Encrypt.Key)
|
||||
if err != nil {
|
||||
return nil, errors.New("decrypt secret key failed")
|
||||
}
|
||||
return &storageConfig.StorageConfig{
|
||||
Provider: config.Provider,
|
||||
Endpoint: config.Endpoint,
|
||||
AccessKey: accessKey,
|
||||
SecretKey: secretKey,
|
||||
BucketName: config.Bucket,
|
||||
Region: config.Region,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// 从缓存或数据库中获取 OSS 配置
|
||||
func (l *GetBucketCapacityLogic) getOssConfigFromCacheOrDb(cacheKey, uid, provider string) (*storageConfig.StorageConfig, error) {
|
||||
result, err := l.svcCtx.RedisClient.Get(l.ctx, cacheKey).Result()
|
||||
if err != nil && !errors.Is(err, redis.Nil) {
|
||||
return nil, errors.New("get oss config failed")
|
||||
}
|
||||
|
||||
var ossConfig *storageConfig.StorageConfig
|
||||
if result != "" {
|
||||
var redisOssConfig model.ScaStorageConfig
|
||||
if err = json.Unmarshal([]byte(result), &redisOssConfig); err != nil {
|
||||
return nil, errors.New("unmarshal oss config failed")
|
||||
}
|
||||
return l.decryptConfig(&redisOssConfig)
|
||||
}
|
||||
|
||||
// 缓存未命中,从数据库中加载
|
||||
scaOssConfig := l.svcCtx.DB.ScaStorageConfig
|
||||
dbOssConfig, err := scaOssConfig.Where(scaOssConfig.UserID.Eq(uid), scaOssConfig.Provider.Eq(provider)).First()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 缓存数据库配置
|
||||
ossConfig, err = l.decryptConfig(dbOssConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
marshalData, err := json.Marshal(dbOssConfig)
|
||||
if err != nil {
|
||||
return nil, errors.New("marshal oss config failed")
|
||||
}
|
||||
err = l.svcCtx.RedisClient.Set(l.ctx, cacheKey, marshalData, 0).Err()
|
||||
if err != nil {
|
||||
return nil, errors.New("set oss config failed")
|
||||
}
|
||||
|
||||
return ossConfig, nil
|
||||
}
|
||||
|
||||
// 格式化字节大小为更友好的单位(KB、MB、GB 等)
|
||||
func formatBytes(bytes int64) string {
|
||||
const unit = 1024
|
||||
if bytes < unit {
|
||||
return fmt.Sprintf("%d B", bytes)
|
||||
}
|
||||
div, exp := int64(unit), 0
|
||||
for n := bytes / unit; n >= unit; n /= unit {
|
||||
div *= unit
|
||||
exp++
|
||||
}
|
||||
return fmt.Sprintf("%.2f %cB", float64(bytes)/float64(div), "KMGTPE"[exp])
|
||||
}
|
||||
|
||||
// 计算使用量百分比(基于 GB)
|
||||
func calculatePercentage(usedGB, totalGB float64) float64 {
|
||||
if totalGB == 0 {
|
||||
return 0
|
||||
}
|
||||
return math.Round(usedGB/totalGB*100*100) / 100
|
||||
}
|
||||
|
||||
// 计算距离第二天凌晨 12 点的剩余时间
|
||||
func timeUntilNextMidnight() time.Duration {
|
||||
now := time.Now()
|
||||
nextMidnight := time.Date(now.Year(), now.Month(), now.Day()+1, 0, 0, 0, 0, now.Location())
|
||||
return nextMidnight.Sub(now)
|
||||
}
|
231
app/auth/api/internal/logic/storage/get_delete_record_logic.go
Normal file
231
app/auth/api/internal/logic/storage/get_delete_record_logic.go
Normal file
@@ -0,0 +1,231 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"math/rand"
|
||||
"net/url"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
"schisandra-album-cloud-microservices/common/constant"
|
||||
"schisandra-album-cloud-microservices/common/encrypt"
|
||||
storageConfig "schisandra-album-cloud-microservices/common/storage/config"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/types"
|
||||
|
||||
"github.com/zeromicro/go-zero/core/logx"
|
||||
)
|
||||
|
||||
type GetDeleteRecordLogic struct {
|
||||
logx.Logger
|
||||
ctx context.Context
|
||||
svcCtx *svc.ServiceContext
|
||||
}
|
||||
|
||||
func NewGetDeleteRecordLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetDeleteRecordLogic {
|
||||
return &GetDeleteRecordLogic{
|
||||
Logger: logx.WithContext(ctx),
|
||||
ctx: ctx,
|
||||
svcCtx: svcCtx,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *GetDeleteRecordLogic) GetDeleteRecord(req *types.QueryDeleteRecordRequest) (resp *types.DeleteRecordListResponse, err error) {
|
||||
uid, ok := l.ctx.Value("user_id").(string)
|
||||
if !ok {
|
||||
return nil, errors.New("user_id not found")
|
||||
}
|
||||
// 缓存获取数据 v1.0.0
|
||||
cacheKey := fmt.Sprintf("%s%s:%s:%s:%s", constant.ImageCachePrefix, uid, "deleted", req.Provider, req.Bucket)
|
||||
// 尝试从缓存获取
|
||||
cachedResult, err := l.svcCtx.RedisClient.Get(l.ctx, cacheKey).Result()
|
||||
if err == nil {
|
||||
var cachedResponse types.DeleteRecordListResponse
|
||||
if err := json.Unmarshal([]byte(cachedResult), &cachedResponse); err == nil {
|
||||
return &cachedResponse, nil
|
||||
}
|
||||
logx.Error("Failed to unmarshal cached image list:", err)
|
||||
return nil, errors.New("get cached image list failed")
|
||||
} else if !errors.Is(err, redis.Nil) {
|
||||
logx.Error("Redis error:", err)
|
||||
return nil, errors.New("get cached image list failed")
|
||||
}
|
||||
// 缓存未命中,从数据库中查询
|
||||
storageInfo := l.svcCtx.DB.ScaStorageInfo
|
||||
storageThumb := l.svcCtx.DB.ScaStorageThumb
|
||||
var storageInfoList []types.FileInfoResult
|
||||
err = storageInfo.Select(
|
||||
storageInfo.ID,
|
||||
storageInfo.FileName,
|
||||
storageInfo.CreatedAt,
|
||||
storageThumb.ThumbPath,
|
||||
storageInfo.Path,
|
||||
storageThumb.ThumbW,
|
||||
storageThumb.ThumbH,
|
||||
storageThumb.ThumbSize).
|
||||
LeftJoin(storageThumb, storageInfo.ID.EqCol(storageThumb.InfoID)).
|
||||
Unscoped().
|
||||
Where(
|
||||
storageInfo.UserID.Eq(uid),
|
||||
storageInfo.Provider.Eq(req.Provider),
|
||||
storageInfo.Bucket.Eq(req.Bucket),
|
||||
storageInfo.DeletedAt.IsNotNull(),
|
||||
storageInfo.Type.Neq(constant.ImageTypeShared)).
|
||||
Order(storageInfo.CreatedAt.Desc()).
|
||||
Scan(&storageInfoList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(storageInfoList) == 0 {
|
||||
return &types.DeleteRecordListResponse{}, nil
|
||||
}
|
||||
|
||||
// 加载用户oss配置信息
|
||||
cacheOssConfigKey := constant.UserOssConfigPrefix + uid + ":" + req.Provider
|
||||
ossConfig, err := l.getOssConfigFromCacheOrDb(cacheOssConfigKey, uid, req.Provider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
service, err := l.svcCtx.StorageManager.GetStorage(uid, ossConfig)
|
||||
if err != nil {
|
||||
return nil, errors.New("get storage failed")
|
||||
}
|
||||
|
||||
// 按日期进行分组
|
||||
var wg sync.WaitGroup
|
||||
groupedImages := sync.Map{}
|
||||
|
||||
for _, dbFileInfo := range storageInfoList {
|
||||
wg.Add(1)
|
||||
go func(dbFileInfo *types.FileInfoResult) {
|
||||
defer wg.Done()
|
||||
weekday := WeekdayMap[dbFileInfo.CreatedAt.Weekday()]
|
||||
date := dbFileInfo.CreatedAt.Format("2006年1月2日 星期" + weekday)
|
||||
reqParams := make(url.Values)
|
||||
presignedUrl, err := l.svcCtx.MinioClient.PresignedGetObject(l.ctx, constant.ThumbnailBucketName, dbFileInfo.ThumbPath, time.Hour*24*7, reqParams)
|
||||
if err != nil {
|
||||
logx.Error(err)
|
||||
return
|
||||
}
|
||||
url, err := service.PresignedURL(l.ctx, ossConfig.BucketName, dbFileInfo.Path, time.Hour*24*7)
|
||||
if err != nil {
|
||||
logx.Error(err)
|
||||
return
|
||||
}
|
||||
// 使用 Load 或 Store 确保原子操作
|
||||
value, _ := groupedImages.LoadOrStore(date, []types.ImageMeta{})
|
||||
images := value.([]types.ImageMeta)
|
||||
|
||||
images = append(images, types.ImageMeta{
|
||||
ID: dbFileInfo.ID,
|
||||
FileName: dbFileInfo.FileName,
|
||||
Thumbnail: presignedUrl.String(),
|
||||
URL: url,
|
||||
Width: dbFileInfo.ThumbW,
|
||||
Height: dbFileInfo.ThumbH,
|
||||
CreatedAt: dbFileInfo.CreatedAt.Format("2006-01-02 15:04:05"),
|
||||
})
|
||||
|
||||
// 重新存储更新后的图像列表
|
||||
groupedImages.Store(date, images)
|
||||
}(&dbFileInfo)
|
||||
}
|
||||
wg.Wait()
|
||||
var imageList []types.AllImageDetail
|
||||
groupedImages.Range(func(key, value interface{}) bool {
|
||||
imageList = append(imageList, types.AllImageDetail{
|
||||
Date: key.(string),
|
||||
List: value.([]types.ImageMeta),
|
||||
})
|
||||
return true
|
||||
})
|
||||
// 按日期排序,最新的在最上面
|
||||
sort.Slice(imageList, func(i, j int) bool {
|
||||
dateI, _ := time.Parse("2006年1月2日 星期一", imageList[i].Date)
|
||||
dateJ, _ := time.Parse("2006年1月2日 星期一", imageList[j].Date)
|
||||
return dateI.After(dateJ)
|
||||
})
|
||||
resp = &types.DeleteRecordListResponse{
|
||||
Records: imageList,
|
||||
}
|
||||
|
||||
// 缓存结果
|
||||
if data, err := json.Marshal(resp); err == nil {
|
||||
expireTime := 5*time.Minute + time.Duration(rand.Intn(300))*time.Second
|
||||
if err := l.svcCtx.RedisClient.Set(l.ctx, cacheKey, data, expireTime).Err(); err != nil {
|
||||
logx.Error("Failed to cache image list:", err)
|
||||
}
|
||||
} else {
|
||||
logx.Error("Failed to marshal image list for caching:", err)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
|
||||
}
|
||||
|
||||
// 提取解密操作为函数
|
||||
func (l *GetDeleteRecordLogic) decryptConfig(config *model.ScaStorageConfig) (*storageConfig.StorageConfig, error) {
|
||||
accessKey, err := encrypt.Decrypt(config.AccessKey, l.svcCtx.Config.Encrypt.Key)
|
||||
if err != nil {
|
||||
return nil, errors.New("decrypt access key failed")
|
||||
}
|
||||
secretKey, err := encrypt.Decrypt(config.SecretKey, l.svcCtx.Config.Encrypt.Key)
|
||||
if err != nil {
|
||||
return nil, errors.New("decrypt secret key failed")
|
||||
}
|
||||
return &storageConfig.StorageConfig{
|
||||
Provider: config.Provider,
|
||||
Endpoint: config.Endpoint,
|
||||
AccessKey: accessKey,
|
||||
SecretKey: secretKey,
|
||||
BucketName: config.Bucket,
|
||||
Region: config.Region,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// 从缓存或数据库中获取 OSS 配置
|
||||
func (l *GetDeleteRecordLogic) getOssConfigFromCacheOrDb(cacheKey, uid, provider string) (*storageConfig.StorageConfig, error) {
|
||||
result, err := l.svcCtx.RedisClient.Get(l.ctx, cacheKey).Result()
|
||||
if err != nil && !errors.Is(err, redis.Nil) {
|
||||
return nil, errors.New("get oss config failed")
|
||||
}
|
||||
|
||||
var ossConfig *storageConfig.StorageConfig
|
||||
if result != "" {
|
||||
var redisOssConfig model.ScaStorageConfig
|
||||
if err = json.Unmarshal([]byte(result), &redisOssConfig); err != nil {
|
||||
return nil, errors.New("unmarshal oss config failed")
|
||||
}
|
||||
return l.decryptConfig(&redisOssConfig)
|
||||
}
|
||||
|
||||
// 缓存未命中,从数据库中加载
|
||||
scaOssConfig := l.svcCtx.DB.ScaStorageConfig
|
||||
dbOssConfig, err := scaOssConfig.Where(scaOssConfig.UserID.Eq(uid), scaOssConfig.Provider.Eq(provider)).First()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 缓存数据库配置
|
||||
ossConfig, err = l.decryptConfig(dbOssConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
marshalData, err := json.Marshal(dbOssConfig)
|
||||
if err != nil {
|
||||
return nil, errors.New("marshal oss config failed")
|
||||
}
|
||||
err = l.svcCtx.RedisClient.Set(l.ctx, cacheKey, marshalData, 0).Err()
|
||||
if err != nil {
|
||||
return nil, errors.New("set oss config failed")
|
||||
}
|
||||
|
||||
return ossConfig, nil
|
||||
}
|
@@ -12,6 +12,7 @@ import (
|
||||
"schisandra-album-cloud-microservices/common/constant"
|
||||
"schisandra-album-cloud-microservices/common/encrypt"
|
||||
storageConfig "schisandra-album-cloud-microservices/common/storage/config"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -41,7 +42,7 @@ func (l *GetFaceDetailListLogic) GetFaceDetailList(req *types.FaceDetailListRequ
|
||||
return nil, errors.New("user_id not found")
|
||||
}
|
||||
// 缓存获取数据 v1.0.0
|
||||
cacheKey := fmt.Sprintf("%s%s:%s:%s:%v", constant.ImageFaceListPrefix, uid, req.Provider, req.Bucket, req.FaceID)
|
||||
cacheKey := fmt.Sprintf("%s%s:%s:%s:%s:%v", constant.ImageCachePrefix, uid, "faces", req.Provider, req.Bucket, req.FaceID)
|
||||
// 尝试从缓存获取
|
||||
cachedResult, err := l.svcCtx.RedisClient.Get(l.ctx, cacheKey).Result()
|
||||
if err == nil {
|
||||
@@ -70,7 +71,7 @@ func (l *GetFaceDetailListLogic) GetFaceDetailList(req *types.FaceDetailListRequ
|
||||
storageThumb.ThumbW,
|
||||
storageThumb.ThumbH,
|
||||
storageThumb.ThumbSize).
|
||||
LeftJoin(storageThumb, storageInfo.ThumbID.EqCol(storageThumb.ID)).Where(
|
||||
LeftJoin(storageThumb, storageInfo.ID.EqCol(storageThumb.InfoID)).Where(
|
||||
storageInfo.UserID.Eq(uid),
|
||||
storageInfo.Provider.Eq(req.Provider),
|
||||
storageInfo.Bucket.Eq(req.Bucket),
|
||||
@@ -144,13 +145,19 @@ func (l *GetFaceDetailListLogic) GetFaceDetailList(req *types.FaceDetailListRequ
|
||||
})
|
||||
return true
|
||||
})
|
||||
// 按日期排序,最新的在最上面
|
||||
sort.Slice(imageList, func(i, j int) bool {
|
||||
dateI, _ := time.Parse("2006年1月2日 星期一", imageList[i].Date)
|
||||
dateJ, _ := time.Parse("2006年1月2日 星期一", imageList[j].Date)
|
||||
return dateI.After(dateJ)
|
||||
})
|
||||
resp = &types.FaceDetailListResponse{
|
||||
Records: imageList,
|
||||
}
|
||||
|
||||
// 缓存结果
|
||||
if data, err := json.Marshal(resp); err == nil {
|
||||
expireTime := 7*24*time.Hour - time.Duration(rand.Intn(60))*time.Minute
|
||||
expireTime := 5*time.Minute + time.Duration(rand.Intn(300))*time.Second
|
||||
if err := l.svcCtx.RedisClient.Set(l.ctx, cacheKey, data, expireTime).Err(); err != nil {
|
||||
logx.Error("Failed to cache image list:", err)
|
||||
}
|
||||
|
@@ -75,12 +75,12 @@ func (l *GetImageUrlLogic) GetImageUrl(req *types.SingleImageRequest) (resp stri
|
||||
if err != nil {
|
||||
return "", errors.New("get storage failed")
|
||||
}
|
||||
url, err := service.PresignedURL(l.ctx, ossConfig.BucketName, result.Path, 7*24*time.Hour)
|
||||
url, err := service.PresignedURL(l.ctx, ossConfig.BucketName, result.Path, 15*time.Minute)
|
||||
if err != nil {
|
||||
return "", errors.New("get presigned url failed")
|
||||
}
|
||||
// 缓存url
|
||||
err = l.svcCtx.RedisClient.Set(l.ctx, cacheKey, url, 7*24*time.Hour).Err()
|
||||
err = l.svcCtx.RedisClient.Set(l.ctx, cacheKey, url, 15*time.Minute).Err()
|
||||
if err != nil {
|
||||
logx.Info(err)
|
||||
}
|
||||
|
@@ -7,6 +7,7 @@ import (
|
||||
"fmt"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"github.com/zeromicro/go-zero/core/logx"
|
||||
"gorm.io/gen"
|
||||
"math/rand"
|
||||
"net/url"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
|
||||
@@ -15,6 +16,7 @@ import (
|
||||
"schisandra-album-cloud-microservices/common/constant"
|
||||
"schisandra-album-cloud-microservices/common/encrypt"
|
||||
storageConfig "schisandra-album-cloud-microservices/common/storage/config"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
@@ -49,7 +51,7 @@ func (l *QueryAllImageListLogic) QueryAllImageList(req *types.AllImageListReques
|
||||
return nil, errors.New("user_id not found")
|
||||
}
|
||||
// 缓存获取数据 v1.0.0
|
||||
cacheKey := fmt.Sprintf("%s%s:%s:%s:%t", constant.ImageListPrefix, uid, req.Provider, req.Bucket, req.Sort)
|
||||
cacheKey := fmt.Sprintf("%s%s:%s:%s:%s:%s:%t", constant.ImageCachePrefix, uid, "list", req.Provider, req.Bucket, req.Type, req.Sort)
|
||||
// 尝试从缓存获取
|
||||
cachedResult, err := l.svcCtx.RedisClient.Get(l.ctx, cacheKey).Result()
|
||||
if err == nil {
|
||||
@@ -63,47 +65,36 @@ func (l *QueryAllImageListLogic) QueryAllImageList(req *types.AllImageListReques
|
||||
logx.Error("Redis error:", err)
|
||||
return nil, errors.New("get cached image list failed")
|
||||
}
|
||||
|
||||
// 缓存未命中,从数据库中查询
|
||||
storageInfo := l.svcCtx.DB.ScaStorageInfo
|
||||
storageThumb := l.svcCtx.DB.ScaStorageThumb
|
||||
var storageInfoList []types.FileInfoResult
|
||||
if req.Sort {
|
||||
err = storageInfo.Select(
|
||||
storageInfo.ID,
|
||||
storageInfo.FileName,
|
||||
storageInfo.CreatedAt,
|
||||
storageInfo.Path,
|
||||
storageThumb.ThumbPath,
|
||||
storageThumb.ThumbW,
|
||||
storageThumb.ThumbH,
|
||||
storageThumb.ThumbSize).
|
||||
LeftJoin(storageThumb, storageInfo.ThumbID.EqCol(storageThumb.ID)).
|
||||
Where(
|
||||
storageInfo.UserID.Eq(uid),
|
||||
storageInfo.Provider.Eq(req.Provider),
|
||||
storageInfo.Bucket.Eq(req.Bucket),
|
||||
storageInfo.Type.Eq(req.Type),
|
||||
storageInfo.AlbumID.IsNull()).
|
||||
Order(storageInfo.CreatedAt.Desc()).Scan(&storageInfoList)
|
||||
} else {
|
||||
err = storageInfo.Select(
|
||||
storageInfo.ID,
|
||||
storageInfo.FileName,
|
||||
storageInfo.CreatedAt,
|
||||
storageThumb.ThumbPath,
|
||||
storageInfo.Path,
|
||||
storageThumb.ThumbW,
|
||||
storageThumb.ThumbH,
|
||||
storageThumb.ThumbSize).
|
||||
LeftJoin(storageThumb, storageInfo.ThumbID.EqCol(storageThumb.ID)).
|
||||
Where(
|
||||
storageInfo.UserID.Eq(uid),
|
||||
storageInfo.Provider.Eq(req.Provider),
|
||||
storageInfo.Bucket.Eq(req.Bucket),
|
||||
storageInfo.Type.Eq(req.Type)).
|
||||
Order(storageInfo.CreatedAt.Desc()).Scan(&storageInfoList)
|
||||
var queryCondition []gen.Condition
|
||||
conditions := []gen.Condition{
|
||||
storageInfo.UserID.Eq(uid),
|
||||
storageInfo.Provider.Eq(req.Provider),
|
||||
storageInfo.Bucket.Eq(req.Bucket),
|
||||
storageInfo.Type.Neq(constant.ImageTypeShared),
|
||||
}
|
||||
queryCondition = append(queryCondition, conditions...)
|
||||
if req.Type != "all" {
|
||||
queryCondition = append(queryCondition, storageInfo.Type.Eq(req.Type))
|
||||
}
|
||||
if req.Sort {
|
||||
queryCondition = append(queryCondition, storageInfo.AlbumID.Eq(0))
|
||||
}
|
||||
var storageInfoList []types.FileInfoResult
|
||||
err = storageInfo.Select(
|
||||
storageInfo.ID,
|
||||
storageInfo.FileName,
|
||||
storageInfo.CreatedAt,
|
||||
storageThumb.ThumbPath,
|
||||
storageInfo.Path,
|
||||
storageThumb.ThumbW,
|
||||
storageThumb.ThumbH,
|
||||
storageThumb.ThumbSize).
|
||||
LeftJoin(storageThumb, storageInfo.ID.EqCol(storageThumb.InfoID)).
|
||||
Where(queryCondition...).
|
||||
Order(storageInfo.CreatedAt.Desc()).Scan(&storageInfoList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -171,13 +162,19 @@ func (l *QueryAllImageListLogic) QueryAllImageList(req *types.AllImageListReques
|
||||
})
|
||||
return true
|
||||
})
|
||||
// 按日期排序,最新的在最上面
|
||||
sort.Slice(imageList, func(i, j int) bool {
|
||||
dateI, _ := time.Parse("2006年1月2日 星期一", imageList[i].Date)
|
||||
dateJ, _ := time.Parse("2006年1月2日 星期一", imageList[j].Date)
|
||||
return dateI.After(dateJ)
|
||||
})
|
||||
resp = &types.AllImageListResponse{
|
||||
Records: imageList,
|
||||
}
|
||||
|
||||
// 缓存结果
|
||||
if data, err := json.Marshal(resp); err == nil {
|
||||
expireTime := 7*24*time.Hour - time.Duration(rand.Intn(60))*time.Minute
|
||||
expireTime := 5*time.Minute + time.Duration(rand.Intn(300))*time.Second
|
||||
if err := l.svcCtx.RedisClient.Set(l.ctx, cacheKey, data, expireTime).Err(); err != nil {
|
||||
logx.Error("Failed to cache image list:", err)
|
||||
}
|
||||
|
@@ -13,6 +13,7 @@ import (
|
||||
"schisandra-album-cloud-microservices/common/constant"
|
||||
"schisandra-album-cloud-microservices/common/encrypt"
|
||||
storageConfig "schisandra-album-cloud-microservices/common/storage/config"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -42,7 +43,7 @@ func (l *QueryLocationDetailListLogic) QueryLocationDetailList(req *types.Locati
|
||||
return nil, errors.New("user_id not found")
|
||||
}
|
||||
// 缓存获取数据 v1.0.0
|
||||
cacheKey := fmt.Sprintf("%s%s:%s:%s:%v", constant.ImageListPrefix, uid, req.Provider, req.Bucket, req.ID)
|
||||
cacheKey := fmt.Sprintf("%s%s:%s:%s:%s:%v", constant.ImageCachePrefix, uid, "location", req.Provider, req.Bucket, req.ID)
|
||||
// 尝试从缓存获取
|
||||
cachedResult, err := l.svcCtx.RedisClient.Get(l.ctx, cacheKey).Result()
|
||||
if err == nil {
|
||||
@@ -59,6 +60,7 @@ func (l *QueryLocationDetailListLogic) QueryLocationDetailList(req *types.Locati
|
||||
|
||||
storageInfo := l.svcCtx.DB.ScaStorageInfo
|
||||
storageThumb := l.svcCtx.DB.ScaStorageThumb
|
||||
storageLocation := l.svcCtx.DB.ScaStorageLocation
|
||||
// 数据库查询文件信息列表
|
||||
var storageInfoQuery query.IScaStorageInfoDo
|
||||
var storageInfoList []types.FileInfoResult
|
||||
@@ -72,11 +74,13 @@ func (l *QueryLocationDetailListLogic) QueryLocationDetailList(req *types.Locati
|
||||
storageThumb.ThumbW,
|
||||
storageThumb.ThumbH,
|
||||
storageThumb.ThumbSize).
|
||||
LeftJoin(storageThumb, storageInfo.ThumbID.EqCol(storageThumb.ID)).Where(
|
||||
storageInfo.UserID.Eq(uid),
|
||||
storageInfo.Provider.Eq(req.Provider),
|
||||
storageInfo.Bucket.Eq(req.Bucket),
|
||||
storageInfo.LocationID.Eq(req.ID)).
|
||||
LeftJoin(storageThumb, storageInfo.ID.EqCol(storageThumb.InfoID)).
|
||||
LeftJoin(storageLocation, storageInfo.LocationID.EqCol(storageLocation.ID)).
|
||||
Where(
|
||||
storageInfo.UserID.Eq(uid),
|
||||
storageInfo.Provider.Eq(req.Provider),
|
||||
storageInfo.Bucket.Eq(req.Bucket),
|
||||
storageLocation.ID.Eq(req.ID)).
|
||||
Order(storageInfo.CreatedAt.Desc())
|
||||
err = storageInfoQuery.Scan(&storageInfoList)
|
||||
if err != nil {
|
||||
@@ -146,13 +150,19 @@ func (l *QueryLocationDetailListLogic) QueryLocationDetailList(req *types.Locati
|
||||
})
|
||||
return true
|
||||
})
|
||||
// 按日期排序,最新的在最上面
|
||||
sort.Slice(imageList, func(i, j int) bool {
|
||||
dateI, _ := time.Parse("2006年1月2日 星期一", imageList[i].Date)
|
||||
dateJ, _ := time.Parse("2006年1月2日 星期一", imageList[j].Date)
|
||||
return dateI.After(dateJ)
|
||||
})
|
||||
resp = &types.LocationDetailListResponse{
|
||||
Records: imageList,
|
||||
}
|
||||
|
||||
// 缓存结果
|
||||
if data, err := json.Marshal(resp); err == nil {
|
||||
expireTime := 7*24*time.Hour - time.Duration(rand.Intn(60))*time.Minute
|
||||
expireTime := 5*time.Minute + time.Duration(rand.Intn(300))*time.Second
|
||||
if err := l.svcCtx.RedisClient.Set(l.ctx, cacheKey, data, expireTime).Err(); err != nil {
|
||||
logx.Error("Failed to cache image list:", err)
|
||||
}
|
||||
|
@@ -2,17 +2,12 @@ package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"net/url"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/svc"
|
||||
"schisandra-album-cloud-microservices/app/auth/api/internal/types"
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
"schisandra-album-cloud-microservices/common/constant"
|
||||
"schisandra-album-cloud-microservices/common/encrypt"
|
||||
storageConfig "schisandra-album-cloud-microservices/common/storage/config"
|
||||
"time"
|
||||
|
||||
"github.com/zeromicro/go-zero/core/logx"
|
||||
@@ -38,32 +33,27 @@ func (l *QueryLocationImageListLogic) QueryLocationImageList(req *types.Location
|
||||
return nil, errors.New("user_id not found")
|
||||
}
|
||||
storageLocation := l.svcCtx.DB.ScaStorageLocation
|
||||
storageInfo := l.svcCtx.DB.ScaStorageInfo
|
||||
|
||||
locations, err := storageLocation.Select(
|
||||
var locations []types.LocationInfo
|
||||
err = storageLocation.Select(
|
||||
storageLocation.ID,
|
||||
storageLocation.Country,
|
||||
storageLocation.City,
|
||||
storageLocation.Province,
|
||||
storageLocation.CoverImage,
|
||||
storageLocation.Total).Where(storageLocation.UserID.Eq(uid),
|
||||
storageLocation.Provider.Eq(req.Provider),
|
||||
storageLocation.Bucket.Eq(req.Bucket)).
|
||||
Order(storageLocation.CreatedAt.Desc()).Find()
|
||||
storageInfo.ID.Count().As("total")).
|
||||
LeftJoin(storageInfo, storageInfo.LocationID.EqCol(storageLocation.ID)).
|
||||
Where(storageLocation.UserID.Eq(uid),
|
||||
storageInfo.Provider.Eq(req.Provider),
|
||||
storageInfo.Bucket.Eq(req.Bucket)).
|
||||
Order(storageLocation.CreatedAt.Desc()).
|
||||
Group(storageLocation.ID).
|
||||
Scan(&locations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Load the user's OSS configuration
|
||||
//cacheOssConfigKey := constant.UserOssConfigPrefix + uid + ":" + req.Provider
|
||||
//ossConfig, err := l.getOssConfigFromCacheOrDb(cacheOssConfigKey, uid, req.Provider)
|
||||
//if err != nil {
|
||||
// return nil, err
|
||||
//}
|
||||
//
|
||||
//service, err := l.svcCtx.StorageManager.GetStorage(uid, ossConfig)
|
||||
//if err != nil {
|
||||
// return nil, errors.New("get storage failed")
|
||||
//}
|
||||
locationMap := make(map[string][]types.LocationMeta)
|
||||
|
||||
for _, loc := range locations {
|
||||
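Instead of trusting the denormalized Total column on sca_storage_location, the list now counts matching rows in sca_storage_info per location at query time, which stays correct when images are deleted. The same shape in isolation, scanning into the new types.LocationInfo (a sketch, with the wrapper function added for context):

    // Per-location image counts computed at query time; roughly
    //   SELECT l.id, l.country, ..., COUNT(i.id) AS total
    //   FROM sca_storage_location l LEFT JOIN sca_storage_info i ON i.location_id = l.id
    //   WHERE ... GROUP BY l.id ORDER BY l.created_at DESC
    func countImagesByLocation(q *query.Query, uid, provider, bucket string) ([]types.LocationInfo, error) {
        storageLocation := q.ScaStorageLocation
        storageInfo := q.ScaStorageInfo

        var locations []types.LocationInfo
        err := storageLocation.Select(
            storageLocation.ID,
            storageLocation.Country,
            storageLocation.Province,
            storageLocation.City,
            storageLocation.CoverImage,
            storageInfo.ID.Count().As("total")).
            LeftJoin(storageInfo, storageInfo.LocationID.EqCol(storageLocation.ID)).
            Where(storageLocation.UserID.Eq(uid),
                storageInfo.Provider.Eq(provider),
                storageInfo.Bucket.Eq(bucket)).
            Order(storageLocation.CreatedAt.Desc()).
            Group(storageLocation.ID).
            Scan(&locations)
        return locations, err
    }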
@@ -79,7 +69,7 @@ func (l *QueryLocationImageListLogic) QueryLocationImageList(req *types.Location
|
||||
city = loc.Country
|
||||
}
|
||||
reqParams := make(url.Values)
|
||||
presignedUrl, err := l.svcCtx.MinioClient.PresignedGetObject(l.ctx, constant.ThumbnailBucketName, loc.CoverImage, 7*24*time.Hour, reqParams)
|
||||
presignedUrl, err := l.svcCtx.MinioClient.PresignedGetObject(l.ctx, constant.ThumbnailBucketName, loc.CoverImage, 15*time.Minute, reqParams)
|
||||
if err != nil {
|
||||
return nil, errors.New("get presigned url failed")
|
||||
}
|
||||
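Presigned thumbnail URLs drop from seven days to fifteen minutes, so a leaked link goes stale quickly and clients are expected to refetch the list rather than cache URLs. The call in isolation, using the same minio-go signature already used above:

    // presignThumb returns a short-lived GET URL for a thumbnail object.
    func presignThumb(ctx context.Context, mc *minio.Client, bucket, objectKey string) (string, error) {
        u, err := mc.PresignedGetObject(ctx, bucket, objectKey, 15*time.Minute, url.Values{})
        if err != nil {
            return "", err
        }
        return u.String(), nil
    }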
@@ -103,63 +93,3 @@ func (l *QueryLocationImageListLogic) QueryLocationImageList(req *types.Location
|
||||
|
||||
return &types.LocationListResponse{Records: locationListData}, nil
|
||||
}
|
||||
|
||||
// Decryption extracted into a helper function
|
||||
func (l *QueryLocationImageListLogic) decryptConfig(config *model.ScaStorageConfig) (*storageConfig.StorageConfig, error) {
|
||||
accessKey, err := encrypt.Decrypt(config.AccessKey, l.svcCtx.Config.Encrypt.Key)
|
||||
if err != nil {
|
||||
return nil, errors.New("decrypt access key failed")
|
||||
}
|
||||
secretKey, err := encrypt.Decrypt(config.SecretKey, l.svcCtx.Config.Encrypt.Key)
|
||||
if err != nil {
|
||||
return nil, errors.New("decrypt secret key failed")
|
||||
}
|
||||
return &storageConfig.StorageConfig{
|
||||
Provider: config.Provider,
|
||||
Endpoint: config.Endpoint,
|
||||
AccessKey: accessKey,
|
||||
SecretKey: secretKey,
|
||||
BucketName: config.Bucket,
|
||||
Region: config.Region,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Fetch the OSS config from the cache or the database
|
||||
func (l *QueryLocationImageListLogic) getOssConfigFromCacheOrDb(cacheKey, uid, provider string) (*storageConfig.StorageConfig, error) {
|
||||
result, err := l.svcCtx.RedisClient.Get(l.ctx, cacheKey).Result()
|
||||
if err != nil && !errors.Is(err, redis.Nil) {
|
||||
return nil, errors.New("get oss config failed")
|
||||
}
|
||||
|
||||
var ossConfig *storageConfig.StorageConfig
|
||||
if result != "" {
|
||||
var redisOssConfig model.ScaStorageConfig
|
||||
if err = json.Unmarshal([]byte(result), &redisOssConfig); err != nil {
|
||||
return nil, errors.New("unmarshal oss config failed")
|
||||
}
|
||||
return l.decryptConfig(&redisOssConfig)
|
||||
}
|
||||
|
||||
// Cache miss: load from the database
|
||||
scaOssConfig := l.svcCtx.DB.ScaStorageConfig
|
||||
dbOssConfig, err := scaOssConfig.Where(scaOssConfig.UserID.Eq(uid), scaOssConfig.Provider.Eq(provider)).First()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Cache the config loaded from the database
|
||||
ossConfig, err = l.decryptConfig(dbOssConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
marshalData, err := json.Marshal(dbOssConfig)
|
||||
if err != nil {
|
||||
return nil, errors.New("marshal oss config failed")
|
||||
}
|
||||
err = l.svcCtx.RedisClient.Set(l.ctx, cacheKey, marshalData, 0).Err()
|
||||
if err != nil {
|
||||
return nil, errors.New("set oss config failed")
|
||||
}
|
||||
|
||||
return ossConfig, nil
|
||||
}
|
||||
|
@@ -13,6 +13,7 @@ import (
|
||||
"schisandra-album-cloud-microservices/common/constant"
|
||||
"schisandra-album-cloud-microservices/common/encrypt"
|
||||
storageConfig "schisandra-album-cloud-microservices/common/storage/config"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -42,7 +43,7 @@ func (l *QueryThingDetailListLogic) QueryThingDetailList(req *types.ThingDetailL
|
||||
return nil, errors.New("user_id not found")
|
||||
}
|
||||
// Fetch data from cache v1.0.0
|
||||
cacheKey := fmt.Sprintf("%s%s:%s:%s:%v", constant.ImageListPrefix, uid, req.Provider, req.Bucket, req.TagName)
|
||||
cacheKey := fmt.Sprintf("%s%s:%s:%s:%s:%v", constant.ImageCachePrefix, uid, "thing", req.Provider, req.Bucket, req.TagName)
|
||||
// Try to read from the cache
|
||||
cachedResult, err := l.svcCtx.RedisClient.Get(l.ctx, cacheKey).Result()
|
||||
if err == nil {
|
||||
@@ -59,6 +60,7 @@ func (l *QueryThingDetailListLogic) QueryThingDetailList(req *types.ThingDetailL
|
||||
|
||||
storageInfo := l.svcCtx.DB.ScaStorageInfo
|
||||
storageThumb := l.svcCtx.DB.ScaStorageThumb
|
||||
storageExtra := l.svcCtx.DB.ScaStorageExtra
|
||||
// Query the file info list from the database
|
||||
var storageInfoQuery query.IScaStorageInfoDo
|
||||
var storageInfoList []types.FileInfoResult
|
||||
@@ -72,12 +74,13 @@ func (l *QueryThingDetailListLogic) QueryThingDetailList(req *types.ThingDetailL
|
||||
storageThumb.ThumbW,
|
||||
storageThumb.ThumbH,
|
||||
storageThumb.ThumbSize).
|
||||
LeftJoin(storageThumb, storageInfo.ThumbID.EqCol(storageThumb.ID)).
|
||||
LeftJoin(storageThumb, storageInfo.ID.EqCol(storageThumb.InfoID)).
|
||||
LeftJoin(storageExtra, storageInfo.ID.EqCol(storageExtra.InfoID)).
|
||||
Where(
|
||||
storageInfo.UserID.Eq(uid),
|
||||
storageInfo.Provider.Eq(req.Provider),
|
||||
storageInfo.Bucket.Eq(req.Bucket),
|
||||
storageInfo.Tag.Eq(req.TagName)).
|
||||
storageExtra.Tag.Eq(req.TagName)).
|
||||
Order(storageInfo.CreatedAt.Desc())
|
||||
err = storageInfoQuery.Scan(&storageInfoList)
|
||||
if err != nil {
|
||||
@@ -147,13 +150,19 @@ func (l *QueryThingDetailListLogic) QueryThingDetailList(req *types.ThingDetailL
|
||||
})
|
||||
return true
|
||||
})
|
||||
// Sort by date, newest first
|
||||
sort.Slice(imageList, func(i, j int) bool {
|
||||
dateI, _ := time.Parse("2006年1月2日 星期一", imageList[i].Date)
|
||||
dateJ, _ := time.Parse("2006年1月2日 星期一", imageList[j].Date)
|
||||
return dateI.After(dateJ)
|
||||
})
|
||||
resp = &types.ThingDetailListResponse{
|
||||
Records: imageList,
|
||||
}
|
||||
|
||||
// Cache the result
|
||||
if data, err := json.Marshal(resp); err == nil {
|
||||
expireTime := 7*24*time.Hour - time.Duration(rand.Intn(60))*time.Minute
|
||||
expireTime := 5*time.Minute + time.Duration(rand.Intn(300))*time.Second
|
||||
if err := l.svcCtx.RedisClient.Set(l.ctx, cacheKey, data, expireTime).Err(); err != nil {
|
||||
logx.Error("Failed to cache image list:", err)
|
||||
}
|
||||
|
@@ -40,21 +40,24 @@ func (l *QueryThingImageListLogic) QueryThingImageList(req *types.ThingListReque
|
||||
}
|
||||
storageInfo := l.svcCtx.DB.ScaStorageInfo
|
||||
storageThumb := l.svcCtx.DB.ScaStorageThumb
|
||||
storageExtra := l.svcCtx.DB.ScaStorageExtra
|
||||
|
||||
var thingList []types.ThingImageList
|
||||
err = storageInfo.Select(
|
||||
storageInfo.ID,
|
||||
storageInfo.Category,
|
||||
storageInfo.Tag,
|
||||
storageExtra.Category,
|
||||
storageExtra.Tag,
|
||||
storageThumb.ThumbPath,
|
||||
storageInfo.CreatedAt).
|
||||
LeftJoin(storageThumb, storageInfo.ThumbID.EqCol(storageThumb.ID)).
|
||||
LeftJoin(storageThumb, storageInfo.ID.EqCol(storageThumb.InfoID)).
|
||||
LeftJoin(storageExtra, storageInfo.ID.EqCol(storageExtra.InfoID)).
|
||||
Where(storageInfo.UserID.Eq(uid),
|
||||
storageInfo.Provider.Eq(req.Provider),
|
||||
storageInfo.Bucket.Eq(req.Bucket),
|
||||
storageInfo.Category.IsNotNull(),
|
||||
storageInfo.Tag.IsNotNull(),
|
||||
storageInfo.Category.Length().Gt(0),
|
||||
storageInfo.Tag.Length().Gte(0)).
|
||||
storageExtra.Category.IsNotNull(),
|
||||
storageExtra.Tag.IsNotNull(),
|
||||
storageExtra.Category.Length().Gt(0),
|
||||
storageExtra.Tag.Length().Gte(0)).
|
||||
Order(storageInfo.CreatedAt.Desc()).
|
||||
Scan(&thingList)
|
||||
if err != nil {
|
||||
|
@@ -3,7 +3,6 @@ package storage
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -13,7 +12,6 @@ import (
|
||||
"github.com/zeromicro/go-zero/core/logx"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"golang.org/x/sync/semaphore"
|
||||
"gorm.io/gorm"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
@@ -26,9 +24,7 @@ import (
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
"schisandra-album-cloud-microservices/common/constant"
|
||||
"schisandra-album-cloud-microservices/common/encrypt"
|
||||
"schisandra-album-cloud-microservices/common/geo_json"
|
||||
"schisandra-album-cloud-microservices/common/storage/config"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -172,49 +168,9 @@ func (l *UploadFileLogic) UploadFile(r *http.Request) (resp string, err error) {
|
||||
if err != nil {
|
||||
return "", errors.New("publish message failed")
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------
|
||||
|
||||
//// Resolve the geographic location from the GPS info
|
||||
//country, province, city, err := l.getGeoLocation(result.Latitude, result.Longitude)
|
||||
//if err != nil {
|
||||
// return "", err
|
||||
//}
|
||||
//// Save the location info to the database
|
||||
//locationId, err := l.saveFileLocationInfoToDB(uid, result.Provider, result.Bucket, result.Latitude, result.Longitude, country, province, city, filePath)
|
||||
//if err != nil {
|
||||
// return "", err
|
||||
//}
|
||||
//
|
||||
//// Save the EXIF and file info to the database
|
||||
//id, err := l.saveFileInfoToDB(uid, bucket, provider, header, result, locationId, faceId, filePath)
|
||||
//if err != nil {
|
||||
// return "", err
|
||||
//}
|
||||
//// Invalidate the cache
|
||||
//l.afterImageUpload(uid, provider, bucket)
|
||||
//
|
||||
//// Keep the list of files uploaded in the last 7 days in Redis
|
||||
//err = l.saveRecentFileList(uid, url, id, result, header.Filename)
|
||||
//if err != nil {
|
||||
// return "", err
|
||||
//}
|
||||
|
||||
return "success", nil
|
||||
}
|
||||
|
||||
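With the inline geo lookup, DB writes, cache invalidation and recent-list bookkeeping commented out above, UploadFile now only stores the object and publishes a processing job; the NSQ consumer further down in this commit does the rest. A rough sketch of that hand-off built on go-nsq's Producer.Publish, where the topic name and the message shape are assumptions (the real ones are defined outside this hunk):

    // uploadJob is a hypothetical payload; the real message struct is not shown in this hunk.
    type uploadJob struct {
        UID      string     `json:"uid"`
        FilePath string     `json:"file_path"`
        Data     types.File `json:"data"`
    }

    func publishUploadJob(producer *nsq.Producer, topic string, job uploadJob) error {
        body, err := json.Marshal(job)
        if err != nil {
            return err
        }
        // The consumer's HandleMessage performs the geo/DB/cache work asynchronously.
        return producer.Publish(topic, body)
    }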
// Convert a multipart.File to a Base64 string
|
||||
func (l *UploadFileLogic) fileToBase64(file multipart.File) (string, error) {
|
||||
// Read the file content
|
||||
fileBytes, err := io.ReadAll(file)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Encode the file content as Base64
|
||||
return base64.StdEncoding.EncodeToString(fileBytes), nil
|
||||
}
|
||||
|
||||
// Get the user ID
|
||||
func (l *UploadFileLogic) getUserID() (string, error) {
|
||||
uid, ok := l.ctx.Value("user_id").(string)
|
||||
@@ -224,16 +180,6 @@ func (l *UploadFileLogic) getUserID() (string, error) {
|
||||
return uid, nil
|
||||
}
|
||||
|
||||
// Add this in UploadImageLogic or any other logic that needs to invalidate the cache:
|
||||
func (l *UploadFileLogic) afterImageUpload(uid, provider, bucket string) {
|
||||
for _, sort := range []bool{true, false} {
|
||||
key := fmt.Sprintf("%s%s:%s:%s:%v", constant.ImageListPrefix, uid, provider, bucket, sort)
|
||||
if err := l.svcCtx.RedisClient.Del(l.ctx, key).Err(); err != nil {
|
||||
logx.Errorf("删除缓存键 %s 失败: %v", key, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parse the uploaded file
|
||||
func (l *UploadFileLogic) getUploadedFile(r *http.Request) (multipart.File, *multipart.FileHeader, error) {
|
||||
file, header, err := r.FormFile("file")
|
||||
@@ -262,19 +208,6 @@ func (l *UploadFileLogic) parseImageInfoResult(r *http.Request) (types.File, err
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Resolve the geographic location from the GPS info
|
||||
func (l *UploadFileLogic) getGeoLocation(latitude, longitude float64) (string, string, string, error) {
|
||||
if latitude == 0.000000 || longitude == 0.000000 {
|
||||
return "", "", "", nil
|
||||
}
|
||||
country, province, city, err := geo_json.GetAddress(latitude, longitude, l.svcCtx.GeoRegionData)
|
||||
if err != nil {
|
||||
return "", "", "", errors.New("get geo location failed")
|
||||
}
|
||||
|
||||
return country, province, city, nil
|
||||
}
|
||||
|
||||
// Upload the file to OSS
|
||||
func (l *UploadFileLogic) uploadFileToOSS(uid string, header *multipart.FileHeader, file multipart.File, result types.File) (string, error) {
|
||||
cacheKey := constant.UserOssConfigPrefix + uid + ":" + result.Provider
|
||||
@@ -345,75 +278,6 @@ func (l *UploadFileLogic) uploadFileToMinio(uid string, header *multipart.FileHe
|
||||
return objectKey, presignedURL.String(), nil
|
||||
}
|
||||
|
||||
func (l *UploadFileLogic) saveFileLocationInfoToDB(uid string, provider string, bucket string, latitude float64, longitude float64, country string, province string, city string, filePath string) (int64, error) {
|
||||
if latitude == 0.000000 || longitude == 0.000000 {
|
||||
return 0, nil
|
||||
}
|
||||
locationDB := l.svcCtx.DB.ScaStorageLocation
|
||||
storageLocations, err := locationDB.Where(locationDB.UserID.Eq(uid), locationDB.Province.Eq(province), locationDB.City.Eq(city)).First()
|
||||
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return 0, err
|
||||
}
|
||||
if storageLocations == nil {
|
||||
locationInfo := model.ScaStorageLocation{
|
||||
Provider: provider,
|
||||
Bucket: bucket,
|
||||
UserID: uid,
|
||||
Country: country,
|
||||
City: city,
|
||||
Province: province,
|
||||
Latitude: fmt.Sprintf("%f", latitude),
|
||||
Longitude: fmt.Sprintf("%f", longitude),
|
||||
Total: 1,
|
||||
CoverImage: filePath,
|
||||
}
|
||||
err = locationDB.Create(&locationInfo)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return locationInfo.ID, nil
|
||||
} else {
|
||||
info, err := locationDB.Where(locationDB.ID.Eq(storageLocations.ID), locationDB.UserID.Eq(uid)).UpdateColumnSimple(locationDB.Total.Add(1), locationDB.CoverImage.Value(filePath))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if info.RowsAffected == 0 {
|
||||
return 0, errors.New("update location failed")
|
||||
}
|
||||
return storageLocations.ID, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Save the EXIF and file info to the database
|
||||
func (l *UploadFileLogic) saveFileInfoToDB(uid, bucket, provider string, header *multipart.FileHeader, result types.File, locationId, faceId int64, filePath string) (int64, error) {
|
||||
|
||||
typeName := l.classifyFile(result.FileType, result.IsScreenshot)
|
||||
scaStorageInfo := &model.ScaStorageInfo{
|
||||
UserID: uid,
|
||||
Provider: provider,
|
||||
Bucket: bucket,
|
||||
FileName: header.Filename,
|
||||
FileSize: strconv.FormatInt(header.Size, 10),
|
||||
FileType: result.FileType,
|
||||
Path: filePath,
|
||||
Landscape: result.Landscape,
|
||||
Tag: result.TagName,
|
||||
IsAnime: strconv.FormatBool(result.IsAnime),
|
||||
Category: result.TopCategory,
|
||||
LocationID: locationId,
|
||||
FaceID: faceId,
|
||||
Type: typeName,
|
||||
Width: result.Width,
|
||||
Height: result.Height,
|
||||
}
|
||||
|
||||
err := l.svcCtx.DB.ScaStorageInfo.Create(scaStorageInfo)
|
||||
if err != nil {
|
||||
return 0, errors.New("create storage info failed")
|
||||
}
|
||||
return scaStorageInfo.ID, nil
|
||||
}
|
||||
|
||||
// Decryption extracted into a helper function
|
||||
func (l *UploadFileLogic) decryptConfig(dbConfig *model.ScaStorageConfig) (*config.StorageConfig, error) {
|
||||
accessKey, err := encrypt.Decrypt(dbConfig.AccessKey, l.svcCtx.Config.Encrypt.Key)
|
||||
@@ -503,28 +367,3 @@ func (l *UploadFileLogic) classifyFile(mimeType string, isScreenshot bool) strin
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// Keep the list of files uploaded in the last 7 days
|
||||
func (l *UploadFileLogic) saveRecentFileList(uid, url string, id int64, result types.File, filename string) error {
|
||||
|
||||
redisKey := constant.ImageRecentPrefix + uid + ":" + strconv.FormatInt(id, 10)
|
||||
imageMeta := types.ImageMeta{
|
||||
ID: id,
|
||||
URL: url,
|
||||
FileName: filename,
|
||||
Width: result.Width,
|
||||
Height: result.Height,
|
||||
CreatedAt: time.Now().Format("2006-01-02 15:04:05"),
|
||||
}
|
||||
marshal, err := json.Marshal(imageMeta)
|
||||
if err != nil {
|
||||
logx.Error(err)
|
||||
return errors.New("marshal image meta failed")
|
||||
}
|
||||
err = l.svcCtx.RedisClient.Set(l.ctx, redisKey, marshal, time.Hour*24*7).Err()
|
||||
if err != nil {
|
||||
logx.Error(err)
|
||||
return errors.New("save recent file list failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@@ -55,23 +55,27 @@ func (c *NsqImageProcessConsumer) HandleMessage(msg *nsq.Message) error {
|
||||
return err
|
||||
}
|
||||
// Save the location info to the database
|
||||
locationId, err := c.saveFileLocationInfoToDB(message.UID, message.Data.Provider, message.Data.Bucket, message.Data.Latitude, message.Data.Longitude, country, province, city, message.ThumbPath)
|
||||
locationId, err := c.saveFileLocationInfoToDB(message.UID, message.Data.Latitude, message.Data.Longitude, country, province, city, message.ThumbPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
thumbnailId, err := c.saveFileThumbnailInfoToDB(message.UID, message.ThumbPath, message.Data.ThumbW, message.Data.ThumbH, message.Data.ThumbSize)
|
||||
|
||||
// Save the file info to the database
|
||||
id, err := c.saveFileInfoToDB(message.UID, message.Data.Bucket, message.Data.Provider, message.FileHeader, message.Data, locationId, message.FaceID, message.FilePath, thumbnailId)
|
||||
storageId, err := c.saveFileInfoToDB(message.UID, message.Data.Bucket, message.Data.Provider, message.FileHeader, message.Data, message.FaceID, message.FilePath, locationId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = c.saveFileThumbnailInfoToDB(message.UID, message.ThumbPath, message.Data.ThumbW, message.Data.ThumbH, message.Data.ThumbSize, storageId)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Invalidate the cache
|
||||
c.afterImageUpload(message.UID, message.Data.Provider, message.Data.Bucket)
|
||||
c.afterImageUpload(message.UID)
|
||||
|
||||
// Keep the list of files uploaded in the last 7 days in Redis
|
||||
err = c.saveRecentFileList(message.UID, message.PresignedURL, id, message.Data, message.FileHeader.Filename)
|
||||
err = c.saveRecentFileList(message.UID, message.PresignedURL, storageId, message.Data, message.FileHeader.Filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
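The reordering in this hunk follows the new schema: the sca_storage_info row has to exist first, because the thumbnail row now references it through info_id (previously the info row referenced the thumbnail through thumb_id). Condensed, with the same names and error handling as the hunk above:

    locationID, err := c.saveFileLocationInfoToDB(message.UID, message.Data.Latitude, message.Data.Longitude, country, province, city, message.ThumbPath)
    if err != nil {
        return err
    }
    // 1. parent row
    storageID, err := c.saveFileInfoToDB(message.UID, message.Data.Bucket, message.Data.Provider, message.FileHeader, message.Data, message.FaceID, message.FilePath, locationID)
    if err != nil {
        return err
    }
    // 2. child row pointing back at it via info_id
    if err := c.saveFileThumbnailInfoToDB(message.UID, message.ThumbPath, message.Data.ThumbW, message.Data.ThumbH, message.Data.ThumbSize, storageID); err != nil {
        return err
    }
    // 3. drop every cached list view for this user
    c.afterImageUpload(message.UID)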
@@ -150,15 +154,16 @@ func (c *NsqImageProcessConsumer) classifyFile(mimeType string, isScreenshot boo
|
||||
"video/x-matroska": "video",
|
||||
}
|
||||
|
||||
// If isScreenshot is true, return "screenshot"
|
||||
if isScreenshot {
|
||||
return "screenshot"
|
||||
}
|
||||
|
||||
// Look up the classification from the map by MIME type
|
||||
if classification, exists := typeMap[mimeType]; exists {
|
||||
return classification
|
||||
}
|
||||
|
||||
// If isScreenshot is true, return "screenshot"
|
||||
if isScreenshot {
|
||||
return "screenshot"
|
||||
}
|
||||
return "unknown"
|
||||
}
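Moving the isScreenshot check ahead of the MIME lookup changes the answer whenever a screenshot's MIME type is also present in typeMap. A tiny worked example, assuming "image/png" maps to "image" in the part of typeMap above this hunk:

    // Inside the consumer, with the same typeMap as above:
    _ = c.classifyFile("image/png", true)  // now "screenshot"; before this change the MIME match won and it returned "image"
    _ = c.classifyFile("image/png", false) // still "image"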
|
||||
|
||||
@@ -207,7 +212,7 @@ func (c *NsqImageProcessConsumer) decryptConfig(dbConfig *model.ScaStorageConfig
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *NsqImageProcessConsumer) saveFileLocationInfoToDB(uid string, provider string, bucket string, latitude float64, longitude float64, country string, province string, city string, filePath string) (int64, error) {
|
||||
func (c *NsqImageProcessConsumer) saveFileLocationInfoToDB(uid string, latitude float64, longitude float64, country string, province string, city string, filePath string) (int64, error) {
|
||||
if latitude == 0.000000 || longitude == 0.000000 {
|
||||
return 0, nil
|
||||
}
|
||||
@@ -218,35 +223,24 @@ func (c *NsqImageProcessConsumer) saveFileLocationInfoToDB(uid string, provider
|
||||
}
|
||||
if storageLocations == nil {
|
||||
locationInfo := model.ScaStorageLocation{
|
||||
Provider: provider,
|
||||
Bucket: bucket,
|
||||
UserID: uid,
|
||||
Country: country,
|
||||
City: city,
|
||||
Province: province,
|
||||
Latitude: fmt.Sprintf("%f", latitude),
|
||||
Longitude: fmt.Sprintf("%f", longitude),
|
||||
Total: 1,
|
||||
CoverImage: filePath,
|
||||
}
|
||||
err = locationDB.Create(&locationInfo)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return locationInfo.ID, nil
|
||||
} else {
|
||||
info, err := locationDB.Where(locationDB.ID.Eq(storageLocations.ID), locationDB.UserID.Eq(uid)).UpdateColumnSimple(locationDB.Total.Add(1), locationDB.CoverImage.Value(filePath))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if info.RowsAffected == 0 {
|
||||
return 0, errors.New("update location failed")
|
||||
}
|
||||
return storageLocations.ID, nil
|
||||
return 0, nil
|
||||
}
|
||||
return storageLocations.ID, nil
|
||||
}
|
||||
|
||||
func (c *NsqImageProcessConsumer) saveFileThumbnailInfoToDB(uid string, filePath string, width, height float64, size float64) (int64, error) {
|
||||
func (c *NsqImageProcessConsumer) saveFileThumbnailInfoToDB(uid string, filePath string, width, height float64, size float64, storageId int64) error {
|
||||
storageThumb := c.svcCtx.DB.ScaStorageThumb
|
||||
storageThumbInfo := &model.ScaStorageThumb{
|
||||
UserID: uid,
|
||||
@@ -254,18 +248,25 @@ func (c *NsqImageProcessConsumer) saveFileThumbnailInfoToDB(uid string, filePath
|
||||
ThumbW: width,
|
||||
ThumbH: height,
|
||||
ThumbSize: size,
|
||||
InfoID: storageId,
|
||||
}
|
||||
err := storageThumb.Create(storageThumbInfo)
|
||||
if err != nil {
|
||||
logx.Error(err)
|
||||
return 0, errors.New("create storage thumb failed")
|
||||
return errors.New("create storage thumb failed")
|
||||
}
|
||||
return storageThumbInfo.ID, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Save the EXIF and file info to the database
|
||||
func (c *NsqImageProcessConsumer) saveFileInfoToDB(uid, bucket, provider string, header *multipart.FileHeader, result types.File, locationId, faceId int64, filePath string, thumbnailId int64) (int64, error) {
|
||||
|
||||
func (c *NsqImageProcessConsumer) saveFileInfoToDB(uid, bucket, provider string, header *multipart.FileHeader, result types.File, faceId int64, filePath string, locationID int64) (int64, error) {
|
||||
tx := c.svcCtx.DB.Begin()
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
tx.Rollback() // roll back the transaction if a panic occurred
|
||||
logx.Errorf("transaction rollback: %v", r)
|
||||
}
|
||||
}()
|
||||
typeName := c.classifyFile(result.FileType, result.IsScreenshot)
|
||||
scaStorageInfo := &model.ScaStorageInfo{
|
||||
UserID: uid,
|
||||
@@ -275,31 +276,55 @@ func (c *NsqImageProcessConsumer) saveFileInfoToDB(uid, bucket, provider string,
|
||||
FileSize: strconv.FormatInt(header.Size, 10),
|
||||
FileType: result.FileType,
|
||||
Path: filePath,
|
||||
Landscape: result.Landscape,
|
||||
Tag: result.TagName,
|
||||
IsAnime: strconv.FormatBool(result.IsAnime),
|
||||
Category: result.TopCategory,
|
||||
LocationID: locationId,
|
||||
FaceID: faceId,
|
||||
Type: typeName,
|
||||
Width: result.Width,
|
||||
Height: result.Height,
|
||||
ThumbID: thumbnailId,
|
||||
LocationID: locationID,
|
||||
}
|
||||
|
||||
err := c.svcCtx.DB.ScaStorageInfo.Create(scaStorageInfo)
|
||||
err := tx.ScaStorageInfo.Create(scaStorageInfo)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return 0, errors.New("create storage info failed")
|
||||
}
|
||||
scaStorageExtra := &model.ScaStorageExtra{
|
||||
UserID: uid,
|
||||
InfoID: scaStorageInfo.ID,
|
||||
Landscape: result.Landscape,
|
||||
Tag: result.TagName,
|
||||
IsAnime: strconv.FormatBool(result.IsAnime),
|
||||
Category: result.TopCategory,
|
||||
}
|
||||
err = tx.ScaStorageExtra.Create(scaStorageExtra)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return 0, errors.New("create storage extra failed")
|
||||
}
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return 0, errors.New("commit failed")
|
||||
}
|
||||
return scaStorageInfo.ID, nil
|
||||
}
|
||||
|
||||
// Add this in UploadImageLogic or any other logic that needs to invalidate the cache:
|
||||
func (c *NsqImageProcessConsumer) afterImageUpload(uid, provider, bucket string) {
|
||||
for _, sort := range []bool{true, false} {
|
||||
key := fmt.Sprintf("%s%s:%s:%s:%v", constant.ImageListPrefix, uid, provider, bucket, sort)
|
||||
if err := c.svcCtx.RedisClient.Del(c.ctx, key).Err(); err != nil {
|
||||
logx.Errorf("删除缓存键 %s 失败: %v", key, err)
|
||||
}
|
||||
func (c *NsqImageProcessConsumer) afterImageUpload(uid string) {
|
||||
// Invalidate the cache
|
||||
keyPattern := fmt.Sprintf("%s%s:%s", constant.ImageCachePrefix, uid, "*")
|
||||
// Get all matching keys
|
||||
keys, err := c.svcCtx.RedisClient.Keys(c.ctx, keyPattern).Result()
|
||||
if err != nil {
|
||||
logx.Errorf("获取缓存键 %s 失败: %v", keyPattern, err)
|
||||
}
|
||||
// If no keys match, return immediately
|
||||
if len(keys) == 0 {
|
||||
logx.Infof("没有找到匹配的缓存键: %s", keyPattern)
|
||||
return
|
||||
}
|
||||
// Delete all matching keys
|
||||
if err := c.svcCtx.RedisClient.Del(c.ctx, keys...).Err(); err != nil {
|
||||
logx.Errorf("删除缓存键 %s 失败: %v", keyPattern, err)
|
||||
|
||||
}
|
||||
}
|
||||
|
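afterImageUpload no longer needs the provider/bucket/sort combinations: every list cache key introduced in this commit starts with constant.ImageCachePrefix plus the user ID, so one wildcard covers all of a user's cached views. The same idea as a standalone helper; KEYS blocks Redis while it scans, which is acceptable at small scale, and SCAN is the usual substitute on larger keyspaces:

    // invalidateUserImageCache deletes every cached image-list view for one user.
    func invalidateUserImageCache(ctx context.Context, rdb *redis.Client, prefix, uid string) error {
        pattern := fmt.Sprintf("%s%s:*", prefix, uid)
        keys, err := rdb.Keys(ctx, pattern).Result()
        if err != nil || len(keys) == 0 {
            return err
        }
        return rdb.Del(ctx, keys...).Err()
    }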
@@ -63,3 +63,12 @@ type ShareImageInfo struct {
|
||||
Provider string `json:"provider"`
|
||||
Bucket string `json:"bucket"`
|
||||
}
|
||||
|
||||
type LocationInfo struct {
|
||||
ID int64 `json:"id"`
|
||||
Country string `json:"country"`
|
||||
City string `json:"city"`
|
||||
Province string `json:"province"`
|
||||
CoverImage string `json:"cover_image"`
|
||||
Total int64 `json:"total"`
|
||||
}
|
||||
|
@@ -76,6 +76,17 @@ type AllImageListResponse struct {
|
||||
Records []AllImageDetail `json:"records"`
|
||||
}
|
||||
|
||||
type BucketCapacityRequest struct {
|
||||
Provider string `json:"provider"`
|
||||
Bucket string `json:"bucket"`
|
||||
}
|
||||
|
||||
type BucketCapacityResponse struct {
|
||||
Capacity string `json:"capacity"`
|
||||
Used string `json:"used"`
|
||||
Percentage float64 `json:"percentage"`
|
||||
}
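Together with the new capacity column on sca_storage_config, this response reports used space against a configured quota: human-readable strings plus a numeric percentage for the UI. How such a response could be assembled is sketched below; the byte formatter and the way usage is measured are assumptions, not something this commit shows:

    // buildCapacityResponse: usedBytes would come from the storage provider,
    // capacityBytes from sca_storage_config.capacity.
    func buildCapacityResponse(usedBytes, capacityBytes int64) types.BucketCapacityResponse {
        pct := 0.0
        if capacityBytes > 0 {
            pct = float64(usedBytes) / float64(capacityBytes) * 100
        }
        return types.BucketCapacityResponse{
            Capacity:   humanGB(capacityBytes),
            Used:       humanGB(usedBytes),
            Percentage: pct,
        }
    }

    // humanGB is a hypothetical formatter, e.g. 1610612736 -> "1.5 GB".
    func humanGB(n int64) string {
        return fmt.Sprintf("%.1f GB", float64(n)/(1<<30))
    }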
|
||||
|
||||
type CommentContent struct {
|
||||
NickName string `json:"nickname"`
|
||||
Avatar string `json:"avatar"`
|
||||
@@ -147,6 +158,16 @@ type CommentResponse struct {
|
||||
ReplyTo int64 `json:"reply_to,omitempty"`
|
||||
}
|
||||
|
||||
type DeleteImageRequest struct {
|
||||
IDS []int64 `json:"ids"`
|
||||
Provider string `json:"provider"`
|
||||
Bucket string `json:"bucket"`
|
||||
}
|
||||
|
||||
type DeleteRecordListResponse struct {
|
||||
Records []AllImageDetail `json:"records"`
|
||||
}
|
||||
|
||||
type FaceDetailListRequest struct {
|
||||
FaceID int64 `json:"face_id"`
|
||||
Provider string `json:"provider"`
|
||||
@@ -254,13 +275,22 @@ type PhoneLoginRequest struct {
|
||||
AutoLogin bool `json:"auto_login"`
|
||||
}
|
||||
|
||||
type QueryDeleteRecordRequest struct {
|
||||
Provider string `json:"provider"`
|
||||
Bucket string `json:"bucket"`
|
||||
}
|
||||
|
||||
type QueryShareImageRequest struct {
|
||||
ShareCode string `json:"share_code"`
|
||||
InviteCode string `json:"invite_code"`
|
||||
AccessPassword string `json:"access_password,omitempty"`
|
||||
}
|
||||
|
||||
type QueryShareImageResponse struct {
|
||||
List []ShareImageListMeta `json:"list"`
|
||||
Records []ShareImageListMeta `json:"records"`
|
||||
}
|
||||
|
||||
type QueryShareInfoRequest struct {
|
||||
InviteCode string `json:"invite_code"`
|
||||
}
|
||||
|
||||
type RecentListResponse struct {
|
||||
@@ -320,8 +350,8 @@ type ShareImageListMeta struct {
|
||||
FileName string `json:"file_name"`
|
||||
URL string `json:"url"`
|
||||
Thumbnail string `json:"thumbnail"`
|
||||
ThumbW float64 `json:"thumb_w"`
|
||||
ThumbH float64 `json:"thumb_h"`
|
||||
Width float64 `json:"width"`
|
||||
Height float64 `json:"height"`
|
||||
ThumbSize float64 `json:"thumb_size"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
}
|
||||
@@ -346,11 +376,34 @@ type ShareImageRequest struct {
|
||||
Images []ShareImageMeta `json:"images"`
|
||||
}
|
||||
|
||||
type ShareInfoResponse struct {
|
||||
ID int64 `json:"id"`
|
||||
CoverImage string `json:"cover_image"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
VisitLimit int64 `json:"visit_limit"`
|
||||
ExpireTime string `json:"expire_time"`
|
||||
ImageCount int64 `json:"image_count"`
|
||||
VisitCount int64 `json:"visit_count"`
|
||||
ViewerCount int64 `json:"viewer_count"`
|
||||
SharerAvatar string `json:"sharer_avatar"`
|
||||
SharerName string `json:"sharer_name"`
|
||||
AlbumName string `json:"album_name"`
|
||||
}
|
||||
|
||||
type ShareOverviewResponse struct {
|
||||
VisitCount int64 `json:"visit_count"`
|
||||
VisitCountToday int64 `json:"visit_count_today"`
|
||||
ViewerCount int64 `json:"viewer_count"`
|
||||
ViewerCountToday int64 `json:"viewer_count_today"`
|
||||
PublishCount int64 `json:"publish_count"`
|
||||
PublishCountToday int64 `json:"publish_count_today"`
|
||||
}
|
||||
|
||||
type ShareRecord struct {
|
||||
ID int64 `json:"id"`
|
||||
CoverImage string `json:"cover_image"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
ShareCode string `json:"share_code"`
|
||||
InviteCode string `json:"invite_code"`
|
||||
VisitLimit int64 `json:"visit_limit"`
|
||||
AccessPassword string `json:"access_password"`
|
||||
ValidityPeriod int64 `json:"validity_period"`
|
||||
|
@@ -110,8 +110,6 @@ func main() {
|
||||
scaMessageReport := g.GenerateModel("sca_message_report", fieldOpts...)
|
||||
scaStorageConfig := g.GenerateModel("sca_storage_config", fieldOpts...)
|
||||
scaStorageInfo := g.GenerateModel("sca_storage_info", fieldOpts...)
|
||||
scaStorageTag := g.GenerateModel("sca_storage_tag", fieldOpts...)
|
||||
scaStorageTagInfo := g.GenerateModel("sca_storage_tag_info", fieldOpts...)
|
||||
scaUserFollows := g.GenerateModel("sca_user_follows", fieldOpts...)
|
||||
scaUserLevel := g.GenerateModel("sca_user_level", fieldOpts...)
|
||||
scaUserMessage := g.GenerateModel("sca_user_message", fieldOpts...)
|
||||
@@ -120,6 +118,7 @@ func main() {
|
||||
scaStorageThumb := g.GenerateModel("sca_storage_thumb", fieldOpts...)
|
||||
scaStorageShare := g.GenerateModel("sca_storage_share", fieldOpts...)
|
||||
scaStorageShareVisit := g.GenerateModel("sca_storage_share_visit", fieldOpts...)
|
||||
scaStorageExtra := g.GenerateModel("sca_storage_extra", fieldOpts...)
|
||||
|
||||
g.ApplyBasic(
|
||||
scaAuthMenu,
|
||||
@@ -133,8 +132,6 @@ func main() {
|
||||
scaMessageReport,
|
||||
scaStorageConfig,
|
||||
scaStorageInfo,
|
||||
scaStorageTag,
|
||||
scaStorageTagInfo,
|
||||
scaUserFollows,
|
||||
scaUserLevel,
|
||||
scaUserMessage,
|
||||
@@ -143,6 +140,7 @@ func main() {
|
||||
scaStorageThumb,
|
||||
scaStorageShare,
|
||||
scaStorageShareVisit,
|
||||
scaStorageExtra,
|
||||
)
|
||||
|
||||
g.Execute()
|
||||
|
@@ -22,6 +22,7 @@ type ScaStorageConfig struct {
|
||||
SecretKey string `gorm:"column:secret_key;type:varchar(100);comment:密钥" json:"secret_key"` // 密钥
|
||||
Bucket string `gorm:"column:bucket;type:varchar(50);comment:存储桶" json:"bucket"` // 存储桶
|
||||
Region string `gorm:"column:region;type:varchar(50);comment:地域" json:"region"` // 地域
|
||||
Capacity int64 `gorm:"column:capacity;type:bigint(20);comment:容量" json:"capacity"` // 容量
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
|
33
app/auth/model/mysql/model/sca_storage_extra.gen.go
Normal file
@@ -0,0 +1,33 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
const TableNameScaStorageExtra = "sca_storage_extra"
|
||||
|
||||
// ScaStorageExtra 文件信息额外表
|
||||
type ScaStorageExtra struct {
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
UserID string `gorm:"column:user_id;type:varchar(255);comment:用户ID" json:"user_id"` // 用户ID
|
||||
InfoID int64 `gorm:"column:info_id;type:bigint(20);comment:文件信息ID" json:"info_id"` // 文件信息ID
|
||||
Category string `gorm:"column:category;type:varchar(50);comment:分类" json:"category"` // 分类
|
||||
Tag string `gorm:"column:tag;type:varchar(255);comment:标签" json:"tag"` // 标签
|
||||
IsAnime string `gorm:"column:is_anime;type:varchar(50);comment:是否是动漫图片" json:"is_anime"` // 是否是动漫图片
|
||||
Landscape string `gorm:"column:landscape;type:varchar(50);comment:风景类型" json:"landscape"` // 风景类型
|
||||
Hash string `gorm:"column:hash;type:varchar(255);comment:哈希值" json:"hash"` // 哈希值
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
}
|
||||
|
||||
// TableName ScaStorageExtra's table name
|
||||
func (*ScaStorageExtra) TableName() string {
|
||||
return TableNameScaStorageExtra
|
||||
}
|
@@ -24,17 +24,11 @@ type ScaStorageInfo struct {
|
||||
FileType string `gorm:"column:file_type;type:varchar(50);comment:文件类型" json:"file_type"` // 文件类型
|
||||
Width float64 `gorm:"column:width;type:double;comment:宽" json:"width"` // 宽
|
||||
Height float64 `gorm:"column:height;type:double;comment:高" json:"height"` // 高
|
||||
ThumbID int64 `gorm:"column:thumb_id;type:bigint(20);comment:缩略图id" json:"thumb_id"` // 缩略图id
|
||||
Category string `gorm:"column:category;type:varchar(50);comment:分类" json:"category"` // 分类
|
||||
Tag string `gorm:"column:tag;type:varchar(255);comment:标签" json:"tag"` // 标签
|
||||
Type string `gorm:"column:type;type:varchar(50);comment:类型" json:"type"` // 类型
|
||||
LocationID int64 `gorm:"column:location_id;type:bigint(20);comment:地址ID" json:"location_id"` // 地址ID
|
||||
Hash string `gorm:"column:hash;type:varchar(255);comment:哈希值" json:"hash"` // 哈希值
|
||||
IsAnime string `gorm:"column:is_anime;type:varchar(50);comment:是否是动漫图片" json:"is_anime"` // 是否是动漫图片
|
||||
FaceID int64 `gorm:"column:face_id;type:bigint(20);comment:人像ID" json:"face_id"` // 人像ID
|
||||
Landscape string `gorm:"column:landscape;type:varchar(50);comment:风景类型" json:"landscape"` // 风景类型
|
||||
IsDisplayed int64 `gorm:"column:is_displayed;type:tinyint(4);comment:是否隐藏(0 不隐藏 1 隐藏)" json:"is_displayed"` // 是否隐藏(0 不隐藏 1 隐藏)
|
||||
AlbumID int64 `gorm:"column:album_id;type:bigint(20);comment:相册ID" json:"album_id"` // 相册ID
|
||||
LocationID int64 `gorm:"column:location_id;type:bigint(20);comment:地址ID" json:"location_id"` // 地址ID
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
|
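This is the "optimized table structures" part of the commit: category, tag, is_anime, landscape and hash move from sca_storage_info into the new sca_storage_extra table (one row per image, linked by info_id), thumb_id disappears in favor of sca_storage_thumb.info_id, and the location link stays on the info row. Reading both halves of one image with the generated query API would look roughly like this:

    // loadImage fetches an info row together with its 1:1 extra row.
    func loadImage(q *query.Query, uid string, infoID int64) (*model.ScaStorageInfo, *model.ScaStorageExtra, error) {
        info, err := q.ScaStorageInfo.Where(q.ScaStorageInfo.ID.Eq(infoID), q.ScaStorageInfo.UserID.Eq(uid)).First()
        if err != nil {
            return nil, nil, err
        }
        extra, err := q.ScaStorageExtra.Where(q.ScaStorageExtra.InfoID.Eq(info.ID)).First()
        if err != nil {
            return nil, nil, err
        }
        return info, extra, nil
    }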
@@ -17,14 +17,11 @@ const TableNameScaStorageLocation = "sca_storage_location"
|
||||
type ScaStorageLocation struct {
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
UserID string `gorm:"column:user_id;type:varchar(50);comment:用户id" json:"user_id"` // 用户id
|
||||
Provider string `gorm:"column:provider;type:varchar(50);comment:供应商" json:"provider"` // 供应商
|
||||
Bucket string `gorm:"column:bucket;type:varchar(50);comment:存储桶" json:"bucket"` // 存储桶
|
||||
Country string `gorm:"column:country;type:varchar(100);comment:国家" json:"country"` // 国家
|
||||
Province string `gorm:"column:province;type:varchar(100);comment:省" json:"province"` // 省
|
||||
City string `gorm:"column:city;type:varchar(100);comment:城市" json:"city"` // 城市
|
||||
Latitude string `gorm:"column:latitude;type:varchar(50);comment:纬度" json:"latitude"` // 纬度
|
||||
Longitude string `gorm:"column:longitude;type:varchar(50);comment:经度" json:"longitude"` // 经度
|
||||
Total int64 `gorm:"column:total;type:bigint(20);comment:数量" json:"total"` // 数量
|
||||
CoverImage string `gorm:"column:cover_image;type:text;comment:封面图片" json:"cover_image"` // 封面图片
|
||||
Version optimisticlock.Version `gorm:"column:version;type:bigint(20);comment:版本" json:"version"` // 版本
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
|
@@ -18,12 +18,13 @@ type ScaStorageShare struct {
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
UserID string `gorm:"column:user_id;type:varchar(50);comment:用户ID" json:"user_id"` // 用户ID
|
||||
AlbumID int64 `gorm:"column:album_id;type:bigint(20);comment:相册ID" json:"album_id"` // 相册ID
|
||||
ShareCode string `gorm:"column:share_code;type:varchar(50);comment:分享码(用于访问分享链接)" json:"share_code"` // 分享码(用于访问分享链接)
|
||||
InviteCode string `gorm:"column:invite_code;type:varchar(50);comment:邀请码(用于访问分享链接)" json:"invite_code"` // 邀请码(用于访问分享链接)
|
||||
ExpireTime time.Time `gorm:"column:expire_time;type:datetime;comment:过期时间" json:"expire_time"` // 过期时间
|
||||
ValidityPeriod int64 `gorm:"column:validity_period;type:int(11);comment:有效期" json:"validity_period"` // 有效期
|
||||
Status int64 `gorm:"column:status;type:tinyint(4);comment:是否失效(0 有效 -1已失效 1 永久)" json:"status"` // 是否失效(0 有效 -1已失效 1 永久)
|
||||
AccessPassword string `gorm:"column:access_password;type:varchar(50);comment:访问密码" json:"access_password"` // 访问密码
|
||||
VisitLimit int64 `gorm:"column:visit_limit;type:bigint(20);comment:限制次数" json:"visit_limit"` // 限制次数
|
||||
ImageCount int64 `gorm:"column:image_count;type:bigint(20);comment:图片数量" json:"image_count"` // 图片数量
|
||||
Version optimisticlock.Version `gorm:"column:version;type:bigint(20);comment:版本" json:"version"` // 版本
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
|
@@ -1,28 +0,0 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
const TableNameScaStorageTag = "sca_storage_tag"
|
||||
|
||||
// ScaStorageTag 标签表
|
||||
type ScaStorageTag struct {
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
FileID int64 `gorm:"column:file_id;type:bigint(20);comment:文件ID" json:"file_id"` // 文件ID
|
||||
TagID int64 `gorm:"column:tag_id;type:bigint(20);comment:标签ID" json:"tag_id"` // 标签ID
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
}
|
||||
|
||||
// TableName ScaStorageTag's table name
|
||||
func (*ScaStorageTag) TableName() string {
|
||||
return TableNameScaStorageTag
|
||||
}
|
@@ -1,28 +0,0 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
const TableNameScaStorageTagInfo = "sca_storage_tag_info"
|
||||
|
||||
// ScaStorageTagInfo mapped from table <sca_storage_tag_info>
|
||||
type ScaStorageTagInfo struct {
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
TagName string `gorm:"column:tag_name;type:varchar(50);not null;comment:标签名称" json:"tag_name"` // 标签名称
|
||||
TagKey string `gorm:"column:tag_key;type:varchar(50);comment:标签关键字" json:"tag_key"` // 标签关键字
|
||||
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;autoCreateTime;comment:创建时间" json:"created_at"` // 创建时间
|
||||
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;autoUpdateTime;comment:更新时间" json:"updated_at"` // 更新时间
|
||||
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp;comment:删除时间" json:"deleted_at"` // 删除时间
|
||||
}
|
||||
|
||||
// TableName ScaStorageTagInfo's table name
|
||||
func (*ScaStorageTagInfo) TableName() string {
|
||||
return TableNameScaStorageTagInfo
|
||||
}
|
@@ -16,6 +16,7 @@ const TableNameScaStorageThumb = "sca_storage_thumb"
|
||||
type ScaStorageThumb struct {
|
||||
ID int64 `gorm:"column:id;type:bigint(20);primaryKey;autoIncrement:true;comment:主键;primary_key" json:"id,string"` // 主键
|
||||
UserID string `gorm:"column:user_id;type:varchar(50);not null;comment:用户ID" json:"user_id"` // 用户ID
|
||||
InfoID int64 `gorm:"column:info_id;type:bigint(20);comment:信息ID" json:"info_id"` // 信息ID
|
||||
ThumbPath string `gorm:"column:thumb_path;type:text;comment:缩略图路径" json:"thumb_path"` // 缩略图路径
|
||||
ThumbW float64 `gorm:"column:thumb_w;type:double;comment:缩略图宽" json:"thumb_w"` // 缩略图宽
|
||||
ThumbH float64 `gorm:"column:thumb_h;type:double;comment:缩略图高" json:"thumb_h"` // 缩略图高
|
||||
|
@@ -78,8 +78,6 @@ func Migrate(db *gorm.DB) {
|
||||
&model.ScaCommentLike{},
|
||||
&model.ScaCommentReply{},
|
||||
&model.ScaStorageInfo{},
|
||||
&model.ScaStorageTag{},
|
||||
&model.ScaStorageTagInfo{},
|
||||
&model.ScaMessageReport{},
|
||||
&model.ScaStorageConfig{},
|
||||
&model.ScaUserFollow{},
|
||||
|
@@ -28,12 +28,11 @@ var (
|
||||
ScaMessageReport *scaMessageReport
|
||||
ScaStorageAlbum *scaStorageAlbum
|
||||
ScaStorageConfig *scaStorageConfig
|
||||
ScaStorageExtra *scaStorageExtra
|
||||
ScaStorageInfo *scaStorageInfo
|
||||
ScaStorageLocation *scaStorageLocation
|
||||
ScaStorageShare *scaStorageShare
|
||||
ScaStorageShareVisit *scaStorageShareVisit
|
||||
ScaStorageTag *scaStorageTag
|
||||
ScaStorageTagInfo *scaStorageTagInfo
|
||||
ScaStorageThumb *scaStorageThumb
|
||||
ScaUserFollow *scaUserFollow
|
||||
ScaUserLevel *scaUserLevel
|
||||
@@ -53,12 +52,11 @@ func SetDefault(db *gorm.DB, opts ...gen.DOOption) {
|
||||
ScaMessageReport = &Q.ScaMessageReport
|
||||
ScaStorageAlbum = &Q.ScaStorageAlbum
|
||||
ScaStorageConfig = &Q.ScaStorageConfig
|
||||
ScaStorageExtra = &Q.ScaStorageExtra
|
||||
ScaStorageInfo = &Q.ScaStorageInfo
|
||||
ScaStorageLocation = &Q.ScaStorageLocation
|
||||
ScaStorageShare = &Q.ScaStorageShare
|
||||
ScaStorageShareVisit = &Q.ScaStorageShareVisit
|
||||
ScaStorageTag = &Q.ScaStorageTag
|
||||
ScaStorageTagInfo = &Q.ScaStorageTagInfo
|
||||
ScaStorageThumb = &Q.ScaStorageThumb
|
||||
ScaUserFollow = &Q.ScaUserFollow
|
||||
ScaUserLevel = &Q.ScaUserLevel
|
||||
@@ -79,12 +77,11 @@ func Use(db *gorm.DB, opts ...gen.DOOption) *Query {
|
||||
ScaMessageReport: newScaMessageReport(db, opts...),
|
||||
ScaStorageAlbum: newScaStorageAlbum(db, opts...),
|
||||
ScaStorageConfig: newScaStorageConfig(db, opts...),
|
||||
ScaStorageExtra: newScaStorageExtra(db, opts...),
|
||||
ScaStorageInfo: newScaStorageInfo(db, opts...),
|
||||
ScaStorageLocation: newScaStorageLocation(db, opts...),
|
||||
ScaStorageShare: newScaStorageShare(db, opts...),
|
||||
ScaStorageShareVisit: newScaStorageShareVisit(db, opts...),
|
||||
ScaStorageTag: newScaStorageTag(db, opts...),
|
||||
ScaStorageTagInfo: newScaStorageTagInfo(db, opts...),
|
||||
ScaStorageThumb: newScaStorageThumb(db, opts...),
|
||||
ScaUserFollow: newScaUserFollow(db, opts...),
|
||||
ScaUserLevel: newScaUserLevel(db, opts...),
|
||||
@@ -106,12 +103,11 @@ type Query struct {
|
||||
ScaMessageReport scaMessageReport
|
||||
ScaStorageAlbum scaStorageAlbum
|
||||
ScaStorageConfig scaStorageConfig
|
||||
ScaStorageExtra scaStorageExtra
|
||||
ScaStorageInfo scaStorageInfo
|
||||
ScaStorageLocation scaStorageLocation
|
||||
ScaStorageShare scaStorageShare
|
||||
ScaStorageShareVisit scaStorageShareVisit
|
||||
ScaStorageTag scaStorageTag
|
||||
ScaStorageTagInfo scaStorageTagInfo
|
||||
ScaStorageThumb scaStorageThumb
|
||||
ScaUserFollow scaUserFollow
|
||||
ScaUserLevel scaUserLevel
|
||||
@@ -134,12 +130,11 @@ func (q *Query) clone(db *gorm.DB) *Query {
|
||||
ScaMessageReport: q.ScaMessageReport.clone(db),
|
||||
ScaStorageAlbum: q.ScaStorageAlbum.clone(db),
|
||||
ScaStorageConfig: q.ScaStorageConfig.clone(db),
|
||||
ScaStorageExtra: q.ScaStorageExtra.clone(db),
|
||||
ScaStorageInfo: q.ScaStorageInfo.clone(db),
|
||||
ScaStorageLocation: q.ScaStorageLocation.clone(db),
|
||||
ScaStorageShare: q.ScaStorageShare.clone(db),
|
||||
ScaStorageShareVisit: q.ScaStorageShareVisit.clone(db),
|
||||
ScaStorageTag: q.ScaStorageTag.clone(db),
|
||||
ScaStorageTagInfo: q.ScaStorageTagInfo.clone(db),
|
||||
ScaStorageThumb: q.ScaStorageThumb.clone(db),
|
||||
ScaUserFollow: q.ScaUserFollow.clone(db),
|
||||
ScaUserLevel: q.ScaUserLevel.clone(db),
|
||||
@@ -169,12 +164,11 @@ func (q *Query) ReplaceDB(db *gorm.DB) *Query {
|
||||
ScaMessageReport: q.ScaMessageReport.replaceDB(db),
|
||||
ScaStorageAlbum: q.ScaStorageAlbum.replaceDB(db),
|
||||
ScaStorageConfig: q.ScaStorageConfig.replaceDB(db),
|
||||
ScaStorageExtra: q.ScaStorageExtra.replaceDB(db),
|
||||
ScaStorageInfo: q.ScaStorageInfo.replaceDB(db),
|
||||
ScaStorageLocation: q.ScaStorageLocation.replaceDB(db),
|
||||
ScaStorageShare: q.ScaStorageShare.replaceDB(db),
|
||||
ScaStorageShareVisit: q.ScaStorageShareVisit.replaceDB(db),
|
||||
ScaStorageTag: q.ScaStorageTag.replaceDB(db),
|
||||
ScaStorageTagInfo: q.ScaStorageTagInfo.replaceDB(db),
|
||||
ScaStorageThumb: q.ScaStorageThumb.replaceDB(db),
|
||||
ScaUserFollow: q.ScaUserFollow.replaceDB(db),
|
||||
ScaUserLevel: q.ScaUserLevel.replaceDB(db),
|
||||
@@ -194,12 +188,11 @@ type queryCtx struct {
|
||||
ScaMessageReport IScaMessageReportDo
|
||||
ScaStorageAlbum IScaStorageAlbumDo
|
||||
ScaStorageConfig IScaStorageConfigDo
|
||||
ScaStorageExtra IScaStorageExtraDo
|
||||
ScaStorageInfo IScaStorageInfoDo
|
||||
ScaStorageLocation IScaStorageLocationDo
|
||||
ScaStorageShare IScaStorageShareDo
|
||||
ScaStorageShareVisit IScaStorageShareVisitDo
|
||||
ScaStorageTag IScaStorageTagDo
|
||||
ScaStorageTagInfo IScaStorageTagInfoDo
|
||||
ScaStorageThumb IScaStorageThumbDo
|
||||
ScaUserFollow IScaUserFollowDo
|
||||
ScaUserLevel IScaUserLevelDo
|
||||
@@ -219,12 +212,11 @@ func (q *Query) WithContext(ctx context.Context) *queryCtx {
|
||||
ScaMessageReport: q.ScaMessageReport.WithContext(ctx),
|
||||
ScaStorageAlbum: q.ScaStorageAlbum.WithContext(ctx),
|
||||
ScaStorageConfig: q.ScaStorageConfig.WithContext(ctx),
|
||||
ScaStorageExtra: q.ScaStorageExtra.WithContext(ctx),
|
||||
ScaStorageInfo: q.ScaStorageInfo.WithContext(ctx),
|
||||
ScaStorageLocation: q.ScaStorageLocation.WithContext(ctx),
|
||||
ScaStorageShare: q.ScaStorageShare.WithContext(ctx),
|
||||
ScaStorageShareVisit: q.ScaStorageShareVisit.WithContext(ctx),
|
||||
ScaStorageTag: q.ScaStorageTag.WithContext(ctx),
|
||||
ScaStorageTagInfo: q.ScaStorageTagInfo.WithContext(ctx),
|
||||
ScaStorageThumb: q.ScaStorageThumb.WithContext(ctx),
|
||||
ScaUserFollow: q.ScaUserFollow.WithContext(ctx),
|
||||
ScaUserLevel: q.ScaUserLevel.WithContext(ctx),
|
||||
|
@@ -35,6 +35,7 @@ func newScaStorageConfig(db *gorm.DB, opts ...gen.DOOption) scaStorageConfig {
|
||||
_scaStorageConfig.SecretKey = field.NewString(tableName, "secret_key")
|
||||
_scaStorageConfig.Bucket = field.NewString(tableName, "bucket")
|
||||
_scaStorageConfig.Region = field.NewString(tableName, "region")
|
||||
_scaStorageConfig.Capacity = field.NewInt64(tableName, "capacity")
|
||||
_scaStorageConfig.CreatedAt = field.NewTime(tableName, "created_at")
|
||||
_scaStorageConfig.UpdatedAt = field.NewTime(tableName, "updated_at")
|
||||
_scaStorageConfig.DeletedAt = field.NewField(tableName, "deleted_at")
|
||||
@@ -57,6 +58,7 @@ type scaStorageConfig struct {
|
||||
SecretKey field.String // 密钥
|
||||
Bucket field.String // 存储桶
|
||||
Region field.String // 地域
|
||||
Capacity field.Int64 // 容量
|
||||
CreatedAt field.Time // 创建时间
|
||||
UpdatedAt field.Time // 更新时间
|
||||
DeletedAt field.Field // 删除时间
|
||||
@@ -84,6 +86,7 @@ func (s *scaStorageConfig) updateTableName(table string) *scaStorageConfig {
|
||||
s.SecretKey = field.NewString(table, "secret_key")
|
||||
s.Bucket = field.NewString(table, "bucket")
|
||||
s.Region = field.NewString(table, "region")
|
||||
s.Capacity = field.NewInt64(table, "capacity")
|
||||
s.CreatedAt = field.NewTime(table, "created_at")
|
||||
s.UpdatedAt = field.NewTime(table, "updated_at")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
@@ -103,7 +106,7 @@ func (s *scaStorageConfig) GetFieldByName(fieldName string) (field.OrderExpr, bo
|
||||
}
|
||||
|
||||
func (s *scaStorageConfig) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 11)
|
||||
s.fieldMap = make(map[string]field.Expr, 12)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["user_id"] = s.UserID
|
||||
s.fieldMap["provider"] = s.Provider
|
||||
@@ -112,6 +115,7 @@ func (s *scaStorageConfig) fillFieldMap() {
|
||||
s.fieldMap["secret_key"] = s.SecretKey
|
||||
s.fieldMap["bucket"] = s.Bucket
|
||||
s.fieldMap["region"] = s.Region
|
||||
s.fieldMap["capacity"] = s.Capacity
|
||||
s.fieldMap["created_at"] = s.CreatedAt
|
||||
s.fieldMap["updated_at"] = s.UpdatedAt
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
|
421
app/auth/model/mysql/query/sca_storage_extra.gen.go
Normal file
@@ -0,0 +1,421 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaStorageExtra(db *gorm.DB, opts ...gen.DOOption) scaStorageExtra {
|
||||
_scaStorageExtra := scaStorageExtra{}
|
||||
|
||||
_scaStorageExtra.scaStorageExtraDo.UseDB(db, opts...)
|
||||
_scaStorageExtra.scaStorageExtraDo.UseModel(&model.ScaStorageExtra{})
|
||||
|
||||
tableName := _scaStorageExtra.scaStorageExtraDo.TableName()
|
||||
_scaStorageExtra.ALL = field.NewAsterisk(tableName)
|
||||
_scaStorageExtra.ID = field.NewInt64(tableName, "id")
|
||||
_scaStorageExtra.UserID = field.NewString(tableName, "user_id")
|
||||
_scaStorageExtra.InfoID = field.NewInt64(tableName, "info_id")
|
||||
_scaStorageExtra.Category = field.NewString(tableName, "category")
|
||||
_scaStorageExtra.Tag = field.NewString(tableName, "tag")
|
||||
_scaStorageExtra.IsAnime = field.NewString(tableName, "is_anime")
|
||||
_scaStorageExtra.Landscape = field.NewString(tableName, "landscape")
|
||||
_scaStorageExtra.Hash = field.NewString(tableName, "hash")
|
||||
_scaStorageExtra.CreatedAt = field.NewTime(tableName, "created_at")
|
||||
_scaStorageExtra.UpdatedAt = field.NewTime(tableName, "updated_at")
|
||||
_scaStorageExtra.DeletedAt = field.NewField(tableName, "deleted_at")
|
||||
|
||||
_scaStorageExtra.fillFieldMap()
|
||||
|
||||
return _scaStorageExtra
|
||||
}
|
||||
|
||||
// scaStorageExtra 文件信息额外表
|
||||
type scaStorageExtra struct {
|
||||
scaStorageExtraDo
|
||||
|
||||
ALL field.Asterisk
|
||||
ID field.Int64 // 主键
|
||||
UserID field.String // 用户ID
|
||||
InfoID field.Int64 // 文件信息ID
|
||||
Category field.String // 分类
|
||||
Tag field.String // 标签
|
||||
IsAnime field.String // 是否是动漫图片
|
||||
Landscape field.String // 风景类型
|
||||
Hash field.String // 哈希值
|
||||
CreatedAt field.Time // 创建时间
|
||||
UpdatedAt field.Time // 更新时间
|
||||
DeletedAt field.Field // 删除时间
|
||||
|
||||
fieldMap map[string]field.Expr
|
||||
}
|
||||
|
||||
func (s scaStorageExtra) Table(newTableName string) *scaStorageExtra {
|
||||
s.scaStorageExtraDo.UseTable(newTableName)
|
||||
return s.updateTableName(newTableName)
|
||||
}
|
||||
|
||||
func (s scaStorageExtra) As(alias string) *scaStorageExtra {
|
||||
s.scaStorageExtraDo.DO = *(s.scaStorageExtraDo.As(alias).(*gen.DO))
|
||||
return s.updateTableName(alias)
|
||||
}
|
||||
|
||||
func (s *scaStorageExtra) updateTableName(table string) *scaStorageExtra {
|
||||
s.ALL = field.NewAsterisk(table)
|
||||
s.ID = field.NewInt64(table, "id")
|
||||
s.UserID = field.NewString(table, "user_id")
|
||||
s.InfoID = field.NewInt64(table, "info_id")
|
||||
s.Category = field.NewString(table, "category")
|
||||
s.Tag = field.NewString(table, "tag")
|
||||
s.IsAnime = field.NewString(table, "is_anime")
|
||||
s.Landscape = field.NewString(table, "landscape")
|
||||
s.Hash = field.NewString(table, "hash")
|
||||
s.CreatedAt = field.NewTime(table, "created_at")
|
||||
s.UpdatedAt = field.NewTime(table, "updated_at")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
|
||||
s.fillFieldMap()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scaStorageExtra) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
|
||||
_f, ok := s.fieldMap[fieldName]
|
||||
if !ok || _f == nil {
|
||||
return nil, false
|
||||
}
|
||||
_oe, ok := _f.(field.OrderExpr)
|
||||
return _oe, ok
|
||||
}
|
||||
|
||||
func (s *scaStorageExtra) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 11)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["user_id"] = s.UserID
|
||||
s.fieldMap["info_id"] = s.InfoID
|
||||
s.fieldMap["category"] = s.Category
|
||||
s.fieldMap["tag"] = s.Tag
|
||||
s.fieldMap["is_anime"] = s.IsAnime
|
||||
s.fieldMap["landscape"] = s.Landscape
|
||||
s.fieldMap["hash"] = s.Hash
|
||||
s.fieldMap["created_at"] = s.CreatedAt
|
||||
s.fieldMap["updated_at"] = s.UpdatedAt
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
}
|
||||
|
||||
func (s scaStorageExtra) clone(db *gorm.DB) scaStorageExtra {
|
||||
s.scaStorageExtraDo.ReplaceConnPool(db.Statement.ConnPool)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s scaStorageExtra) replaceDB(db *gorm.DB) scaStorageExtra {
|
||||
s.scaStorageExtraDo.ReplaceDB(db)
|
||||
return s
|
||||
}
|
||||
|
||||
type scaStorageExtraDo struct{ gen.DO }
|
||||
|
||||
type IScaStorageExtraDo interface {
|
||||
gen.SubQuery
|
||||
Debug() IScaStorageExtraDo
|
||||
WithContext(ctx context.Context) IScaStorageExtraDo
|
||||
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
|
||||
ReplaceDB(db *gorm.DB)
|
||||
ReadDB() IScaStorageExtraDo
|
||||
WriteDB() IScaStorageExtraDo
|
||||
As(alias string) gen.Dao
|
||||
Session(config *gorm.Session) IScaStorageExtraDo
|
||||
Columns(cols ...field.Expr) gen.Columns
|
||||
Clauses(conds ...clause.Expression) IScaStorageExtraDo
|
||||
Not(conds ...gen.Condition) IScaStorageExtraDo
|
||||
Or(conds ...gen.Condition) IScaStorageExtraDo
|
||||
Select(conds ...field.Expr) IScaStorageExtraDo
|
||||
Where(conds ...gen.Condition) IScaStorageExtraDo
|
||||
Order(conds ...field.Expr) IScaStorageExtraDo
|
||||
Distinct(cols ...field.Expr) IScaStorageExtraDo
|
||||
Omit(cols ...field.Expr) IScaStorageExtraDo
|
||||
Join(table schema.Tabler, on ...field.Expr) IScaStorageExtraDo
|
||||
LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageExtraDo
|
||||
RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageExtraDo
|
||||
Group(cols ...field.Expr) IScaStorageExtraDo
|
||||
Having(conds ...gen.Condition) IScaStorageExtraDo
|
||||
Limit(limit int) IScaStorageExtraDo
|
||||
Offset(offset int) IScaStorageExtraDo
|
||||
Count() (count int64, err error)
|
||||
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageExtraDo
|
||||
Unscoped() IScaStorageExtraDo
|
||||
Create(values ...*model.ScaStorageExtra) error
|
||||
CreateInBatches(values []*model.ScaStorageExtra, batchSize int) error
|
||||
Save(values ...*model.ScaStorageExtra) error
|
||||
First() (*model.ScaStorageExtra, error)
|
||||
Take() (*model.ScaStorageExtra, error)
|
||||
Last() (*model.ScaStorageExtra, error)
|
||||
Find() ([]*model.ScaStorageExtra, error)
|
||||
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageExtra, err error)
|
||||
FindInBatches(result *[]*model.ScaStorageExtra, batchSize int, fc func(tx gen.Dao, batch int) error) error
|
||||
Pluck(column field.Expr, dest interface{}) error
|
||||
Delete(...*model.ScaStorageExtra) (info gen.ResultInfo, err error)
|
||||
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
Updates(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateFrom(q gen.SubQuery) gen.Dao
|
||||
Attrs(attrs ...field.AssignExpr) IScaStorageExtraDo
|
||||
Assign(attrs ...field.AssignExpr) IScaStorageExtraDo
|
||||
Joins(fields ...field.RelationField) IScaStorageExtraDo
|
||||
Preload(fields ...field.RelationField) IScaStorageExtraDo
|
||||
FirstOrInit() (*model.ScaStorageExtra, error)
|
||||
FirstOrCreate() (*model.ScaStorageExtra, error)
|
||||
FindByPage(offset int, limit int) (result []*model.ScaStorageExtra, count int64, err error)
|
||||
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
|
||||
Scan(result interface{}) (err error)
|
||||
Returning(value interface{}, columns ...string) IScaStorageExtraDo
|
||||
UnderlyingDB() *gorm.DB
|
||||
schema.Tabler
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Debug() IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Debug())
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) WithContext(ctx context.Context) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.WithContext(ctx))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) ReadDB() IScaStorageExtraDo {
|
||||
return s.Clauses(dbresolver.Read)
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) WriteDB() IScaStorageExtraDo {
|
||||
return s.Clauses(dbresolver.Write)
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Session(config *gorm.Session) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Session(config))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Clauses(conds ...clause.Expression) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Clauses(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Returning(value interface{}, columns ...string) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Returning(value, columns...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Not(conds ...gen.Condition) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Not(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Or(conds ...gen.Condition) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Or(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Select(conds ...field.Expr) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Select(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Where(conds ...gen.Condition) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Where(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Order(conds ...field.Expr) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Order(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Distinct(cols ...field.Expr) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Distinct(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Omit(cols ...field.Expr) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Omit(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Join(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.LeftJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.RightJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Group(cols ...field.Expr) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Group(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Having(conds ...gen.Condition) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Having(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Limit(limit int) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Limit(limit))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Offset(offset int) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Offset(offset))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Scopes(funcs...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Unscoped() IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Unscoped())
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Create(values ...*model.ScaStorageExtra) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Create(values)
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) CreateInBatches(values []*model.ScaStorageExtra, batchSize int) error {
|
||||
return s.DO.CreateInBatches(values, batchSize)
|
||||
}
|
||||
|
||||
// Save : !!! underlying implementation is different with GORM
|
||||
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
|
||||
func (s scaStorageExtraDo) Save(values ...*model.ScaStorageExtra) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Save(values)
|
||||
}
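// Illustrative sketch (not part of the generated file): the upsert behaviour
// described in the Save comment above. query.Use as the entry point and the
// field values are assumptions made for illustration; only Save itself comes
// from the generated interface.
func upsertExtra(ctx context.Context, db *gorm.DB) error {
	e := query.Use(db).ScaStorageExtra                           // assumed gen entry point
	row := &model.ScaStorageExtra{ID: 1, Category: "landscape"} // hypothetical row
	// Inserts when id=1 is absent, otherwise overwrites every column,
	// i.e. db.Clauses(clause.OnConflict{UpdateAll: true}).Create(row).
	return e.WithContext(ctx).Save(row)
}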
|
||||
|
||||
func (s scaStorageExtraDo) First() (*model.ScaStorageExtra, error) {
|
||||
if result, err := s.DO.First(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageExtra), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Take() (*model.ScaStorageExtra, error) {
|
||||
if result, err := s.DO.Take(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageExtra), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Last() (*model.ScaStorageExtra, error) {
|
||||
if result, err := s.DO.Last(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageExtra), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Find() ([]*model.ScaStorageExtra, error) {
|
||||
result, err := s.DO.Find()
|
||||
return result.([]*model.ScaStorageExtra), err
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageExtra, err error) {
|
||||
buf := make([]*model.ScaStorageExtra, 0, batchSize)
|
||||
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
|
||||
defer func() { results = append(results, buf...) }()
|
||||
return fc(tx, batch)
|
||||
})
|
||||
return results, err
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) FindInBatches(result *[]*model.ScaStorageExtra, batchSize int, fc func(tx gen.Dao, batch int) error) error {
|
||||
return s.DO.FindInBatches(result, batchSize, fc)
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Attrs(attrs ...field.AssignExpr) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Attrs(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Assign(attrs ...field.AssignExpr) IScaStorageExtraDo {
|
||||
return s.withDO(s.DO.Assign(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Joins(fields ...field.RelationField) IScaStorageExtraDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Joins(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Preload(fields ...field.RelationField) IScaStorageExtraDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Preload(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) FirstOrInit() (*model.ScaStorageExtra, error) {
|
||||
if result, err := s.DO.FirstOrInit(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageExtra), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) FirstOrCreate() (*model.ScaStorageExtra, error) {
|
||||
if result, err := s.DO.FirstOrCreate(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageExtra), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) FindByPage(offset int, limit int) (result []*model.ScaStorageExtra, count int64, err error) {
|
||||
result, err = s.Offset(offset).Limit(limit).Find()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if size := len(result); 0 < limit && 0 < size && size < limit {
|
||||
count = int64(size + offset)
|
||||
return
|
||||
}
|
||||
|
||||
count, err = s.Offset(-1).Limit(-1).Count()
|
||||
return
|
||||
}
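// Illustrative sketch (not generated code): FindByPage above skips the extra
// COUNT query when the returned page is non-empty but shorter than limit, and
// infers the total as offset+len(result). query.Use and the user filter are
// assumptions for the example.
func listExtrasByPage(ctx context.Context, db *gorm.DB, userID string, page, size int) ([]*model.ScaStorageExtra, int64, error) {
	e := query.Use(db).ScaStorageExtra
	return e.WithContext(ctx).
		Where(e.UserID.Eq(userID)).
		Order(e.CreatedAt.Desc()).
		FindByPage((page-1)*size, size)
}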
|
||||
|
||||
func (s scaStorageExtraDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
|
||||
count, err = s.Count()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = s.Offset(offset).Limit(limit).Scan(result)
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Scan(result interface{}) (err error) {
|
||||
return s.DO.Scan(result)
|
||||
}
|
||||
|
||||
func (s scaStorageExtraDo) Delete(models ...*model.ScaStorageExtra) (result gen.ResultInfo, err error) {
|
||||
return s.DO.Delete(models)
|
||||
}
|
||||
|
||||
func (s *scaStorageExtraDo) withDO(do gen.Dao) *scaStorageExtraDo {
|
||||
s.DO = *do.(*gen.DO)
|
||||
return s
|
||||
}
|
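// Illustrative sketch (not generated code): the DO above is normally reached
// through the package-level query.Use entry point, which this diff does not
// show and is assumed here. A filtered read using the generated field expressions:
func recentExtrasByCategory(ctx context.Context, db *gorm.DB, userID, category string) ([]*model.ScaStorageExtra, error) {
	e := query.Use(db).ScaStorageExtra
	return e.WithContext(ctx).
		Where(e.UserID.Eq(userID), e.Category.Eq(category)).
		Order(e.CreatedAt.Desc()).
		Limit(50).
		Find()
}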
@@ -37,17 +37,11 @@ func newScaStorageInfo(db *gorm.DB, opts ...gen.DOOption) scaStorageInfo {
	_scaStorageInfo.FileType = field.NewString(tableName, "file_type")
	_scaStorageInfo.Width = field.NewFloat64(tableName, "width")
	_scaStorageInfo.Height = field.NewFloat64(tableName, "height")
	_scaStorageInfo.ThumbID = field.NewInt64(tableName, "thumb_id")
	_scaStorageInfo.Category = field.NewString(tableName, "category")
	_scaStorageInfo.Tag = field.NewString(tableName, "tag")
	_scaStorageInfo.Type = field.NewString(tableName, "type")
	_scaStorageInfo.LocationID = field.NewInt64(tableName, "location_id")
	_scaStorageInfo.Hash = field.NewString(tableName, "hash")
	_scaStorageInfo.IsAnime = field.NewString(tableName, "is_anime")
	_scaStorageInfo.FaceID = field.NewInt64(tableName, "face_id")
	_scaStorageInfo.Landscape = field.NewString(tableName, "landscape")
	_scaStorageInfo.IsDisplayed = field.NewInt64(tableName, "is_displayed")
	_scaStorageInfo.AlbumID = field.NewInt64(tableName, "album_id")
	_scaStorageInfo.LocationID = field.NewInt64(tableName, "location_id")
	_scaStorageInfo.CreatedAt = field.NewTime(tableName, "created_at")
	_scaStorageInfo.UpdatedAt = field.NewTime(tableName, "updated_at")
	_scaStorageInfo.DeletedAt = field.NewField(tableName, "deleted_at")

@@ -72,17 +66,11 @@ type scaStorageInfo struct {
	FileType    field.String  // 文件类型
	Width       field.Float64 // 宽
	Height      field.Float64 // 高
	ThumbID     field.Int64   // 缩略图id
	Category    field.String  // 分类
	Tag         field.String  // 标签
	Type        field.String  // 类型
	LocationID  field.Int64   // 地址ID
	Hash        field.String  // 哈希值
	IsAnime     field.String  // 是否是动漫图片
	FaceID      field.Int64   // 人像ID
	Landscape   field.String  // 风景类型
	IsDisplayed field.Int64   // 是否隐藏(0 不隐藏 1 隐藏)
	AlbumID     field.Int64   // 相册ID
	LocationID  field.Int64   // 地址ID
	CreatedAt   field.Time    // 创建时间
	UpdatedAt   field.Time    // 更新时间
	DeletedAt   field.Field   // 删除时间

@@ -112,17 +100,11 @@ func (s *scaStorageInfo) updateTableName(table string) *scaStorageInfo {
	s.FileType = field.NewString(table, "file_type")
	s.Width = field.NewFloat64(table, "width")
	s.Height = field.NewFloat64(table, "height")
	s.ThumbID = field.NewInt64(table, "thumb_id")
	s.Category = field.NewString(table, "category")
	s.Tag = field.NewString(table, "tag")
	s.Type = field.NewString(table, "type")
	s.LocationID = field.NewInt64(table, "location_id")
	s.Hash = field.NewString(table, "hash")
	s.IsAnime = field.NewString(table, "is_anime")
	s.FaceID = field.NewInt64(table, "face_id")
	s.Landscape = field.NewString(table, "landscape")
	s.IsDisplayed = field.NewInt64(table, "is_displayed")
	s.AlbumID = field.NewInt64(table, "album_id")
	s.LocationID = field.NewInt64(table, "location_id")
	s.CreatedAt = field.NewTime(table, "created_at")
	s.UpdatedAt = field.NewTime(table, "updated_at")
	s.DeletedAt = field.NewField(table, "deleted_at")

@@ -142,7 +124,7 @@ func (s *scaStorageInfo) GetFieldByName(fieldName string) (field.OrderExpr, bool
}

func (s *scaStorageInfo) fillFieldMap() {
	s.fieldMap = make(map[string]field.Expr, 24)
	s.fieldMap = make(map[string]field.Expr, 18)
	s.fieldMap["id"] = s.ID
	s.fieldMap["user_id"] = s.UserID
	s.fieldMap["provider"] = s.Provider

@@ -153,17 +135,11 @@ func (s *scaStorageInfo) fillFieldMap() {
	s.fieldMap["file_type"] = s.FileType
	s.fieldMap["width"] = s.Width
	s.fieldMap["height"] = s.Height
	s.fieldMap["thumb_id"] = s.ThumbID
	s.fieldMap["category"] = s.Category
	s.fieldMap["tag"] = s.Tag
	s.fieldMap["type"] = s.Type
	s.fieldMap["location_id"] = s.LocationID
	s.fieldMap["hash"] = s.Hash
	s.fieldMap["is_anime"] = s.IsAnime
	s.fieldMap["face_id"] = s.FaceID
	s.fieldMap["landscape"] = s.Landscape
	s.fieldMap["is_displayed"] = s.IsDisplayed
	s.fieldMap["album_id"] = s.AlbumID
	s.fieldMap["location_id"] = s.LocationID
	s.fieldMap["created_at"] = s.CreatedAt
	s.fieldMap["updated_at"] = s.UpdatedAt
	s.fieldMap["deleted_at"] = s.DeletedAt

@@ -29,14 +29,11 @@ func newScaStorageLocation(db *gorm.DB, opts ...gen.DOOption) scaStorageLocation
	_scaStorageLocation.ALL = field.NewAsterisk(tableName)
	_scaStorageLocation.ID = field.NewInt64(tableName, "id")
	_scaStorageLocation.UserID = field.NewString(tableName, "user_id")
	_scaStorageLocation.Provider = field.NewString(tableName, "provider")
	_scaStorageLocation.Bucket = field.NewString(tableName, "bucket")
	_scaStorageLocation.Country = field.NewString(tableName, "country")
	_scaStorageLocation.Province = field.NewString(tableName, "province")
	_scaStorageLocation.City = field.NewString(tableName, "city")
	_scaStorageLocation.Latitude = field.NewString(tableName, "latitude")
	_scaStorageLocation.Longitude = field.NewString(tableName, "longitude")
	_scaStorageLocation.Total = field.NewInt64(tableName, "total")
	_scaStorageLocation.CoverImage = field.NewString(tableName, "cover_image")
	_scaStorageLocation.Version = field.NewField(tableName, "version")
	_scaStorageLocation.CreatedAt = field.NewTime(tableName, "created_at")

@@ -55,14 +52,11 @@ type scaStorageLocation struct {
	ALL        field.Asterisk
	ID         field.Int64  // 主键
	UserID     field.String // 用户id
	Provider   field.String // 供应商
	Bucket     field.String // 存储桶
	Country    field.String // 国家
	Province   field.String // 省
	City       field.String // 城市
	Latitude   field.String // 纬度
	Longitude  field.String // 经度
	Total      field.Int64  // 数量
	CoverImage field.String // 封面图片
	Version    field.Field  // 版本
	CreatedAt  field.Time   // 创建时间

@@ -86,14 +80,11 @@ func (s *scaStorageLocation) updateTableName(table string) *scaStorageLocation {
	s.ALL = field.NewAsterisk(table)
	s.ID = field.NewInt64(table, "id")
	s.UserID = field.NewString(table, "user_id")
	s.Provider = field.NewString(table, "provider")
	s.Bucket = field.NewString(table, "bucket")
	s.Country = field.NewString(table, "country")
	s.Province = field.NewString(table, "province")
	s.City = field.NewString(table, "city")
	s.Latitude = field.NewString(table, "latitude")
	s.Longitude = field.NewString(table, "longitude")
	s.Total = field.NewInt64(table, "total")
	s.CoverImage = field.NewString(table, "cover_image")
	s.Version = field.NewField(table, "version")
	s.CreatedAt = field.NewTime(table, "created_at")

@@ -115,17 +106,14 @@ func (s *scaStorageLocation) GetFieldByName(fieldName string) (field.OrderExpr,
}

func (s *scaStorageLocation) fillFieldMap() {
	s.fieldMap = make(map[string]field.Expr, 15)
	s.fieldMap = make(map[string]field.Expr, 12)
	s.fieldMap["id"] = s.ID
	s.fieldMap["user_id"] = s.UserID
	s.fieldMap["provider"] = s.Provider
	s.fieldMap["bucket"] = s.Bucket
	s.fieldMap["country"] = s.Country
	s.fieldMap["province"] = s.Province
	s.fieldMap["city"] = s.City
	s.fieldMap["latitude"] = s.Latitude
	s.fieldMap["longitude"] = s.Longitude
	s.fieldMap["total"] = s.Total
	s.fieldMap["cover_image"] = s.CoverImage
	s.fieldMap["version"] = s.Version
	s.fieldMap["created_at"] = s.CreatedAt

@@ -30,12 +30,13 @@ func newScaStorageShare(db *gorm.DB, opts ...gen.DOOption) scaStorageShare {
	_scaStorageShare.ID = field.NewInt64(tableName, "id")
	_scaStorageShare.UserID = field.NewString(tableName, "user_id")
	_scaStorageShare.AlbumID = field.NewInt64(tableName, "album_id")
	_scaStorageShare.ShareCode = field.NewString(tableName, "share_code")
	_scaStorageShare.InviteCode = field.NewString(tableName, "invite_code")
	_scaStorageShare.ExpireTime = field.NewTime(tableName, "expire_time")
	_scaStorageShare.ValidityPeriod = field.NewInt64(tableName, "validity_period")
	_scaStorageShare.Status = field.NewInt64(tableName, "status")
	_scaStorageShare.AccessPassword = field.NewString(tableName, "access_password")
	_scaStorageShare.VisitLimit = field.NewInt64(tableName, "visit_limit")
	_scaStorageShare.ImageCount = field.NewInt64(tableName, "image_count")
	_scaStorageShare.Version = field.NewField(tableName, "version")
	_scaStorageShare.CreatedAt = field.NewTime(tableName, "created_at")
	_scaStorageShare.UpdatedAt = field.NewTime(tableName, "updated_at")

@@ -54,12 +55,13 @@ type scaStorageShare struct {
	ID             field.Int64  // 主键
	UserID         field.String // 用户ID
	AlbumID        field.Int64  // 相册ID
	ShareCode      field.String // 分享码(用于访问分享链接)
	InviteCode     field.String // 邀请码(用于访问分享链接)
	ExpireTime     field.Time   // 过期时间
	ValidityPeriod field.Int64  // 有效期
	Status         field.Int64  // 是否失效(0 有效 -1已失效 1 永久)
	AccessPassword field.String // 访问密码
	VisitLimit     field.Int64  // 限制次数
	ImageCount     field.Int64  // 图片数量
	Version        field.Field  // 版本
	CreatedAt      field.Time   // 创建时间
	UpdatedAt      field.Time   // 更新时间

@@ -83,12 +85,13 @@ func (s *scaStorageShare) updateTableName(table string) *scaStorageShare {
	s.ID = field.NewInt64(table, "id")
	s.UserID = field.NewString(table, "user_id")
	s.AlbumID = field.NewInt64(table, "album_id")
	s.ShareCode = field.NewString(table, "share_code")
	s.InviteCode = field.NewString(table, "invite_code")
	s.ExpireTime = field.NewTime(table, "expire_time")
	s.ValidityPeriod = field.NewInt64(table, "validity_period")
	s.Status = field.NewInt64(table, "status")
	s.AccessPassword = field.NewString(table, "access_password")
	s.VisitLimit = field.NewInt64(table, "visit_limit")
	s.ImageCount = field.NewInt64(table, "image_count")
	s.Version = field.NewField(table, "version")
	s.CreatedAt = field.NewTime(table, "created_at")
	s.UpdatedAt = field.NewTime(table, "updated_at")

@@ -109,16 +112,17 @@ func (s *scaStorageShare) GetFieldByName(fieldName string) (field.OrderExpr, boo
}

func (s *scaStorageShare) fillFieldMap() {
	s.fieldMap = make(map[string]field.Expr, 13)
	s.fieldMap = make(map[string]field.Expr, 14)
	s.fieldMap["id"] = s.ID
	s.fieldMap["user_id"] = s.UserID
	s.fieldMap["album_id"] = s.AlbumID
	s.fieldMap["share_code"] = s.ShareCode
	s.fieldMap["invite_code"] = s.InviteCode
	s.fieldMap["expire_time"] = s.ExpireTime
	s.fieldMap["validity_period"] = s.ValidityPeriod
	s.fieldMap["status"] = s.Status
	s.fieldMap["access_password"] = s.AccessPassword
	s.fieldMap["visit_limit"] = s.VisitLimit
	s.fieldMap["image_count"] = s.ImageCount
	s.fieldMap["version"] = s.Version
	s.fieldMap["created_at"] = s.CreatedAt
	s.fieldMap["updated_at"] = s.UpdatedAt

@@ -1,401 +0,0 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaStorageTag(db *gorm.DB, opts ...gen.DOOption) scaStorageTag {
|
||||
_scaStorageTag := scaStorageTag{}
|
||||
|
||||
_scaStorageTag.scaStorageTagDo.UseDB(db, opts...)
|
||||
_scaStorageTag.scaStorageTagDo.UseModel(&model.ScaStorageTag{})
|
||||
|
||||
tableName := _scaStorageTag.scaStorageTagDo.TableName()
|
||||
_scaStorageTag.ALL = field.NewAsterisk(tableName)
|
||||
_scaStorageTag.ID = field.NewInt64(tableName, "id")
|
||||
_scaStorageTag.FileID = field.NewInt64(tableName, "file_id")
|
||||
_scaStorageTag.TagID = field.NewInt64(tableName, "tag_id")
|
||||
_scaStorageTag.CreatedAt = field.NewTime(tableName, "created_at")
|
||||
_scaStorageTag.UpdatedAt = field.NewTime(tableName, "updated_at")
|
||||
_scaStorageTag.DeletedAt = field.NewField(tableName, "deleted_at")
|
||||
|
||||
_scaStorageTag.fillFieldMap()
|
||||
|
||||
return _scaStorageTag
|
||||
}
|
||||
|
||||
// scaStorageTag 标签表
|
||||
type scaStorageTag struct {
|
||||
scaStorageTagDo
|
||||
|
||||
ALL field.Asterisk
|
||||
ID field.Int64 // 主键
|
||||
FileID field.Int64 // 文件ID
|
||||
TagID field.Int64 // 标签ID
|
||||
CreatedAt field.Time // 创建时间
|
||||
UpdatedAt field.Time // 更新时间
|
||||
DeletedAt field.Field // 删除时间
|
||||
|
||||
fieldMap map[string]field.Expr
|
||||
}
|
||||
|
||||
func (s scaStorageTag) Table(newTableName string) *scaStorageTag {
|
||||
s.scaStorageTagDo.UseTable(newTableName)
|
||||
return s.updateTableName(newTableName)
|
||||
}
|
||||
|
||||
func (s scaStorageTag) As(alias string) *scaStorageTag {
|
||||
s.scaStorageTagDo.DO = *(s.scaStorageTagDo.As(alias).(*gen.DO))
|
||||
return s.updateTableName(alias)
|
||||
}
|
||||
|
||||
func (s *scaStorageTag) updateTableName(table string) *scaStorageTag {
|
||||
s.ALL = field.NewAsterisk(table)
|
||||
s.ID = field.NewInt64(table, "id")
|
||||
s.FileID = field.NewInt64(table, "file_id")
|
||||
s.TagID = field.NewInt64(table, "tag_id")
|
||||
s.CreatedAt = field.NewTime(table, "created_at")
|
||||
s.UpdatedAt = field.NewTime(table, "updated_at")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
|
||||
s.fillFieldMap()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scaStorageTag) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
|
||||
_f, ok := s.fieldMap[fieldName]
|
||||
if !ok || _f == nil {
|
||||
return nil, false
|
||||
}
|
||||
_oe, ok := _f.(field.OrderExpr)
|
||||
return _oe, ok
|
||||
}
|
||||
|
||||
func (s *scaStorageTag) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 6)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["file_id"] = s.FileID
|
||||
s.fieldMap["tag_id"] = s.TagID
|
||||
s.fieldMap["created_at"] = s.CreatedAt
|
||||
s.fieldMap["updated_at"] = s.UpdatedAt
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
}
|
||||
|
||||
func (s scaStorageTag) clone(db *gorm.DB) scaStorageTag {
|
||||
s.scaStorageTagDo.ReplaceConnPool(db.Statement.ConnPool)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s scaStorageTag) replaceDB(db *gorm.DB) scaStorageTag {
|
||||
s.scaStorageTagDo.ReplaceDB(db)
|
||||
return s
|
||||
}
|
||||
|
||||
type scaStorageTagDo struct{ gen.DO }
|
||||
|
||||
type IScaStorageTagDo interface {
|
||||
gen.SubQuery
|
||||
Debug() IScaStorageTagDo
|
||||
WithContext(ctx context.Context) IScaStorageTagDo
|
||||
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
|
||||
ReplaceDB(db *gorm.DB)
|
||||
ReadDB() IScaStorageTagDo
|
||||
WriteDB() IScaStorageTagDo
|
||||
As(alias string) gen.Dao
|
||||
Session(config *gorm.Session) IScaStorageTagDo
|
||||
Columns(cols ...field.Expr) gen.Columns
|
||||
Clauses(conds ...clause.Expression) IScaStorageTagDo
|
||||
Not(conds ...gen.Condition) IScaStorageTagDo
|
||||
Or(conds ...gen.Condition) IScaStorageTagDo
|
||||
Select(conds ...field.Expr) IScaStorageTagDo
|
||||
Where(conds ...gen.Condition) IScaStorageTagDo
|
||||
Order(conds ...field.Expr) IScaStorageTagDo
|
||||
Distinct(cols ...field.Expr) IScaStorageTagDo
|
||||
Omit(cols ...field.Expr) IScaStorageTagDo
|
||||
Join(table schema.Tabler, on ...field.Expr) IScaStorageTagDo
|
||||
LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo
|
||||
RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo
|
||||
Group(cols ...field.Expr) IScaStorageTagDo
|
||||
Having(conds ...gen.Condition) IScaStorageTagDo
|
||||
Limit(limit int) IScaStorageTagDo
|
||||
Offset(offset int) IScaStorageTagDo
|
||||
Count() (count int64, err error)
|
||||
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagDo
|
||||
Unscoped() IScaStorageTagDo
|
||||
Create(values ...*model.ScaStorageTag) error
|
||||
CreateInBatches(values []*model.ScaStorageTag, batchSize int) error
|
||||
Save(values ...*model.ScaStorageTag) error
|
||||
First() (*model.ScaStorageTag, error)
|
||||
Take() (*model.ScaStorageTag, error)
|
||||
Last() (*model.ScaStorageTag, error)
|
||||
Find() ([]*model.ScaStorageTag, error)
|
||||
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTag, err error)
|
||||
FindInBatches(result *[]*model.ScaStorageTag, batchSize int, fc func(tx gen.Dao, batch int) error) error
|
||||
Pluck(column field.Expr, dest interface{}) error
|
||||
Delete(...*model.ScaStorageTag) (info gen.ResultInfo, err error)
|
||||
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
Updates(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateFrom(q gen.SubQuery) gen.Dao
|
||||
Attrs(attrs ...field.AssignExpr) IScaStorageTagDo
|
||||
Assign(attrs ...field.AssignExpr) IScaStorageTagDo
|
||||
Joins(fields ...field.RelationField) IScaStorageTagDo
|
||||
Preload(fields ...field.RelationField) IScaStorageTagDo
|
||||
FirstOrInit() (*model.ScaStorageTag, error)
|
||||
FirstOrCreate() (*model.ScaStorageTag, error)
|
||||
FindByPage(offset int, limit int) (result []*model.ScaStorageTag, count int64, err error)
|
||||
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
|
||||
Scan(result interface{}) (err error)
|
||||
Returning(value interface{}, columns ...string) IScaStorageTagDo
|
||||
UnderlyingDB() *gorm.DB
|
||||
schema.Tabler
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Debug() IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Debug())
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) WithContext(ctx context.Context) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.WithContext(ctx))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) ReadDB() IScaStorageTagDo {
|
||||
return s.Clauses(dbresolver.Read)
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) WriteDB() IScaStorageTagDo {
|
||||
return s.Clauses(dbresolver.Write)
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Session(config *gorm.Session) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Session(config))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Clauses(conds ...clause.Expression) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Clauses(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Returning(value interface{}, columns ...string) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Returning(value, columns...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Not(conds ...gen.Condition) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Not(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Or(conds ...gen.Condition) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Or(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Select(conds ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Select(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Where(conds ...gen.Condition) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Where(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Order(conds ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Order(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Distinct(cols ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Distinct(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Omit(cols ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Omit(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Join(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.LeftJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.RightJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Group(cols ...field.Expr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Group(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Having(conds ...gen.Condition) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Having(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Limit(limit int) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Limit(limit))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Offset(offset int) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Offset(offset))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Scopes(funcs...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Unscoped() IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Unscoped())
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Create(values ...*model.ScaStorageTag) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Create(values)
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) CreateInBatches(values []*model.ScaStorageTag, batchSize int) error {
|
||||
return s.DO.CreateInBatches(values, batchSize)
|
||||
}
|
||||
|
||||
// Save : !!! underlying implementation is different with GORM
|
||||
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
|
||||
func (s scaStorageTagDo) Save(values ...*model.ScaStorageTag) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Save(values)
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) First() (*model.ScaStorageTag, error) {
|
||||
if result, err := s.DO.First(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTag), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Take() (*model.ScaStorageTag, error) {
|
||||
if result, err := s.DO.Take(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTag), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Last() (*model.ScaStorageTag, error) {
|
||||
if result, err := s.DO.Last(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTag), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Find() ([]*model.ScaStorageTag, error) {
|
||||
result, err := s.DO.Find()
|
||||
return result.([]*model.ScaStorageTag), err
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTag, err error) {
|
||||
buf := make([]*model.ScaStorageTag, 0, batchSize)
|
||||
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
|
||||
defer func() { results = append(results, buf...) }()
|
||||
return fc(tx, batch)
|
||||
})
|
||||
return results, err
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) FindInBatches(result *[]*model.ScaStorageTag, batchSize int, fc func(tx gen.Dao, batch int) error) error {
|
||||
return s.DO.FindInBatches(result, batchSize, fc)
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Attrs(attrs ...field.AssignExpr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Attrs(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Assign(attrs ...field.AssignExpr) IScaStorageTagDo {
|
||||
return s.withDO(s.DO.Assign(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Joins(fields ...field.RelationField) IScaStorageTagDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Joins(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Preload(fields ...field.RelationField) IScaStorageTagDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Preload(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) FirstOrInit() (*model.ScaStorageTag, error) {
|
||||
if result, err := s.DO.FirstOrInit(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTag), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) FirstOrCreate() (*model.ScaStorageTag, error) {
|
||||
if result, err := s.DO.FirstOrCreate(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTag), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) FindByPage(offset int, limit int) (result []*model.ScaStorageTag, count int64, err error) {
|
||||
result, err = s.Offset(offset).Limit(limit).Find()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if size := len(result); 0 < limit && 0 < size && size < limit {
|
||||
count = int64(size + offset)
|
||||
return
|
||||
}
|
||||
|
||||
count, err = s.Offset(-1).Limit(-1).Count()
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
|
||||
count, err = s.Count()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = s.Offset(offset).Limit(limit).Scan(result)
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Scan(result interface{}) (err error) {
|
||||
return s.DO.Scan(result)
|
||||
}
|
||||
|
||||
func (s scaStorageTagDo) Delete(models ...*model.ScaStorageTag) (result gen.ResultInfo, err error) {
|
||||
return s.DO.Delete(models)
|
||||
}
|
||||
|
||||
func (s *scaStorageTagDo) withDO(do gen.Dao) *scaStorageTagDo {
|
||||
s.DO = *do.(*gen.DO)
|
||||
return s
|
||||
}
|
@@ -1,400 +0,0 @@
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
// Code generated by gorm.io/gen. DO NOT EDIT.
|
||||
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
|
||||
"gorm.io/gen"
|
||||
"gorm.io/gen/field"
|
||||
|
||||
"gorm.io/plugin/dbresolver"
|
||||
|
||||
"schisandra-album-cloud-microservices/app/auth/model/mysql/model"
|
||||
)
|
||||
|
||||
func newScaStorageTagInfo(db *gorm.DB, opts ...gen.DOOption) scaStorageTagInfo {
|
||||
_scaStorageTagInfo := scaStorageTagInfo{}
|
||||
|
||||
_scaStorageTagInfo.scaStorageTagInfoDo.UseDB(db, opts...)
|
||||
_scaStorageTagInfo.scaStorageTagInfoDo.UseModel(&model.ScaStorageTagInfo{})
|
||||
|
||||
tableName := _scaStorageTagInfo.scaStorageTagInfoDo.TableName()
|
||||
_scaStorageTagInfo.ALL = field.NewAsterisk(tableName)
|
||||
_scaStorageTagInfo.ID = field.NewInt64(tableName, "id")
|
||||
_scaStorageTagInfo.TagName = field.NewString(tableName, "tag_name")
|
||||
_scaStorageTagInfo.TagKey = field.NewString(tableName, "tag_key")
|
||||
_scaStorageTagInfo.CreatedAt = field.NewTime(tableName, "created_at")
|
||||
_scaStorageTagInfo.UpdatedAt = field.NewTime(tableName, "updated_at")
|
||||
_scaStorageTagInfo.DeletedAt = field.NewField(tableName, "deleted_at")
|
||||
|
||||
_scaStorageTagInfo.fillFieldMap()
|
||||
|
||||
return _scaStorageTagInfo
|
||||
}
|
||||
|
||||
type scaStorageTagInfo struct {
|
||||
scaStorageTagInfoDo
|
||||
|
||||
ALL field.Asterisk
|
||||
ID field.Int64 // 主键
|
||||
TagName field.String // 标签名称
|
||||
TagKey field.String // 标签关键字
|
||||
CreatedAt field.Time // 创建时间
|
||||
UpdatedAt field.Time // 更新时间
|
||||
DeletedAt field.Field // 删除时间
|
||||
|
||||
fieldMap map[string]field.Expr
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfo) Table(newTableName string) *scaStorageTagInfo {
|
||||
s.scaStorageTagInfoDo.UseTable(newTableName)
|
||||
return s.updateTableName(newTableName)
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfo) As(alias string) *scaStorageTagInfo {
|
||||
s.scaStorageTagInfoDo.DO = *(s.scaStorageTagInfoDo.As(alias).(*gen.DO))
|
||||
return s.updateTableName(alias)
|
||||
}
|
||||
|
||||
func (s *scaStorageTagInfo) updateTableName(table string) *scaStorageTagInfo {
|
||||
s.ALL = field.NewAsterisk(table)
|
||||
s.ID = field.NewInt64(table, "id")
|
||||
s.TagName = field.NewString(table, "tag_name")
|
||||
s.TagKey = field.NewString(table, "tag_key")
|
||||
s.CreatedAt = field.NewTime(table, "created_at")
|
||||
s.UpdatedAt = field.NewTime(table, "updated_at")
|
||||
s.DeletedAt = field.NewField(table, "deleted_at")
|
||||
|
||||
s.fillFieldMap()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *scaStorageTagInfo) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
|
||||
_f, ok := s.fieldMap[fieldName]
|
||||
if !ok || _f == nil {
|
||||
return nil, false
|
||||
}
|
||||
_oe, ok := _f.(field.OrderExpr)
|
||||
return _oe, ok
|
||||
}
|
||||
|
||||
func (s *scaStorageTagInfo) fillFieldMap() {
|
||||
s.fieldMap = make(map[string]field.Expr, 6)
|
||||
s.fieldMap["id"] = s.ID
|
||||
s.fieldMap["tag_name"] = s.TagName
|
||||
s.fieldMap["tag_key"] = s.TagKey
|
||||
s.fieldMap["created_at"] = s.CreatedAt
|
||||
s.fieldMap["updated_at"] = s.UpdatedAt
|
||||
s.fieldMap["deleted_at"] = s.DeletedAt
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfo) clone(db *gorm.DB) scaStorageTagInfo {
|
||||
s.scaStorageTagInfoDo.ReplaceConnPool(db.Statement.ConnPool)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfo) replaceDB(db *gorm.DB) scaStorageTagInfo {
|
||||
s.scaStorageTagInfoDo.ReplaceDB(db)
|
||||
return s
|
||||
}
|
||||
|
||||
type scaStorageTagInfoDo struct{ gen.DO }
|
||||
|
||||
type IScaStorageTagInfoDo interface {
|
||||
gen.SubQuery
|
||||
Debug() IScaStorageTagInfoDo
|
||||
WithContext(ctx context.Context) IScaStorageTagInfoDo
|
||||
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
|
||||
ReplaceDB(db *gorm.DB)
|
||||
ReadDB() IScaStorageTagInfoDo
|
||||
WriteDB() IScaStorageTagInfoDo
|
||||
As(alias string) gen.Dao
|
||||
Session(config *gorm.Session) IScaStorageTagInfoDo
|
||||
Columns(cols ...field.Expr) gen.Columns
|
||||
Clauses(conds ...clause.Expression) IScaStorageTagInfoDo
|
||||
Not(conds ...gen.Condition) IScaStorageTagInfoDo
|
||||
Or(conds ...gen.Condition) IScaStorageTagInfoDo
|
||||
Select(conds ...field.Expr) IScaStorageTagInfoDo
|
||||
Where(conds ...gen.Condition) IScaStorageTagInfoDo
|
||||
Order(conds ...field.Expr) IScaStorageTagInfoDo
|
||||
Distinct(cols ...field.Expr) IScaStorageTagInfoDo
|
||||
Omit(cols ...field.Expr) IScaStorageTagInfoDo
|
||||
Join(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo
|
||||
LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo
|
||||
RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo
|
||||
Group(cols ...field.Expr) IScaStorageTagInfoDo
|
||||
Having(conds ...gen.Condition) IScaStorageTagInfoDo
|
||||
Limit(limit int) IScaStorageTagInfoDo
|
||||
Offset(offset int) IScaStorageTagInfoDo
|
||||
Count() (count int64, err error)
|
||||
Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagInfoDo
|
||||
Unscoped() IScaStorageTagInfoDo
|
||||
Create(values ...*model.ScaStorageTagInfo) error
|
||||
CreateInBatches(values []*model.ScaStorageTagInfo, batchSize int) error
|
||||
Save(values ...*model.ScaStorageTagInfo) error
|
||||
First() (*model.ScaStorageTagInfo, error)
|
||||
Take() (*model.ScaStorageTagInfo, error)
|
||||
Last() (*model.ScaStorageTagInfo, error)
|
||||
Find() ([]*model.ScaStorageTagInfo, error)
|
||||
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTagInfo, err error)
|
||||
FindInBatches(result *[]*model.ScaStorageTagInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error
|
||||
Pluck(column field.Expr, dest interface{}) error
|
||||
Delete(...*model.ScaStorageTagInfo) (info gen.ResultInfo, err error)
|
||||
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
Updates(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
|
||||
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
|
||||
UpdateFrom(q gen.SubQuery) gen.Dao
|
||||
Attrs(attrs ...field.AssignExpr) IScaStorageTagInfoDo
|
||||
Assign(attrs ...field.AssignExpr) IScaStorageTagInfoDo
|
||||
Joins(fields ...field.RelationField) IScaStorageTagInfoDo
|
||||
Preload(fields ...field.RelationField) IScaStorageTagInfoDo
|
||||
FirstOrInit() (*model.ScaStorageTagInfo, error)
|
||||
FirstOrCreate() (*model.ScaStorageTagInfo, error)
|
||||
FindByPage(offset int, limit int) (result []*model.ScaStorageTagInfo, count int64, err error)
|
||||
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
|
||||
Scan(result interface{}) (err error)
|
||||
Returning(value interface{}, columns ...string) IScaStorageTagInfoDo
|
||||
UnderlyingDB() *gorm.DB
|
||||
schema.Tabler
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Debug() IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Debug())
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) WithContext(ctx context.Context) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.WithContext(ctx))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) ReadDB() IScaStorageTagInfoDo {
|
||||
return s.Clauses(dbresolver.Read)
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) WriteDB() IScaStorageTagInfoDo {
|
||||
return s.Clauses(dbresolver.Write)
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Session(config *gorm.Session) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Session(config))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Clauses(conds ...clause.Expression) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Clauses(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Returning(value interface{}, columns ...string) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Returning(value, columns...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Not(conds ...gen.Condition) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Not(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Or(conds ...gen.Condition) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Or(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Select(conds ...field.Expr) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Select(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Where(conds ...gen.Condition) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Where(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Order(conds ...field.Expr) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Order(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Distinct(cols ...field.Expr) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Distinct(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Omit(cols ...field.Expr) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Omit(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Join(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Join(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) LeftJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.LeftJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) RightJoin(table schema.Tabler, on ...field.Expr) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.RightJoin(table, on...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Group(cols ...field.Expr) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Group(cols...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Having(conds ...gen.Condition) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Having(conds...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Limit(limit int) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Limit(limit))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Offset(offset int) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Offset(offset))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Scopes(funcs...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Unscoped() IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Unscoped())
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Create(values ...*model.ScaStorageTagInfo) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Create(values)
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) CreateInBatches(values []*model.ScaStorageTagInfo, batchSize int) error {
|
||||
return s.DO.CreateInBatches(values, batchSize)
|
||||
}
|
||||
|
||||
// Save : !!! underlying implementation is different with GORM
|
||||
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
|
||||
func (s scaStorageTagInfoDo) Save(values ...*model.ScaStorageTagInfo) error {
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.DO.Save(values)
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) First() (*model.ScaStorageTagInfo, error) {
|
||||
if result, err := s.DO.First(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTagInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Take() (*model.ScaStorageTagInfo, error) {
|
||||
if result, err := s.DO.Take(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTagInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Last() (*model.ScaStorageTagInfo, error) {
|
||||
if result, err := s.DO.Last(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTagInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Find() ([]*model.ScaStorageTagInfo, error) {
|
||||
result, err := s.DO.Find()
|
||||
return result.([]*model.ScaStorageTagInfo), err
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ScaStorageTagInfo, err error) {
|
||||
buf := make([]*model.ScaStorageTagInfo, 0, batchSize)
|
||||
err = s.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
|
||||
defer func() { results = append(results, buf...) }()
|
||||
return fc(tx, batch)
|
||||
})
|
||||
return results, err
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) FindInBatches(result *[]*model.ScaStorageTagInfo, batchSize int, fc func(tx gen.Dao, batch int) error) error {
|
||||
return s.DO.FindInBatches(result, batchSize, fc)
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Attrs(attrs ...field.AssignExpr) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Attrs(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Assign(attrs ...field.AssignExpr) IScaStorageTagInfoDo {
|
||||
return s.withDO(s.DO.Assign(attrs...))
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Joins(fields ...field.RelationField) IScaStorageTagInfoDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Joins(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Preload(fields ...field.RelationField) IScaStorageTagInfoDo {
|
||||
for _, _f := range fields {
|
||||
s = *s.withDO(s.DO.Preload(_f))
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) FirstOrInit() (*model.ScaStorageTagInfo, error) {
|
||||
if result, err := s.DO.FirstOrInit(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTagInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) FirstOrCreate() (*model.ScaStorageTagInfo, error) {
|
||||
if result, err := s.DO.FirstOrCreate(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return result.(*model.ScaStorageTagInfo), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) FindByPage(offset int, limit int) (result []*model.ScaStorageTagInfo, count int64, err error) {
|
||||
result, err = s.Offset(offset).Limit(limit).Find()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if size := len(result); 0 < limit && 0 < size && size < limit {
|
||||
count = int64(size + offset)
|
||||
return
|
||||
}
|
||||
|
||||
count, err = s.Offset(-1).Limit(-1).Count()
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
|
||||
count, err = s.Count()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = s.Offset(offset).Limit(limit).Scan(result)
|
||||
return
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Scan(result interface{}) (err error) {
|
||||
return s.DO.Scan(result)
|
||||
}
|
||||
|
||||
func (s scaStorageTagInfoDo) Delete(models ...*model.ScaStorageTagInfo) (result gen.ResultInfo, err error) {
|
||||
return s.DO.Delete(models)
|
||||
}
|
||||
|
||||
func (s *scaStorageTagInfoDo) withDO(do gen.Dao) *scaStorageTagInfoDo {
|
||||
s.DO = *do.(*gen.DO)
|
||||
return s
|
||||
}
|
@@ -29,6 +29,7 @@ func newScaStorageThumb(db *gorm.DB, opts ...gen.DOOption) scaStorageThumb {
	_scaStorageThumb.ALL = field.NewAsterisk(tableName)
	_scaStorageThumb.ID = field.NewInt64(tableName, "id")
	_scaStorageThumb.UserID = field.NewString(tableName, "user_id")
	_scaStorageThumb.InfoID = field.NewInt64(tableName, "info_id")
	_scaStorageThumb.ThumbPath = field.NewString(tableName, "thumb_path")
	_scaStorageThumb.ThumbW = field.NewFloat64(tableName, "thumb_w")
	_scaStorageThumb.ThumbH = field.NewFloat64(tableName, "thumb_h")

@@ -49,6 +50,7 @@ type scaStorageThumb struct {
	ALL       field.Asterisk
	ID        field.Int64   // 主键
	UserID    field.String  // 用户ID
	InfoID    field.Int64   // 信息ID
	ThumbPath field.String  // 缩略图路径
	ThumbW    field.Float64 // 缩略图宽
	ThumbH    field.Float64 // 缩略图高

@@ -74,6 +76,7 @@ func (s *scaStorageThumb) updateTableName(table string) *scaStorageThumb {
	s.ALL = field.NewAsterisk(table)
	s.ID = field.NewInt64(table, "id")
	s.UserID = field.NewString(table, "user_id")
	s.InfoID = field.NewInt64(table, "info_id")
	s.ThumbPath = field.NewString(table, "thumb_path")
	s.ThumbW = field.NewFloat64(table, "thumb_w")
	s.ThumbH = field.NewFloat64(table, "thumb_h")

@@ -97,9 +100,10 @@ func (s *scaStorageThumb) GetFieldByName(fieldName string) (field.OrderExpr, boo
}

func (s *scaStorageThumb) fillFieldMap() {
	s.fieldMap = make(map[string]field.Expr, 9)
	s.fieldMap = make(map[string]field.Expr, 10)
	s.fieldMap["id"] = s.ID
	s.fieldMap["user_id"] = s.UserID
	s.fieldMap["info_id"] = s.InfoID
	s.fieldMap["thumb_path"] = s.ThumbPath
	s.fieldMap["thumb_w"] = s.ThumbW
	s.fieldMap["thumb_h"] = s.ThumbH

@@ -21,10 +21,13 @@ const (
)

const (
	ImageListPrefix       = "image:list:"
	ImageCachePrefix      = "image:cache:"
	ImageRecentPrefix     = "image:recent:"
	ImageFaceListPrefix   = "image:faces:"
	ImageSinglePrefix     = "image:single:"
	ImageSharePrefix      = "image:share:"
	ImageShareVisitPrefix = "image:share:visit:"
)

const (
	BucketCapacityCachePrefix = "bucket:capacity:"
)

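// Illustrative sketch: one way the new prefix could be combined into a cache
// key; the provider:bucket layout is an assumption, not taken from this commit.
func bucketCapacityCacheKey(provider, bucket string) string {
	return BucketCapacityCachePrefix + provider + ":" + bucket // e.g. "bucket:capacity:alioss:photos"
}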
@@ -131,9 +131,11 @@ func (a *AliOSS) GetBucketStat(ctx context.Context, bucketName string) (*BucketS
		return nil, fmt.Errorf("failed to get bucket stat, error: %v", err)
	}
	return &BucketStat{
		Storage:      result.Storage,
		ObjectCount:  result.ObjectCount,
		LastModified: result.LastModifiedTime,
		Storage:             result.Storage,
		ObjectCount:         result.ObjectCount,
		LastModified:        result.LastModifiedTime,
		StandardStorage:     result.StandardStorage,
		StandardObjectCount: result.StandardObjectCount,
	}, nil
}

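// Illustrative sketch (assumes an initialized *AliOSS value and the "log"
// import, neither shown in this hunk): reading the extended statistics.
func logBucketUsage(ctx context.Context, oss *AliOSS, bucket string) error {
	stat, err := oss.GetBucketStat(ctx, bucket)
	if err != nil {
		return err
	}
	log.Printf("bucket=%s storage=%d objects=%d standard_storage=%d", bucket, stat.Storage, stat.ObjectCount, stat.StandardStorage)
	return nil
}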
@@ -19,9 +19,11 @@ type BucketProperties struct {

// 通用存储桶统计信息
type BucketStat struct {
	Storage      int64
	ObjectCount  int64
	LastModified int64
	Storage             int64
	ObjectCount         int64
	LastModified        int64
	StandardStorage     int64
	StandardObjectCount int64
}

// 通用存储桶信息
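// Illustrative sketch: deriving a used-percentage from BucketStat. The quota
// argument is an assumption; capacity is not part of this struct.
func usedPercentage(stat *BucketStat, capacityBytes int64) float64 {
	if capacityBytes <= 0 {
		return 0
	}
	return float64(stat.Storage) / float64(capacityBytes) * 100
}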